When we introduced the Gemini 2.0 model family, we also released a new set of Google Gen AI SDKs for working with the Gemini API.
These updated SDKs will be fully compatible with all Gemini API models and features, including recent additions such as the Live API and Veo.
We recommend that you start migrating your projects from the old Gemini SDKs to the new Gen AI SDKs. This guide provides before-and-after examples of migrated code to help you get started. We'll keep adding examples here to help you get up and running with the new SDKs.
Install the SDK
Before
pip install -U -q "google-generativeai"
npm install @google/generative-ai
After
pip install -U -q "google-genai"
npm install @google/genai
Authentication
Authenticate using an API key. You can create your API key in Google AI Studio.
The old SDK handled the API client object implicitly behind the scenes. In the new SDK you create the API client and use it to call the API. Remember, in either case the SDK picks up your API key from the GOOGLE_API_KEY environment variable if you don't pass one to the client.
Before
import google.generativeai as genai
genai.configure(api_key=...)
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
After
export GOOGLE_API_KEY="YOUR_API_KEY"
from google import genai
client = genai.Client() # Set the API key using the GOOGLE_API_KEY env var.
# Alternatively, you could set the API key explicitly:
# client = genai.Client(api_key="your_api_key")
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({apiKey: "GEMINI_API_KEY"});
Generate content
The new SDK provides access to all of the API methods through the Client object. Except for a few stateful special cases (chat and Live API sessions), these are all stateless functions. For utility and uniformity, returned objects are pydantic classes.
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content(
'Tell me a story in 300 words'
)
print(response.text)
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const prompt = "Tell me a story in 300 words";
const result = await model.generateContent(prompt);
console.log(result.response.text());
After
from google import genai
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='Tell me a story in 300 words.'
)
print(response.text)
print(response.model_dump_json(
exclude_none=True, indent=4))
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const response = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: "Tell me a story in 300 words.",
});
console.log(response.text);
Many of the same convenience features exist in the new SDK. For example, PIL.Image objects are automatically converted.
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content([
'Tell me a story based on this image',
Image.open(image_path)
])
print(response.text)
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
function fileToGenerativePart(path, mimeType) {
return {
inlineData: {
data: Buffer.from(fs.readFileSync(path)).toString("base64"),
mimeType,
},
};
}
const prompt = "Tell me a story based on this image";
const imagePart = fileToGenerativePart(
`path/to/organ.jpg`,
"image/jpeg",
);
const result = await model.generateContent([prompt, imagePart]);
console.log(result.response.text());
After
from google import genai
from PIL import Image
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents=[
'Tell me a story based on this image',
Image.open(image_path)
]
)
print(response.text)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const organ = await ai.files.upload({
file: "path/to/organ.jpg",
});
const response = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: [
createUserContent([
"Tell me a story based on this image",
createPartFromUri(organ.uri, organ.mimeType)
]),
],
});
console.log(response.text);
Streaming
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content(
"Write a cute story about cats.",
stream=True)
for chunk in response:
print(chunk.text)
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const prompt = "Write a story about a magic backpack.";
const result = await model.generateContentStream(prompt);
// Print text as it comes in.
for await (const chunk of result.stream) {
const chunkText = chunk.text();
process.stdout.write(chunkText);
}
After
from google import genai
client = genai.Client()
for chunk in client.models.generate_content_stream(
model='gemini-2.0-flash',
contents='Tell me a story in 300 words.'
):
print(chunk.text)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const response = await ai.models.generateContentStream({
model: "gemini-2.0-flash",
contents: "Write a story about a magic backpack.",
});
let text = "";
for await (const chunk of response) {
console.log(chunk.text);
text += chunk.text;
}
Optional arguments
For all methods in the new SDK, the required arguments are provided as keyword arguments. All optional inputs are provided in the config argument. Config arguments can be specified as either Python dictionaries or Config classes in the google.genai.types namespace. For utility and uniformity, all definitions within the types module are pydantic classes.
Before
import google.generativeai as genai
model = genai.GenerativeModel(
'gemini-1.5-flash',
system_instruction='you are a story teller for kids under 5 years old',
generation_config=genai.GenerationConfig(
max_output_tokens=400,
top_k=2,
top_p=0.5,
temperature=0.5,
response_mime_type='application/json',
stop_sequences=['\n'],
)
)
response = model.generate_content('tell me a story in 100 words')
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({
model: "gemini-1.5-flash",
generationConfig: {
candidateCount: 1,
stopSequences: ["x"],
maxOutputTokens: 20,
temperature: 1.0,
},
});
const result = await model.generateContent(
"Tell me a story about a magic backpack.",
);
console.log(result.response.text())
After
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='Tell me a story in 100 words.',
config=types.GenerateContentConfig(
system_instruction='you are a story teller for kids under 5 years old',
max_output_tokens= 400,
top_k= 2,
top_p= 0.5,
temperature= 0.5,
response_mime_type= 'application/json',
stop_sequences= ['\n'],
seed=42,
),
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const response = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: "Tell me a story about a magic backpack.",
config: {
candidateCount: 1,
stopSequences: ["x"],
maxOutputTokens: 20,
temperature: 1.0,
},
});
console.log(response.text);
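As noted above, the optional settings can also be passed as a plain Python dictionary instead of a types.GenerateContentConfig instance. A minimal sketch of the equivalent call (same model and prompt as the Python example above):

from google import genai

client = genai.Client()

# Same request as above, but with the optional settings given as a dict.
response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='Tell me a story in 100 words.',
    config={
        'system_instruction': 'you are a story teller for kids under 5 years old',
        'max_output_tokens': 400,
        'temperature': 0.5,
    },
)
print(response.text)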
Safety settings
Generate a response with safety settings:
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content(
'say something bad',
safety_settings={
'HATE': 'BLOCK_ONLY_HIGH',
'HARASSMENT': 'BLOCK_ONLY_HIGH',
}
)
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({
model: "gemini-1.5-flash",
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
},
],
});
const unsafePrompt =
"I support Martians Soccer Club and I think " +
"Jupiterians Football Club sucks! Write an ironic phrase telling " +
"them how I feel about them.";
const result = await model.generateContent(unsafePrompt);
try {
result.response.text();
} catch (e) {
console.error(e);
console.log(result.response.candidates[0].safetyRatings);
}
After
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='say something bad',
config=types.GenerateContentConfig(
safety_settings= [
types.SafetySetting(
category='HARM_CATEGORY_HATE_SPEECH',
threshold='BLOCK_ONLY_HIGH'
),
]
),
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const unsafePrompt =
"I support Martians Soccer Club and I think " +
"Jupiterians Football Club sucks! Write an ironic phrase telling " +
"them how I feel about them.";
const response = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: unsafePrompt,
config: {
safetySettings: [
{
category: "HARM_CATEGORY_HARASSMENT",
threshold: "BLOCK_ONLY_HIGH",
},
],
},
});
console.log("Finish reason:", response.candidates[0].finishReason);
console.log("Safety ratings:", response.candidates[0].safetyRatings);
Async
To use the new SDK with asyncio, there is a separate async implementation of every method under client.aio.
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content_async(
'tell me a story in 100 words'
)
After
from google import genai
client = genai.Client()
response = await client.aio.models.generate_content(
model='gemini-2.0-flash',
contents='Tell me a story in 300 words.'
)
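The new snippet uses a bare await, so it has to run inside an event loop (for example, a notebook). In a regular script you can wrap the call with asyncio.run; a minimal sketch:

import asyncio

from google import genai


async def main():
    client = genai.Client()
    response = await client.aio.models.generate_content(
        model='gemini-2.0-flash',
        contents='Tell me a story in 300 words.',
    )
    print(response.text)


asyncio.run(main())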
Chat
Start a chat and send the model a message:
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
chat = model.start_chat()
response = chat.send_message(
"Tell me a story in 100 words")
response = chat.send_message(
"What happened after that?")
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
const chat = model.startChat({
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
let result = await chat.sendMessage("I have 2 dogs in my house.");
console.log(result.response.text());
result = await chat.sendMessage("How many paws are in my house?");
console.log(result.response.text());
After
from google import genai
client = genai.Client()
chat = client.chats.create(model='gemini-2.0-flash')
response = chat.send_message(
message='Tell me a story in 100 words')
response = chat.send_message(
message='What happened after that?')
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const chat = ai.chats.create({
model: "gemini-2.0-flash",
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
const response1 = await chat.sendMessage({
message: "I have 2 dogs in my house.",
});
console.log("Chat response 1:", response1.text);
const response2 = await chat.sendMessage({
message: "How many paws are in my house?",
});
console.log("Chat response 2:", response2.text);
Function calling
In the new SDK, automatic function calling is the default. Here, you disable it.
Before
import google.generativeai as genai
from enum import Enum
def get_current_weather(location: str) -> str:
"""Get the current whether in a given location.
Args:
location: required, The city and state, e.g. San Francisco, CA
unit: celsius or fahrenheit
"""
print(f'Called with: {location=}')
return "23C"
model = genai.GenerativeModel(
model_name="gemini-1.5-flash",
tools=[get_current_weather]
)
response = model.generate_content("What is the weather in San Francisco?")
function_call = response.candidates[0].parts[0].function_call
After
from google import genai
from google.genai import types
client = genai.Client()
def get_current_weather(location: str) -> str:
"""Get the current whether in a given location.
Args:
location: required, The city and state, e.g. San Francisco, CA
unit: celsius or fahrenheit
"""
print(f'Called with: {location=}')
return "23C"
response = client.models.generate_content(
model='gemini-2.0-flash',
contents="What is the weather like in Boston?",
config=types.GenerateContentConfig(
tools=[get_current_weather],
automatic_function_calling={'disable': True},
),
)
function_call = response.candidates[0].content.parts[0].function_call
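With automatic function calling disabled, the model returns a function call instead of a final answer, and you execute the function yourself. A minimal sketch continuing the Python example above:

# The model returns the name and arguments of the function it wants called.
if function_call:
    print(function_call.name, dict(function_call.args))
    # Execute the local Python function with the model-supplied arguments.
    result = get_current_weather(**function_call.args)
    print(result)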
Automatic function calling
The old SDK only supports automatic function calling in chat. In the new SDK this is the default behavior in generate_content.
Before
import google.generativeai as genai
def get_current_weather(city: str) -> str:
return "23C"
model = genai.GenerativeModel(
model_name="gemini-1.5-flash",
tools=[get_current_weather]
)
chat = model.start_chat(
enable_automatic_function_calling=True)
result = chat.send_message("What is the weather in San Francisco?")
After
from google import genai
from google.genai import types
client = genai.Client()
def get_current_weather(city: str) -> str:
return "23C"
response = client.models.generate_content(
model='gemini-2.0-flash',
contents="What is the weather like in Boston?",
config=types.GenerateContentConfig(
tools=[get_current_weather]
),
)
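Because automatic function calling is enabled here, the response already contains the model's final answer. A minimal sketch of inspecting it, assuming the response object above and assuming the automatic_function_calling_history field exposed by the new Python SDK's response type:

# The final text answer, produced after the SDK called get_current_weather.
print(response.text)

# The intermediate turns the SDK exchanged while resolving the call
# (field name assumed; it records the function call/response contents).
for content in response.automatic_function_calling_history or []:
    print(content)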
Code execution
Code execution is a tool that allows the model to generate Python code, run it, and return the result.
Before
import google.generativeai as genai
model = genai.GenerativeModel(
model_name="gemini-1.5-flash",
tools="code_execution"
)
result = model.generate_content(
"What is the sum of the first 50 prime numbers? Generate and run code for "
"the calculation, and make sure you get all 50.")
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({
model: "gemini-1.5-flash",
tools: [{ codeExecution: {} }],
});
const result = await model.generateContent(
"What is the sum of the first 50 prime numbers? " +
"Generate and run code for the calculation, and make sure you get " +
"all 50.",
);
console.log(result.response.text());
After
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='What is the sum of the first 50 prime numbers? Generate and run '
'code for the calculation, and make sure you get all 50.',
config=types.GenerateContentConfig(
tools=[types.Tool(code_execution=types.ToolCodeExecution)],
),
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const response = await ai.models.generateContent({
model: "gemini-2.0-pro-exp-02-05",
contents: `Write and execute code that calculates the sum of the first 50 prime numbers.
Ensure that only the executable code and its resulting output are generated.`,
});
// Each part may contain text, executable code, or an execution result.
for (const part of response.candidates[0].content.parts) {
console.log(part);
console.log("\n");
}
console.log("-".repeat(80));
// The `.text` accessor concatenates the parts into a markdown-formatted text.
console.log("\n", response.text);
Search grounding
GoogleSearch (Gemini >= 2.0) and GoogleSearchRetrieval (Gemini < 2.0) are tools that allow the model to retrieve public web data for grounding, powered by Google.
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content(
contents="what is the Google stock price?",
tools='google_search_retrieval'
)
After
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='What is the Google stock price?',
config=types.GenerateContentConfig(
tools=[
types.Tool(
google_search=types.GoogleSearch()
)
]
)
)
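When the search tool is used, the response carries grounding metadata alongside the text. A minimal sketch of reading it, assuming the Python response above (field names taken from the new SDK's grounding metadata type, so treat them as assumptions):

print(response.text)

# Grounding metadata: the queries the model issued and the web sources used.
metadata = response.candidates[0].grounding_metadata
if metadata:
    print(metadata.web_search_queries)
    for chunk in metadata.grounding_chunks or []:
        print(chunk.web.title, chunk.web.uri)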
JSON response
Generate responses in JSON format.
By specifying a response_schema and setting response_mime_type="application/json", you can constrain the model to produce a JSON response that follows a given structure. The new SDK uses pydantic classes to provide the schema (although you can pass a genai.types.Schema, or an equivalent dict). When possible, the SDK parses the returned JSON and returns the result in response.parsed. If you provided a pydantic class as the schema, the SDK converts that JSON into an instance of the class.
Before
import google.generativeai as genai
import typing_extensions as typing
class CountryInfo(typing.TypedDict):
name: str
population: int
capital: str
continent: str
major_cities: list[str]
gdp: int
official_language: str
total_area_sq_mi: int
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
result = model.generate_content(
"Give me information of the United States",
generation_config=genai.GenerationConfig(
response_mime_type="application/json",
response_schema = CountryInfo
),
)
import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const schema = {
description: "List of recipes",
type: SchemaType.ARRAY,
items: {
type: SchemaType.OBJECT,
properties: {
recipeName: {
type: SchemaType.STRING,
description: "Name of the recipe",
nullable: false,
},
},
required: ["recipeName"],
},
};
const model = genAI.getGenerativeModel({
model: "gemini-1.5-pro",
generationConfig: {
responseMimeType: "application/json",
responseSchema: schema,
},
});
const result = await model.generateContent(
"List a few popular cookie recipes.",
);
console.log(result.response.text());
After
from google import genai
from pydantic import BaseModel
client = genai.Client()
class CountryInfo(BaseModel):
name: str
population: int
capital: str
continent: str
major_cities: list[str]
gdp: int
official_language: str
total_area_sq_mi: int
response = client.models.generate_content(
model='gemini-2.0-flash',
contents='Give me information of the United States.',
config={
'response_mime_type': 'application/json',
'response_schema': CountryInfo,
},
)
response.parsed
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const response = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: "List a few popular cookie recipes.",
config: {
responseMimeType: "application/json",
responseSchema: {
type: "array",
items: {
type: "object",
properties: {
recipeName: { type: "string" },
ingredients: { type: "array", items: { type: "string" } },
},
required: ["recipeName", "ingredients"],
},
},
},
});
console.log(response.text);
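Because the Python example passes a pydantic class as the schema, response.parsed comes back as a CountryInfo instance rather than raw JSON. A minimal usage sketch:

# response.parsed is an instance of CountryInfo (from the example above).
country = response.parsed
print(country.name, country.capital)
print(country.model_dump())  # the parsed fields as a plain dict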
Files
Upload
Upload a file:
Before
import requests
import pathlib
import google.generativeai as genai
# Download file
response = requests.get(
'https://storage.googleapis.com/generativeai-downloads/data/a11.txt')
pathlib.Path('a11.txt').write_text(response.text)
my_file = genai.upload_file(path='a11.txt')
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.generate_content([
'Can you summarize this file:',
my_file
])
print(response.text)
After
import requests
import pathlib
from google import genai
client = genai.Client()
# Download file
response = requests.get(
'https://storage.googleapis.com/generativeai-downloads/data/a11.txt')
pathlib.Path('a11.txt').write_text(response.text)
my_file = client.files.upload(file='a11.txt')
response = client.models.generate_content(
model='gemini-2.0-flash',
contents=[
'Can you summarize this file:',
my_file
]
)
print(response.text)
List and get
List uploaded files and get an uploaded file by its file name:
Before
import google.generativeai as genai
for file in genai.list_files():
print(file.name)
file = genai.get_file(name=file.name)
After
from google import genai
client = genai.Client()
for file in client.files.list():
print(file.name)
file = client.files.get(name=file.name)
Delete
Delete a file:
Before
import pathlib
import google.generativeai as genai
pathlib.Path('dummy.txt').write_text('Some dummy text.')
dummy_file = genai.upload_file(path='dummy.txt')
file = genai.delete_file(name=dummy_file.name)
After
import pathlib
from google import genai
client = genai.Client()
pathlib.Path('dummy.txt').write_text('Some dummy text.')
dummy_file = client.files.upload(file='dummy.txt')
response = client.files.delete(name=dummy_file.name)
Context caching
Context caching lets you pass content to the model once, cache the input tokens, and then refer to the cached tokens in subsequent calls to lower the cost.
Before
import requests
import pathlib
import google.generativeai as genai
from google.generativeai import caching
# Download file
response = requests.get(
'https://storage.googleapis.com/generativeai-downloads/data/a11.txt')
pathlib.Path('a11.txt').write_text(response.text)
# Upload file
document = genai.upload_file(path="a11.txt")
# Create cache
apollo_cache = caching.CachedContent.create(
model="gemini-1.5-flash-001",
system_instruction="You are an expert at analyzing transcripts.",
contents=[document],
)
# Generate response
apollo_model = genai.GenerativeModel.from_cached_content(
cached_content=apollo_cache
)
response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
import { GoogleGenerativeAI } from "@google/generative-ai";
const cacheManager = new GoogleAICacheManager("GOOGLE_API_KEY");
const fileManager = new GoogleAIFileManager("GOOGLE_API_KEY");
const uploadResult = await fileManager.uploadFile("path/to/a11.txt", {
mimeType: "text/plain",
});
const cacheResult = await cacheManager.create({
model: "models/gemini-1.5-flash",
contents: [
{
role: "user",
parts: [
{
fileData: {
fileUri: uploadResult.file.uri,
mimeType: uploadResult.file.mimeType,
},
},
],
},
],
});
console.log(cacheResult);
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModelFromCachedContent(cacheResult);
const result = await model.generateContent(
"Please summarize this transcript.",
);
console.log(result.response.text());
After
import requests
import pathlib
from google import genai
from google.genai import types
client = genai.Client()
# Check which models support caching.
for m in client.models.list():
for action in m.supported_actions:
if action == "createCachedContent":
print(m.name)
break
# Download file
response = requests.get(
'https://storage.googleapis.com/generativeai-downloads/data/a11.txt')
pathlib.Path('a11.txt').write_text(response.text)
# Upload file
document = client.files.upload(file='a11.txt')
# Create cache
model='gemini-1.5-flash-001'
apollo_cache = client.caches.create(
model=model,
config={
'contents': [document],
'system_instruction': 'You are an expert at analyzing transcripts.',
},
)
# Generate response
response = client.models.generate_content(
model=model,
contents='Find a lighthearted moment from this transcript',
config=types.GenerateContentConfig(
cached_content=apollo_cache.name,
)
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const filePath = path.join(media, "a11.txt");
const document = await ai.files.upload({
file: filePath,
config: { mimeType: "text/plain" },
});
console.log("Uploaded file name:", document.name);
const modelName = "gemini-1.5-flash";
const contents = [
createUserContent(createPartFromUri(document.uri, document.mimeType)),
];
const cache = await ai.caches.create({
model: modelName,
config: {
contents: contents,
systemInstruction: "You are an expert analyzing transcripts.",
},
});
console.log("Cache created:", cache);
const response = await ai.models.generateContent({
model: modelName,
contents: "Please summarize this transcript",
config: { cachedContent: cache.name },
});
console.log("Response text:", response.text);
Count tokens
Count the number of tokens in a request.
Before
import google.generativeai as genai
model = genai.GenerativeModel('gemini-1.5-flash')
response = model.count_tokens(
'The quick brown fox jumps over the lazy dog.')
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({
model: "gemini-1.5-flash",
});
// Count tokens in a prompt without calling text generation.
const countResult = await model.countTokens(
"The quick brown fox jumps over the lazy dog.",
);
console.log(countResult.totalTokens); // 11
const generateResult = await model.generateContent(
"The quick brown fox jumps over the lazy dog.",
);
// On the response for `generateContent`, use `usageMetadata`
// to get separate input and output token counts
// (`promptTokenCount` and `candidatesTokenCount`, respectively),
// as well as the combined token count (`totalTokenCount`).
console.log(generateResult.response.usageMetadata);
// candidatesTokenCount and totalTokenCount depend on response, may vary
// { promptTokenCount: 11, candidatesTokenCount: 124, totalTokenCount: 135 }
After
from google import genai
client = genai.Client()
response = client.models.count_tokens(
model='gemini-2.0-flash',
contents='The quick brown fox jumps over the lazy dog.',
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const prompt = "The quick brown fox jumps over the lazy dog.";
const countTokensResponse = await ai.models.countTokens({
model: "gemini-2.0-flash",
contents: prompt,
});
console.log(countTokensResponse.totalTokens);
const generateResponse = await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: prompt,
});
console.log(generateResponse.usageMetadata);
Generate images
Generate images:
Before
#pip install https://github.com/google-gemini/generative-ai-python@imagen
import google.generativeai as genai
imagen = genai.ImageGenerationModel(
"imagen-3.0-generate-001")
gen_images = imagen.generate_images(
prompt="Robot holding a red skateboard",
number_of_images=1,
safety_filter_level="block_low_and_above",
person_generation="allow_adult",
aspect_ratio="3:4",
)
After
import pathlib
from google import genai
from google.genai import types
client = genai.Client()
gen_images = client.models.generate_images(
model='imagen-3.0-generate-001',
prompt='Robot holding a red skateboard',
config=types.GenerateImagesConfig(
number_of_images= 1,
safety_filter_level= "BLOCK_LOW_AND_ABOVE",
person_generation= "ALLOW_ADULT",
aspect_ratio= "3:4",
)
)
for n, image in enumerate(gen_images.generated_images):
pathlib.Path(f'{n}.png').write_bytes(
image.image.image_bytes)
Embed content
Generate content embeddings.
Before
import google.generativeai as genai
response = genai.embed_content(
model='models/text-embedding-004',
content='Hello world'
)
import { GoogleGenerativeAI } from "@google/generative-ai";
const genAI = new GoogleGenerativeAI("GOOGLE_API_KEY");
const model = genAI.getGenerativeModel({
model: "text-embedding-004",
});
const result = await model.embedContent("Hello world!");
console.log(result.embedding);
After
from google import genai
client = genai.Client()
response = client.models.embed_content(
model='text-embedding-004',
contents='Hello world',
)
import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: "GOOGLE_API_KEY" });
const text = "Hello World!";
const result = await ai.models.embedContent({
model: "text-embedding-004",
contents: text,
config: { outputDimensionality: 10 },
});
console.log(result.embeddings);
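The Python response holds a list of embeddings (one per input). A minimal sketch of reading the vector back, assuming the response object from the Python example above:

# Each entry in response.embeddings corresponds to one input; .values is the vector.
embedding = response.embeddings[0]
print(len(embedding.values))
print(embedding.values[:5])  # first few dimensions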
Tune a model
Create and use a tuned model.
The new SDK simplifies tuning with client.tunings.tune, which launches the tuning job and polls the operation until the job is complete.
Before
import google.generativeai as genai
import random
# create tuning model
train_data = {}
for i in range(1, 6):
key = f'input {i}'
value = f'output {i}'
train_data[key] = value
name = f'generate-num-{random.randint(0,10000)}'
operation = genai.create_tuned_model(
source_model='models/gemini-1.5-flash-001-tuning',
training_data=train_data,
id = name,
epoch_count = 5,
batch_size=4,
learning_rate=0.001,
)
# wait for tuning complete
tuningProgress = operation.result()
# generate content with the tuned model
model = genai.GenerativeModel(model_name=f'tunedModels/{name}')
response = model.generate_content('55')
After
from google import genai
from google.genai import types
client = genai.Client()
# Check which models are available for tuning.
for m in client.models.list():
for action in m.supported_actions:
if action == "createTunedModel":
print(m.name)
break
# create tuning model
training_dataset=types.TuningDataset(
examples=[
types.TuningExample(
text_input=f'input {i}',
output=f'output {i}',
)
for i in range(5)
],
)
tuning_job = client.tunings.tune(
base_model='models/gemini-1.5-flash-001-tuning',
training_dataset=training_dataset,
config=types.CreateTuningJobConfig(
epoch_count= 5,
batch_size=4,
learning_rate=0.001,
tuned_model_display_name="test tuned model"
)
)
# generate content with the tuned model
response = client.models.generate_content(
model=tuning_job.tuned_model.model,
contents='55',
)
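After tuning you can re-fetch the job by name, for example to check its state or find the tuned model id again. A minimal sketch assuming the client and tuning_job above:

# Look the tuning job up again by its resource name.
job = client.tunings.get(name=tuning_job.name)
print(job.state)
print(job.tuned_model.model)  # the model id used for generate_content above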