Gemini models are accessible through the OpenAI libraries (Python and TypeScript/JavaScript) as well as the REST API, by updating three lines of code and using your Gemini API key. If you are not already using the OpenAI libraries, we recommend calling the Gemini API directly.
Python
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    n=1,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": "Explain to me how AI works"
        }
    ]
)

print(response.choices[0].message)
Node.js
import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "GEMINI_API_KEY",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

const response = await openai.chat.completions.create({
    model: "gemini-1.5-flash",
    messages: [
        { role: "system", content: "You are a helpful assistant." },
        {
            role: "user",
            content: "Explain to me how AI works",
        },
    ],
});

console.log(response.choices[0].message);
REST
curl "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer GEMINI_API_KEY" \
-d '{
"model": "gemini-1.5-flash",
"messages": [
{"role": "user", "content": "Explain to me how AI works"}
]
}'
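In all of the snippets above, GEMINI_API_KEY is a placeholder for your actual key. Rather than hard-coding it, you would typically read it from the environment; here is a minimal Python sketch, assuming the key is exported as a GEMINI_API_KEY environment variable:

import os
from openai import OpenAI

# Read the key from the environment instead of embedding it in source code.
client = OpenAI(
    api_key=os.environ["GEMINI_API_KEY"],
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)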
Streaming
The Gemini API supports streaming responses.
Python
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"}
    ],
    stream=True
)

for chunk in response:
    print(chunk.choices[0].delta)
Node.js
import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "GEMINI_API_KEY",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
    const completion = await openai.chat.completions.create({
        model: "gemini-1.5-flash",
        messages: [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ],
        stream: true,
    });

    for await (const chunk of completion) {
        console.log(chunk.choices[0].delta.content);
    }
}

main();
REST
curl "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer GEMINI_API_KEY" \
-d '{
"model": "gemini-1.5-flash",
"messages": [
{"role": "user", "content": "Explain to me how AI works"}
],
"stream": true
}'
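The streamed chunks only carry incremental deltas, so if you need the complete reply you have to stitch them together yourself. A minimal Python sketch, assuming (as with the OpenAI API) that some chunks may arrive with an empty delta, such as the final one:

from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

stream = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True
)

# Accumulate the streamed deltas into the full reply text.
parts = []
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:  # guard against chunks with no content
        print(delta.content, end="", flush=True)
        parts.append(delta.content)

full_text = "".join(parts)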
Function calling
Function calling makes it easier for you to get structured data outputs from generative models, and it is supported in the Gemini API.
Python
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. Chicago, IL",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    }
]

messages = [{"role": "user", "content": "What's the weather like in Chicago today?"}]

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

print(response)
Node.js
import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "GEMINI_API_KEY",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
    const messages = [{"role": "user", "content": "What's the weather like in Chicago today?"}];
    const tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. Chicago, IL",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            }
        }
    ];

    const response = await openai.chat.completions.create({
        model: "gemini-1.5-flash",
        messages: messages,
        tools: tools,
        tool_choice: "auto",
    });

    console.log(response);
}

main();
REST
curl "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer GEMINI_API_KEY" \
-d '{
"model": "gemini-1.5-flash",
"messages": [
{
"role": "user",
"content": "What'\''s the weather like in Chicago today?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. Chicago, IL"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}
],
"tool_choice": "auto"
}'
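The examples above stop at the model's tool-call request; to get a final natural-language answer you normally execute the function yourself and send the result back in a follow-up request. The Python sketch below continues from the Python example above; get_weather is a hypothetical local implementation, and the sketch assumes the compatibility layer accepts the standard OpenAI tool-result message format (a "tool" role message with a tool_call_id):

import json

# Continues from the Python function-calling example above.
message = response.choices[0].message

if message.tool_calls:
    tool_call = message.tool_calls[0]
    args = json.loads(tool_call.function.arguments)

    # Hypothetical local implementation of the declared get_weather tool.
    def get_weather(location, unit="celsius"):
        return f"It is 20 degrees {unit} and sunny in {location}."

    result = get_weather(**args)

    # Send the assistant's tool call and the tool result back to the model.
    follow_up = client.chat.completions.create(
        model="gemini-1.5-flash",
        messages=messages + [
            message,
            {"role": "tool", "tool_call_id": tool_call.id, "content": result},
        ],
        tools=tools,
    )
    print(follow_up.choices[0].message.content)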
Image understanding
Gemini models are natively multimodal and deliver best-in-class performance on many common vision tasks.
Python
import base64
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

# Function to encode the image
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

# Getting the base64 string
base64_image = encode_image("Path/to/agi/image.jpeg")

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "What is in this image?",
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    },
                },
            ],
        }
    ],
)

print(response.choices[0])
Node.js
import OpenAI from "openai";
import fs from 'fs/promises';

const openai = new OpenAI({
    apiKey: "GEMINI_API_KEY",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function encodeImage(imagePath) {
    try {
        const imageBuffer = await fs.readFile(imagePath);
        return imageBuffer.toString('base64');
    } catch (error) {
        console.error("Error encoding image:", error);
        return null;
    }
}

async function main() {
    const imagePath = "Path/to/agi/image.jpeg";
    const base64Image = await encodeImage(imagePath);

    const messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "What is in this image?",
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": `data:image/jpeg;base64,${base64Image}`
                    },
                },
            ],
        }
    ];

    try {
        const response = await openai.chat.completions.create({
            model: "gemini-1.5-flash",
            messages: messages,
        });
        console.log(response.choices[0]);
    } catch (error) {
        console.error("Error calling Gemini API:", error);
    }
}

main();
REST
bash -c '
base64_image=$(base64 -i "Path/to/agi/image.jpeg");
curl "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer GEMINI_API_KEY" \
  -d "{
    \"model\": \"gemini-1.5-flash\",
    \"messages\": [
      {
        \"role\": \"user\",
        \"content\": [
          { \"type\": \"text\", \"text\": \"What is in this image?\" },
          {
            \"type\": \"image_url\",
            \"image_url\": { \"url\": \"data:image/jpeg;base64,${base64_image}\" }
          }
        ]
      }
    ]
  }"
'
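Because the content field is a list, you can attach more than one image part to the same user message. A short Python sketch reusing client and the encode_image helper from the Python example above; the two image paths are placeholders, and it assumes the compatibility layer accepts multiple image_url parts per message, as the OpenAI format allows:

# Reuses client and encode_image from the Python example above.
first_image = encode_image("Path/to/first/image.jpeg")
second_image = encode_image("Path/to/second/image.jpeg")

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What differs between these two images?"},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{first_image}"}},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{second_image}"}},
            ],
        }
    ],
)

print(response.choices[0].message.content)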
Embeddings
Text embeddings measure the relatedness of text strings and can be generated using the Gemini API.
Python
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.embeddings.create(
    input="Your text string goes here",
    model="text-embedding-004"
)

print(response.data[0].embedding)
Node.js
import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "GEMINI_API_KEY",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
    const embedding = await openai.embeddings.create({
        model: "text-embedding-004",
        input: "Your text string goes here",
    });
    console.log(embedding);
}

main();
REST
curl "https://generativelanguage.googleapis.com/v1beta/openai/embeddings" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer GEMINI_API_KEY" \
-d '{
"input": "Your text string goes here",
"model": "text-embedding-004"
}'
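To actually measure how related two strings are, you can compare their embedding vectors, for example with cosine similarity. A minimal Python sketch; it assumes the endpoint accepts a list of inputs in one request, as the OpenAI embeddings API does, and if not you can embed each string separately:

import math
from openai import OpenAI

client = OpenAI(
    api_key="GEMINI_API_KEY",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

# Embed two strings; the response preserves the input order.
response = client.embeddings.create(
    model="text-embedding-004",
    input=["How do I bake bread?", "A recipe for a simple loaf"]
)
a = response.data[0].embedding
b = response.data[1].embedding

# Cosine similarity: values closer to 1 mean more closely related texts.
dot = sum(x * y for x, y in zip(a, b))
norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
print(dot / norm)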
Current limitations
Support for the OpenAI libraries is still in beta while we expand feature support, so some functionality is limited.
If you have questions about supported parameters or upcoming features, or if you run into any issues getting started with Gemini, join our developer forum.