OpenAI compatibility

Gemini models are accessible through the OpenAI libraries (Python and TypeScript/JavaScript) and the REST API: update three lines of code and use your Gemini API key. If you aren't already using the OpenAI libraries, we recommend calling the Gemini API directly.

Python

from openai import OpenAI

client = OpenAI(
    api_key="gemini_api_key",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    n=1,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": "Explain to me how AI works"
        }
    ]
)

print(response.choices[0].message)

Node.js

import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "gemini_api_key",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

const response = await openai.chat.completions.create({
    model: "gemini-1.5-flash",
    messages: [
        { role: "system", content: "You are a helpful assistant." },
        {
            role: "user",
            content: "Explain to me how AI works",
        },
    ],
});

console.log(response.choices[0].message);

REST

curl "https://generativelanguage.googleapis.com/v1beta/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer gemini_api_key" \
-d '{
    "model": "gemini-1.5-flash",
    "messages": [
        {"role": "user", "content": "Explain to me how AI works"}
    ]
    }'

Streaming

The Gemini API supports streaming responses.

Python

from openai import OpenAI

client = OpenAI(
    api_key="gemini_api_key",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.chat.completions.create(
  model="gemini-1.5-flash",
  messages=[
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"}
  ],
  stream=True
)

for chunk in response:
    print(chunk.choices[0].delta)
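
To assemble the complete reply instead of printing each piece, you can accumulate the deltas as they arrive. A minimal sketch that replaces the loop above (delta.content is None on the final chunk, so guard for it):

full_reply = ""
for chunk in response:
    # Concatenate each incremental piece of the reply.
    full_reply += chunk.choices[0].delta.content or ""

print(full_reply)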

Node.js

import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "gemini_api_key",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
  const completion = await openai.chat.completions.create({
    model: "gemini-1.5-flash",
    messages: [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Hello!"}
    ],
    stream: true,
  });

  for await (const chunk of completion) {
    console.log(chunk.choices[0].delta.content);
  }
}

main();

REST

curl "https://generativelanguage.googleapis.com/v1beta//chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer gemini_api_key" \
-d '{
    "model": "gemini-1.5-flash",
    "messages": [
        {"role": "user", "content": "Explain to me how AI works"}
    ],
    "stream": true
  }'

Function calling

Function calling makes it easier for you to get structured data outputs from generative models and is supported in the Gemini API.

Python

from openai import OpenAI

client = OpenAI(
    api_key="gemini_api_key",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

tools = [
  {
    "type": "function",
    "function": {
      "name": "get_weather",
      "description": "Get the weather in a given location",
      "parameters": {
        "type": "object",
        "properties": {
          "location": {
            "type": "string",
            "description": "The city and state, e.g. Chicago, IL",
          },
          "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
      },
    }
  }
]

messages = [{"role": "user", "content": "What's the weather like in Chicago today?"}]
response = client.chat.completions.create(
  model="gemini-1.5-flash",
  messages=messages,
  tools=tools,
  tool_choice="auto"
)

print(response)
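
The model does not run get_weather itself; it returns a tool call, and your code executes the function and sends the result back for a final answer. A minimal sketch of that round trip, assuming a hypothetical get_weather implementation of your own:

import json

tool_call = response.choices[0].message.tool_calls[0]
args = json.loads(tool_call.function.arguments)

# Run your own implementation of the declared function (hypothetical here).
weather = get_weather(location=args["location"], unit=args.get("unit", "celsius"))

# Append the model's tool call and your result, then ask for the final reply.
messages.append(response.choices[0].message)
messages.append({
    "role": "tool",
    "tool_call_id": tool_call.id,
    "content": json.dumps(weather)
})

final = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=messages
)

print(final.choices[0].message.content)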

Node.js

import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "gemini_api_key",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
  const messages = [{"role": "user", "content": "What's the weather like in Chicago today?"}];
  const tools = [
      {
        "type": "function",
        "function": {
          "name": "get_weather",
          "description": "Get the weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {
                "type": "string",
                "description": "The city and state, e.g. Chicago, IL",
              },
              "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
          },
        }
      }
  ];

  const response = await openai.chat.completions.create({
    model: "gemini-1.5-flash",
    messages: messages,
    tools: tools,
    tool_choice: "auto",
  });

  console.log(response);
}

main();

REST

curl "https://generativelanguage.googleapis.com/v1beta/chat/completions" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer gemini_api_key" \
-d '{
  "model": "gemini-1.5-flash",
  "messages": [
    {
      "role": "user",
      "content": "What'\''s the weather like in Chicago today?"
    }
  ],
  "tools": [
    {
      "type": "function",
      "function": {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
          "type": "object",
          "properties": {
            "location": {
              "type": "string",
              "description": "The city and state, e.g. Chicago, IL"
            },
            "unit": {
              "type": "string",
              "enum": ["celsius", "fahrenheit"]
            }
          },
          "required": ["location"]
        }
      }
    }
  ],
  "tool_choice": "auto"
}'

Embeddings

Text embeddings measure the relatedness of text strings and can be generated using the Gemini API.

Python

from openai import OpenAI

client = OpenAI(
    api_key="gemini_api_key",
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)

response = client.embeddings.create(
    input="Your text string goes here",
    model="text-embedding-004"
)

print(response.data[0].embedding)
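
Because embeddings are vectors, relatedness is typically scored with cosine similarity: embed both strings, then compare the vectors. A minimal sketch using only the standard library:

import math

def cosine_similarity(a, b):
    # Dot product divided by the product of the vector magnitudes.
    dot = sum(x * y for x, y in zip(a, b))
    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b)))

first = client.embeddings.create(
    input="The cat sat on the mat",
    model="text-embedding-004"
).data[0].embedding

second = client.embeddings.create(
    input="A feline rested on the rug",
    model="text-embedding-004"
).data[0].embedding

# Values closer to 1.0 indicate more closely related text.
print(cosine_similarity(first, second))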

Node.js

import OpenAI from "openai";

const openai = new OpenAI({
    apiKey: "gemini_api_key",
    baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
});

async function main() {
  const embedding = await openai.embeddings.create({
    model: "text-embedding-004",
    input: "Your text string goes here",
  });

  console.log(embedding);
}

main();

REST

curl "https://generativelanguage.googleapis.com/v1beta/embeddings" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer gemini_api_key" \
-d '{
    "input": "Your text string goes here",
    "model": "text-embedding-004"
  }'

Current limitations

Support for the OpenAI libraries is still in beta while we extend feature support, so some parameters and features are not yet available.

If you have questions about supported parameters or upcoming features, or if you run into any issues getting started with Gemini, join our Developer Forum.