Counting tokens

For detailed guidance on counting tokens using the Gemini API, including how images, audio, and video are counted, see the token counting guide and the accompanying cookbook recipes.

Method: models.countTokens

Runs a model's tokenizer on the input Content and returns the token count. Refer to the tokens guide to learn more about tokens.

Endpoint

POST https://generativelanguage.googleapis.com/v1beta/{model=models/*}:countTokens

Path parameters

model string

Required. The model's resource name. This serves as an ID for the Model to use.

This name should match a model name returned by the models.list method.

Format: models/{model}
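
For example, to count tokens with Gemini 2.0 Flash (as in the samples below), the {model=models/*} path segment resolves to models/gemini-2.0-flash:

POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens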

Request body

The request body contains data with the following structure:

Fields
contents[] object (Content)

Optional. The input given to the model as a prompt. This field is ignored when generateContentRequest is set.

generateContentRequest object (GenerateContentRequest)

Optional. The overall input given to the Model. This includes the prompt as well as other model-steering information such as system instructions and/or function declarations for function calling. Model + Contents and generateContentRequest are mutually exclusive: you can send either Model + Contents or a generateContentRequest, but never both.
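
As a sketch of the second shape, the request below wraps everything in generateContentRequest so that model-steering fields such as systemInstruction are included in the count; note that the nested request carries its own required model field:

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "generateContentRequest": {
        "model": "models/gemini-2.0-flash",
        "contents": [{
          "parts": [{"text": "The quick brown fox jumps over the lazy dog."}]
        }],
        "systemInstruction": {
          "parts": [{"text": "You are a cat. Your name is Neko."}]
        }
      }
    }'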

Example request

from google import genai

client = genai.Client()
prompt = "The quick brown fox jumps over the lazy dog."

# Count tokens using the new client method.
total_tokens = client.models.count_tokens(
    model="gemini-2.0-flash", contents=prompt
)
print("total_tokens: ", total_tokens)
# ( e.g., total_tokens: 10 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=prompt
)

# The usage_metadata provides detailed token counts.
print(response.usage_metadata)
# ( e.g., prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 )
// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "The quick brown fox jumps over the lazy dog.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(generateResponse.usageMetadata);
model := client.GenerativeModel("gemini-1.5-flash")
prompt := "The quick brown fox jumps over the lazy dog"

// Call CountTokens to get the input token count (`total tokens`).
tokResp, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 10 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

// On the response for GenerateContent, use UsageMetadata to get
// separate input and output token counts (PromptTokenCount and
// CandidatesTokenCount, respectively), as well as the combined
// token count (TotalTokenCount).
fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 10, candidates_token_count: 38, total_token_count: 48 )
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts":[{
          "text": "The quick brown fox jumps over the lazy dog."
          }],
        }],
      }'
val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

// For text-only input
val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.")
print(totalTokens)
let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

let prompt = "Write a story about a magic backpack."

let response = try await generativeModel.countTokens(prompt)

print("Total Tokens: \(response.totalTokens)")
// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'The quick brown fox jumps over the lazy dog.';
final tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent =
    new Content.Builder().addText("Write a story about a magic backpack.").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);
from google import genai
from google.genai import types

client = genai.Client()

chat = client.chats.create(
    model="gemini-2.0-flash",
    history=[
        types.Content(
            role="user", parts=[types.Part(text="Hi my name is Bob")]
        ),
        types.Content(role="model", parts=[types.Part(text="Hi Bob!")]),
    ],
)
# Count tokens for the chat history.
print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=chat.get_history()
    )
)
# ( e.g., total_tokens: 10 )

response = chat.send_message(
    message="In one sentence, explain how a computer works to a young child."
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )

# You can count tokens for the combined history and a new message.
extra = types.UserContent(
    parts=[
        types.Part(
            text="What is the meaning of life?",
        )
    ]
)
history = chat.get_history()
history.append(extra)
print(client.models.count_tokens(model="gemini-2.0-flash", contents=history))
# ( e.g., total_tokens: 56 )
// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
// Initial chat history.
const history = [
  { role: "user", parts: [{ text: "Hi my name is Bob" }] },
  { role: "model", parts: [{ text: "Hi Bob!" }] },
];
const chat = ai.chats.create({
  model: "gemini-2.0-flash",
  history: history,
});

// Count tokens for the current chat history.
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: chat.getHistory(),
});
console.log(countTokensResponse.totalTokens);

const chatResponse = await chat.sendMessage({
  message: "In one sentence, explain how a computer works to a young child.",
});
console.log(chatResponse.usageMetadata);

// Add an extra user message to the history.
const extraMessage = {
  role: "user",
  parts: [{ text: "What is the meaning of life?" }],
};
const combinedHistory = chat.getHistory();
combinedHistory.push(extraMessage);
const combinedCountTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: combinedHistory,
});
console.log(
  "Combined history token count:",
  combinedCountTokensResponse.totalTokens,
);
model := client.GenerativeModel("gemini-1.5-flash")
cs := model.StartChat()

cs.History = []*genai.Content{
	{
		Parts: []genai.Part{
			genai.Text("Hi my name is Bob"),
		},
		Role: "user",
	},
	{
		Parts: []genai.Part{
			genai.Text("Hi Bob!"),
		},
		Role: "model",
	},
}

prompt := "Explain how a computer works to a young child."
resp, err := cs.SendMessage(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

// On the response for SendMessage, use `UsageMetadata` to get
// separate input and output token counts
// (`prompt_token_count` and `candidates_token_count`, respectively),
// as well as the combined token count (`total_token_count`).
fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [
        {"role": "user",
        "parts": [{"text": "Hi, my name is Bob."}],
        },
        {"role": "model",
         "parts":[{"text": "Hi Bob"}],
        },
      ],
      }'
val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

val chat =
    generativeModel.startChat(
        history =
            listOf(
                content(role = "user") { text("Hello, I have 2 dogs in my house.") },
                content(role = "model") {
                  text("Great to meet you. What would you like to know?")
                }))

val history = chat.history
val messageContent = content { text("This is the message I intend to send") }
val (totalTokens) = generativeModel.countTokens(*history.toTypedArray(), messageContent)
print(totalTokens)
let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

// Optionally specify existing chat history
let history = [
  ModelContent(role: "user", parts: "Hello, I have 2 dogs in my house."),
  ModelContent(role: "model", parts: "Great to meet you. What would you like to know?"),
]

// Initialize the chat with optional chat history
let chat = generativeModel.startChat(history: history)

let response = try await generativeModel.countTokens(chat.history + [
  ModelContent(role: "user", parts: "This is the message I intend to send"),
])
print("Total Tokens: \(response.totalTokens)")
// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final chat = model.startChat(history: [
  Content.text('Hi my name is Bob'),
  Content.model([TextPart('Hi Bob!')])
]);
var tokenCount = await model.countTokens(chat.history);
print('Total tokens: ${tokenCount.totalTokens}');

final response = await chat.sendMessage(Content.text(
    'In one sentence, explain how a computer works to a young child.'));
if (response.usageMetadata case final usage?) {
  print('Prompt: ${usage.promptTokenCount}, '
      'Candidates: ${usage.candidatesTokenCount}, '
      'Total: ${usage.totalTokenCount}');
}

tokenCount = await model.countTokens(
    [...chat.history, Content.text('What is the meaning of life?')]);
print('Total tokens: ${tokenCount.totalTokens}');
// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

// (optional) Create previous chat history for context
Content.Builder userContentBuilder = new Content.Builder();
userContentBuilder.setRole("user");
userContentBuilder.addText("Hello, I have 2 dogs in my house.");
Content userContent = userContentBuilder.build();

Content.Builder modelContentBuilder = new Content.Builder();
modelContentBuilder.setRole("model");
modelContentBuilder.addText("Great to meet you. What would you like to know?");
Content modelContent = modelContentBuilder.build();

// Use a mutable list so the new message can be appended below.
List<Content> history = new ArrayList<>(Arrays.asList(userContent, modelContent));

// Initialize the chat
ChatFutures chat = model.startChat(history);

Content messageContent =
    new Content.Builder().addText("This is the message I intend to send").build();

Collections.addAll(history, messageContent);

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

ListenableFuture<CountTokensResponse> countTokensResponse =
    model.countTokens(history.toArray(new Content[0]));
Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        System.out.println(result);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);
from google import genai
import PIL.Image

client = genai.Client()
prompt = "Tell me about this image"
your_image_file = PIL.Image.open(media / "organ.jpg")

# Count tokens for combined text and inline image.
print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=[prompt, your_image_file]
    )
)
# ( e.g., total_tokens: 263 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=[prompt, your_image_file]
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
// Make sure to include the following imports:
// import {GoogleGenAI, createUserContent, createPartFromBase64} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "Tell me about this image";
const imageBuffer = fs.readFileSync(path.join(media, "organ.jpg"));

// Convert buffer to base64 string.
const imageBase64 = imageBuffer.toString("base64");

// Build contents using createUserContent and createPartFromBase64.
const contents = createUserContent([
  prompt,
  createPartFromBase64(imageBase64, "image/jpeg"),
]);

const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: contents,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: contents,
});
console.log(generateResponse.usageMetadata);
model := client.GenerativeModel("gemini-1.5-flash")
prompt := "Tell me about this image"
imageFile, err := os.ReadFile(filepath.Join(testDataDir, "personWorkingOnComputer.jpg"))
if err != nil {
	log.Fatal(err)
}
// Call `CountTokens` to get the input token count
// of the combined text and file (`total_tokens`).
// An image's display or file size does not affect its token count.
// Optionally, you can call `count_tokens` for the text and file separately.
tokResp, err := model.CountTokens(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 264 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts":[
            {"text": "Tell me about this instrument"},
            {
              "inline_data": {
                "mime_type":"image/jpeg",
                "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
              }
            }
        ]
        }]
       }' 2> /dev/null
val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

val image1: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image1)
val image2: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image2)

val multiModalContent = content {
  image(image1)
  image(image2)
  text("What's the difference between these pictures?")
}

val (totalTokens) = generativeModel.countTokens(multiModalContent)
print(totalTokens)
let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

guard let image1 = UIImage(systemName: "cloud.sun") else { fatalError() }
guard let image2 = UIImage(systemName: "cloud.heavyrain") else { fatalError() }

let prompt = "What's the difference between these pictures?"

let response = try await generativeModel.countTokens(image1, image2, prompt)
print("Total Tokens: \(response.totalTokens)")
// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);

Future<DataPart> fileToPart(String mimeType, String path) async {
  return DataPart(mimeType, await File(path).readAsBytes());
}

final prompt = 'Tell me about this image';
final image = await fileToPart('image/jpeg', 'resources/organ.jpg');
final content = Content.multi([TextPart(prompt), image]);

// An image's display size does not affect its token count.
// Optionally, you can call `countTokens` for the prompt and file separately.
final tokenCount = await model.countTokens([content]);
print('Total tokens: ${tokenCount.totalTokens}');

final response = await model.generateContent([content]);
if (response.usageMetadata case final usage?) {
  print('Prompt: ${usage.promptTokenCount}, '
      'Candidates: ${usage.candidatesTokenCount}, '
      'Total: ${usage.totalTokenCount}');
}
// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);
Content text = new Content.Builder().addText("Write a story about a magic backpack.").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-and-image input
Bitmap image1 = BitmapFactory.decodeResource(context.getResources(), R.drawable.image1);
Bitmap image2 = BitmapFactory.decodeResource(context.getResources(), R.drawable.image2);

Content multiModalContent =
    new Content.Builder()
        .addImage(image1)
        .addImage(image2)
        .addText("What's different between these pictures?")
        .build();

ListenableFuture<CountTokensResponse> countTokensResponse =
    model.countTokens(multiModalContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);
from google import genai
import time

client = genai.Client()
prompt = "Tell me about this video"
your_file = client.files.upload(file=media / "Big_Buck_Bunny.mp4")

# Wait for the video to be processed.
while your_file.state.name == "PROCESSING":
    print("processing video...")
    time.sleep(5)
    your_file = client.files.get(name=your_file.name)

print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=[prompt, your_file]
    )
)
# ( e.g., total_tokens: 300 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=[prompt, your_file]
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )
// Make sure to include the following imports:
// import {GoogleGenAI, createUserContent, createPartFromUri} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "Tell me about this video";
let videoFile = await ai.files.upload({
  file: path.join(media, "Big_Buck_Bunny.mp4"),
  config: { mimeType: "video/mp4" },
});

// Poll until the video file is completely processed (state becomes ACTIVE).
while (!videoFile.state || videoFile.state.toString() !== "ACTIVE") {
  console.log("Processing video...");
  console.log("File state: ", videoFile.state);
  await sleep(5000);
  videoFile = await ai.files.get({ name: videoFile.name });
}

const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: createUserContent([
    prompt,
    createPartFromUri(videoFile.uri, videoFile.mimeType),
  ]),
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: createUserContent([
    prompt,
    createPartFromUri(videoFile.uri, videoFile.mimeType),
  ]),
});
console.log(generateResponse.usageMetadata);
model := client.GenerativeModel("gemini-1.5-flash")
prompt := "Tell me about this video"
file, err := client.UploadFileFromPath(ctx, filepath.Join(testDataDir, "earth.mp4"), nil)
if err != nil {
	log.Fatal(err)
}
defer client.DeleteFile(ctx, file.Name)

fd := genai.FileData{URI: file.URI}
// Call `CountTokens` to get the input token count
// of the combined text and file (`total_tokens`).
// A video or audio file is converted to tokens at a fixed rate of tokens per
// second.
// Optionally, you can call `count_tokens` for the text and file separately.
tokResp, err := model.CountTokens(ctx, genai.Text(prompt), fd)
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 1481 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt), fd)
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 1481, candidates_token_count: 43, total_token_count: 1524 )

MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
DISPLAY_NAME=${VIDEO_PATH}
BASE_URL="https://generativelanguage.googleapis.com"

# Initial resumable request defining metadata.
# The upload URL is in the response headers; dump them to a file.
curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
  -D upload-header.tmp \
  -H "X-Goog-Upload-Protocol: resumable" \
  -H "X-Goog-Upload-Command: start" \
  -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
  -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
  -H "Content-Type: application/json" \
  -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null

upload_url=$(grep -i "x-goog-upload-url: " upload-header.tmp | cut -d" " -f2 | tr -d "\r")
rm upload-header.tmp

# Upload the actual bytes.
curl "${upload_url}" \
  -H "Content-Length: ${NUM_BYTES}" \
  -H "X-Goog-Upload-Offset: 0" \
  -H "X-Goog-Upload-Command: upload, finalize" \
  --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json

file_uri=$(jq ".file.uri" file_info.json)

state=$(jq -r ".file.state" file_info.json)

name=$(jq -r ".file.name" file_info.json)

while [[ "$state" == "PROCESSING" ]];
do
  echo "Processing video..."
  sleep 5
  # Get the file of interest to check its state.
  curl "https://generativelanguage.googleapis.com/v1beta/${name}?key=$GOOGLE_API_KEY" > file_info.json
  state=$(jq -r ".file.state" file_info.json)
done

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts":[
          {"text": "Describe this video clip"},
          {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
        }]
       }'
from google import genai

client = genai.Client()
sample_pdf = client.files.upload(file=media / "test.pdf")
token_count = client.models.count_tokens(
    model="gemini-2.0-flash",
    contents=["Give me a summary of this document.", sample_pdf],
)
print(f"{token_count=}")

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=["Give me a summary of this document.", sample_pdf],
)
print(response.usage_metadata)
from google import genai
from google.genai import types
import time

client = genai.Client()
your_file = client.files.upload(file=media / "a11.txt")

cache = client.caches.create(
    model="gemini-1.5-flash-001",
    config={
        "contents": ["Here the Apollo 11 transcript:", your_file],
        "system_instruction": None,
        "tools": None,
    },
)

# Create a prompt.
prompt = "Please give a short summary of this file."

# Count tokens for the prompt (the cached content is not passed here).
print(client.models.count_tokens(model="gemini-2.0-flash", contents=prompt))
# ( e.g., total_tokens: 9 )

response = client.models.generate_content(
    model="gemini-1.5-flash-001",
    contents=prompt,
    config=types.GenerateContentConfig(
        cached_content=cache.name,
    ),
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: ..., cached_content_token_count: ..., candidates_token_count: ... )
client.caches.delete(name=cache.name)
// Make sure to include the following imports:
// import {GoogleGenAI, createUserContent, createPartFromUri} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const textFile = await ai.files.upload({
  file: path.join(media, "a11.txt"),
  config: { mimeType: "text/plain" },
});

const cache = await ai.caches.create({
  model: "gemini-1.5-flash-001",
  config: {
    contents: createUserContent([
      "Here is the Apollo 11 transcript:",
      createPartFromUri(textFile.uri, textFile.mimeType),
    ]),
    systemInstruction: null,
    tools: null,
  },
});

const prompt = "Please give a short summary of this file.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-1.5-flash-001",
  contents: prompt,
  config: { cachedContent: cache.name },
});
console.log(generateResponse.usageMetadata);

await ai.caches.delete({ name: cache.name });

txt := strings.Repeat("George Washington was the first president of the United States. ", 3000)
argcc := &genai.CachedContent{
	Model:    "gemini-1.5-flash-001",
	Contents: []*genai.Content{genai.NewUserContent(genai.Text(txt))},
}
cc, err := client.CreateCachedContent(ctx, argcc)
if err != nil {
	log.Fatal(err)
}
defer client.DeleteCachedContent(ctx, cc.Name)

modelWithCache := client.GenerativeModelFromCachedContent(cc)
prompt := "Summarize this statement"
tokResp, err := modelWithCache.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 5 )

resp, err := modelWithCache.GenerateContent(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("cached_content_token_count:", resp.UsageMetadata.CachedContentTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 33007,  candidates_token_count: 39, cached_content_token_count: 33002, total_token_count: 33046 )
from google import genai
from google.genai import types

client = genai.Client()
prompt = "The quick brown fox jumps over the lazy dog."

base_count = client.models.count_tokens(
    model="gemini-2.0-flash", contents=prompt
)
print("total_tokens (no system instruction):", base_count)
# ( e.g., total_tokens: 10 )

# When using a system instruction, include it in the count tokens config.
# TODO: Uncomment once the API stops failing

# count_with_sys = client.models.count_tokens(
#     model="gemini-2.0-flash",
#     contents=prompt,
#     config=types.CountTokensConfig(system_instruction="You are a cat. Your name is Neko.")
# )
# print("total_tokens (with system instruction):", count_with_sys)
# ( e.g., total_tokens: 21 )
// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "The quick brown fox jumps over the lazy dog.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(
  "base_count (no system instruction):",
  countTokensResponse.totalTokens,
);

// Uncomment if the API gets support for systemInstruction in count_tokens.
/*
const countTokensWithSysInstResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
  config: {systemInstruction: 'You are a cat. Your name is Neko.'},
});
console.log(
  "total_tokens (with system instruction):",
  countTokensWithSysInstResponse.totalTokens,
);
*/

model := client.GenerativeModel("gemini-1.5-flash")
prompt := "The quick brown fox jumps over the lazy dog"

respNoInstruction, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", respNoInstruction.TotalTokens)
// ( total_tokens: 10 )

// The total token count includes everything sent to the GenerateContent
// request. When you use system instructions, the total token
// count increases.
model.SystemInstruction = genai.NewUserContent(genai.Text("You are a cat. Your name is Neko."))
respWithInstruction, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", respWithInstruction.TotalTokens)
// ( total_tokens: 21 )
val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey,
        systemInstruction = content(role = "system") { text("You are a cat. Your name is Neko.")}
    )

// For text-only input
val (totalTokens) = generativeModel.countTokens("What is your name?")
print(totalTokens)
let generativeModel =
  GenerativeModel(
    // Specify a model that supports system instructions, like a Gemini 1.5 model
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default,
    systemInstruction: ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")
  )

let prompt = "What is your name?"

let response = try await generativeModel.countTokens(prompt)
print("Total Tokens: \(response.totalTokens)")
// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
var model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'The quick brown fox jumps over the lazy dog.';

// The total token count includes everything sent in the `generateContent`
// request.
var tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
  systemInstruction: Content.system('You are a cat. Your name is Neko.'),
);
tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
// Create your system instructions
Content systemInstruction =
    new Content.Builder().addText("You are a cat. Your name is Neko.").build();

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey,
        /* generationConfig (optional) */ null,
        /* safetySettings (optional) */ null,
        /* requestOptions (optional) */ new RequestOptions(),
        /* tools (optional) */ null,
        /* toolsConfig (optional) */ null,
        /* systemInstruction (optional) */ systemInstruction);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent = new Content.Builder().addText("What's your name?").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);
from google import genai
from google.genai import types

client = genai.Client()
prompt = (
    "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
)
print(client.models.count_tokens(model="gemini-2.0-flash", contents=prompt))
# ( e.g., total_tokens: 22 )

# Define the function declarations for the arithmetic operations
add_function = types.FunctionDeclaration(
    name="add",
    description="Return the sum of a and b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

subtract_function = types.FunctionDeclaration(
    name="subtract",
    description="Return the difference of a and b (a - b)",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

multiply_function = types.FunctionDeclaration(
    name="multiply",
    description="Return the product of a and b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

divide_function = types.FunctionDeclaration(
    name="divide",
    description="Return the quotient of a divided by b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The numerator",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The denominator (must not be zero)",
            ),
        },
        required=["a", "b"],
    ),
)
tools = [
    types.Tool(function_declarations=[add_function]),
    types.Tool(function_declarations=[subtract_function]),
    types.Tool(function_declarations=[multiply_function]),
    types.Tool(function_declarations=[divide_function]),
]

# Count tokens when tools are included. Tools increase the token count.
# TODO: Uncomment once the API stops failing
# tokens_with_tools = client.models.count_tokens(
#     model="gemini-2.0-flash",
#     contents=prompt,
#     config=types.CountTokensConfig(tools=tools)
# )
# print(tokens_with_tools)
# ( e.g., total_tokens: 206 )
// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt =
  "I have 57 cats, each owns 44 mittens, how many mittens is that in total?";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log("total_tokens (no tools):", countTokensResponse.totalTokens);

// Define function declarations for arithmetic operations.
const addDeclaration = {
  name: "addNumbers",
  parameters: {
    type: "object",
    description: "Return the result of adding two numbers.",
    properties: {
      firstParam: {
        type: "number",
        description:
          "The first parameter which can be an integer or a floating point number.",
      },
      secondParam: {
        type: "number",
        description:
          "The second parameter which can be an integer or a floating point number.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const subtractDeclaration = {
  name: "subtractNumbers",
  parameters: {
    type: "object",
    description:
      "Return the result of subtracting the second number from the first.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const multiplyDeclaration = {
  name: "multiplyNumbers",
  parameters: {
    type: "object",
    description: "Return the product of two numbers.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const divideDeclaration = {
  name: "divideNumbers",
  parameters: {
    type: "object",
    description:
      "Return the quotient of dividing the first number by the second.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const tools = {
  functionDeclarations: [
    addDeclaration,
    subtractDeclaration,
    multiplyDeclaration,
    divideDeclaration,
  ],
};

// Uncomment if the API gets support for tools in count_tokens.
/*
const countTokensWithToolsResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
  config: { tools: tools }
});
console.log("total_tokens (with tools):", countTokensWithToolsResponse.totalTokens);
*/
val multiplyDefinition = defineFunction(
    name = "multiply",
    description = "returns the product of the provided numbers.",
    parameters = listOf(
        Schema.double("a", "First number"),
        Schema.double("b", "Second number")
    )
)
val usableFunctions = listOf(multiplyDefinition)

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey,
        tools = listOf(Tool(usableFunctions))
    )

// For text-only input
val (totalTokens) = generativeModel.countTokens("What's the product of 9 and 358?")
print(totalTokens)
let generativeModel =
  GenerativeModel(
    // Specify a model that supports system instructions, like a Gemini 1.5 model
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default,
    tools: [Tool(functionDeclarations: [
      FunctionDeclaration(
        name: "controlLight",
        description: "Set the brightness and color temperature of a room light.",
        parameters: [
          "brightness": Schema(
            type: .number,
            format: "double",
            description: "Light level from 0 to 100. Zero is off and 100 is full brightness."
          ),
          "colorTemperature": Schema(
            type: .string,
            format: "enum",
            description: "Color temperature of the light fixture.",
            enumValues: ["daylight", "cool", "warm"]
          ),
        ],
        requiredParameters: ["brightness", "colorTemperature"]
      ),
    ])]
  )

let prompt = "Dim the lights so the room feels cozy and warm."

let response = try await generativeModel.countTokens(prompt)
print("Total Tokens: \(response.totalTokens)")
// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
var model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'I have 57 cats, each owns 44 mittens, '
    'how many mittens is that in total?';

// The total token count includes everything sent in the `generateContent`
// request.
var tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
final binaryFunction = Schema.object(
  properties: {
    'a': Schema.number(nullable: false),
    'b': Schema.number(nullable: false)
  },
  requiredProperties: ['a', 'b'],
);

model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
  tools: [
    Tool(functionDeclarations: [
      FunctionDeclaration('add', 'returns a + b', binaryFunction),
      FunctionDeclaration('subtract', 'returns a - b', binaryFunction),
      FunctionDeclaration('multiply', 'returns a * b', binaryFunction),
      FunctionDeclaration('divide', 'returns a / b', binaryFunction)
    ])
  ],
);
tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
FunctionDeclaration multiplyDefinition =
    defineFunction(
        /* name  */ "multiply",
        /* description */ "returns a * b.",
        /* parameters */ Arrays.asList(
            Schema.numDouble("a", "First parameter"),
            Schema.numDouble("b", "Second parameter")),
        /* required */ Arrays.asList("a", "b"));

Tool tool = new Tool(Arrays.asList(multiplyDefinition), null);

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey,
        /* generationConfig (optional) */ null,
        /* safetySettings (optional) */ null,
        /* requestOptions (optional) */ new RequestOptions(),
        /* tools (optional) */ Arrays.asList(tool));
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent = new Content.Builder().addText("What's your name?").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Response body

A response from models.countTokens.

It returns the model's tokenCount for the prompt.

If successful, the response body contains data with the following structure:

Fields
totalTokens integer

The number of tokens that the Model tokenizes the prompt into. Always non-negative.

cachedContentTokenCount integer

Number of tokens in the cached part of the prompt (the cached content).

promptTokensDetails[] object (ModalityTokenCount)

Output only. List of modalities that were processed in the request input.

cacheTokensDetails[] object (ModalityTokenCount)

Output only. List of modalities that were processed in the cached content.

JSON representation
{
  "totalTokens": integer,
  "cachedContentTokenCount": integer,
  "promptTokensDetails": [
    {
      object (ModalityTokenCount)
    }
  ],
  "cacheTokensDetails": [
    {
      object (ModalityTokenCount)
    }
  ]
}
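
For the short text-only prompts used in the examples above, a successful response might look like the following; the numbers are illustrative and vary by model and tokenizer:

{
  "totalTokens": 10,
  "promptTokensDetails": [
    {
      "modality": "TEXT",
      "tokenCount": 10
    }
  ]
}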

GenerateContentRequest

Request to generate a completion from the model.

Fields
model string

Required. The name of the Model to use for generating the completion.

Format: models/{model}

contents[] object (Content)

Required. The content of the current conversation with the model.

For single-turn queries, this is a single instance. For multi-turn queries like chat, this is a repeated field that contains the conversation history and the latest request.

tools[] object (Tool)

Optional. A list of Tools the Model may use to generate the next response.

A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the Model. Supported Tools are Function and codeExecution. Refer to the Function calling and the Code execution guides to learn more.

toolConfig object (ToolConfig)

Optional. Tool configuration for any Tool specified in the request. Refer to the Function calling guide for a usage example.

safetySettings[] object (SafetySetting)

Optional. A list of unique SafetySetting instances for blocking unsafe content.

This will be enforced on the GenerateContentRequest.contents and GenerateContentResponse.candidates. There should not be more than one setting for each SafetyCategory type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each SafetyCategory specified in safetySettings. If there is no SafetySetting for a given SafetyCategory in the list, the API will use the default safety setting for that category. The harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, and HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the guide for detailed information on available safety settings, and to the safety guidance to learn how to incorporate safety considerations in your AI applications.

systemInstruction object (Content)

Optional. Developer set system instruction(s). Currently, text only.

generationConfig object (GenerationConfig)

Optional. Configuration options for model generation and outputs.

cachedContent string

Optional. The name of the cached content to use as context to serve the prediction. Format: cachedContents/{cachedContent}

JSON representation
{
  "model": string,
  "contents": [
    {
      object (Content)
    }
  ],
  "tools": [
    {
      object (Tool)
    }
  ],
  "toolConfig": {
    object (ToolConfig)
  },
  "safetySettings": [
    {
      object (SafetySetting)
    }
  ],
  "systemInstruction": {
    object (Content)
  },
  "generationConfig": {
    object (GenerationConfig)
  },
  "cachedContent": string
}
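
As an illustrative sketch, a GenerateContentRequest that references cached content can be passed to models.countTokens as shown below; cachedContents/your-cache-id is a hypothetical placeholder for a cache name returned by the caching API:

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "generateContentRequest": {
        "model": "models/gemini-2.0-flash",
        "contents": [{
          "parts": [{"text": "Please give a short summary of this file."}]
        }],
        "cachedContent": "cachedContents/your-cache-id"
      }
    }'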