Counting tokens

For a detailed guide to counting tokens using the Gemini API, including how images, audio, and video are counted, see the token counting guide and the accompanying cookbook recipe.

Method: models.countTokens

Runs a model's tokenizer on the input Content and returns the token count. Refer to the tokens guide to learn more about tokens.

Endpoint

POST https://generativelanguage.googleapis.com/v1beta/{model=models/*}:countTokens

Path parameters

model string

Required. The model's resource name. This serves as an ID for the model to use.

This name should match a model name returned by the models.list method.

Format: models/{model}
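
As a quick check of which values are valid here, you can list the available models and read their resource names. Below is a minimal sketch using the google-genai Python SDK; it assumes the GEMINI_API_KEY environment variable is set.

from google import genai

client = genai.Client()

# Each model's `name` is the resource name expected by the {model} path
# parameter, e.g. "models/gemini-2.0-flash".
for m in client.models.list():
    print(m.name)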

Request body

The request body contains data with the following structure:

Fields
contents[] object ( Content )

Optional. The input given to the model as a prompt. This field is ignored when generateContentRequest is set.

generateContentRequest object ( GenerateContentRequest )

Optional. The overall input given to the Model. This includes the prompt as well as other model-steering information like system instructions and/or function declarations for function calling. contents and generateContentRequest are mutually exclusive: you can send either contents or a generateContentRequest, but never both.
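
The examples below all use the contents form. For the generateContentRequest form, a hedged sketch of the raw REST call follows, written here with Python's requests package (an assumption; any HTTP client works). Note that the nested request repeats the model name.

import os
import requests

url = (
    "https://generativelanguage.googleapis.com/v1beta/"
    "models/gemini-2.0-flash:countTokens"
)
body = {
    # contents is omitted; the whole request goes inside generateContentRequest.
    "generateContentRequest": {
        "model": "models/gemini-2.0-flash",  # required inside the nested request
        "contents": [
            {"parts": [{"text": "The quick brown fox jumps over the lazy dog."}]}
        ],
        # Illustrative system instruction; any Content is accepted here.
        "systemInstruction": {"parts": [{"text": "You are a cat. Your name is Neko."}]},
    }
}
resp = requests.post(url, params={"key": os.environ["GEMINI_API_KEY"]}, json=body)
print(resp.json())  # e.g. {"totalTokens": ...}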

Example request

Text

Python

from google import genai

client = genai.Client()
prompt = "The quick brown fox jumps over the lazy dog."

# Count tokens using the new client method.
total_tokens = client.models.count_tokens(
    model="gemini-2.0-flash", contents=prompt
)
print("total_tokens: ", total_tokens)
# ( e.g., total_tokens: 10 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=prompt
)

# The usage_metadata provides detailed token counts.
print(response.usage_metadata)
# ( e.g., prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 )

JavaScript

// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "The quick brown fox jumps over the lazy dog.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(generateResponse.usageMetadata);

Go

model := client.GenerativeModel("gemini-1.5-flash")
prompt := "The quick brown fox jumps over the lazy dog"

// Call CountTokens to get the input token count (`total_tokens`).
tokResp, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 10 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

// On the response for GenerateContent, use UsageMetadata to get
// separate input and output token counts (PromptTokenCount and
// CandidatesTokenCount, respectively), as well as the combined
// token count (TotalTokenCount).
fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 10, candidates_token_count: 38, total_token_count: 48 )

Shell

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts": [{
          "text": "The quick brown fox jumps over the lazy dog."
        }]
      }]
    }'

Kotlin

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

// For text-only input
val (totalTokens) = generativeModel.countTokens("Write a story about a magic backpack.")
print(totalTokens)

Swift

let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

let prompt = "Write a story about a magic backpack."

let response = try await generativeModel.countTokens(prompt)

print("Total Tokens: \(response.totalTokens)")

Dart

// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'The quick brown fox jumps over the lazy dog.';
final tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');

Java

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent =
    new Content.Builder().addText("Write a story about a magic backpack.").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Chat

Python

from google import genai
from google.genai import types

client = genai.Client()

chat = client.chats.create(
    model="gemini-2.0-flash",
    history=[
        types.Content(
            role="user", parts=[types.Part(text="Hi my name is Bob")]
        ),
        types.Content(role="model", parts=[types.Part(text="Hi Bob!")]),
    ],
)
# Count tokens for the chat history.
print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=chat.get_history()
    )
)
# ( e.g., total_tokens: 10 )

response = chat.send_message(
    message="In one sentence, explain how a computer works to a young child."
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )

# You can count tokens for the combined history and a new message.
extra = types.UserContent(
    parts=[
        types.Part(
            text="What is the meaning of life?",
        )
    ]
)
history = chat.get_history()
history.append(extra)
print(client.models.count_tokens(model="gemini-2.0-flash", contents=history))
# ( e.g., total_tokens: 56 )

JavaScript

// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
// Initial chat history.
const history = [
  { role: "user", parts: [{ text: "Hi my name is Bob" }] },
  { role: "model", parts: [{ text: "Hi Bob!" }] },
];
const chat = ai.chats.create({
  model: "gemini-2.0-flash",
  history: history,
});

// Count tokens for the current chat history.
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: chat.getHistory(),
});
console.log(countTokensResponse.totalTokens);

const chatResponse = await chat.sendMessage({
  message: "In one sentence, explain how a computer works to a young child.",
});
console.log(chatResponse.usageMetadata);

// Add an extra user message to the history.
const extraMessage = {
  role: "user",
  parts: [{ text: "What is the meaning of life?" }],
};
const combinedHistory = chat.getHistory();
combinedHistory.push(extraMessage);
const combinedCountTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: combinedHistory,
});
console.log(
  "Combined history token count:",
  combinedCountTokensResponse.totalTokens,
);

Go

model := client.GenerativeModel("gemini-1.5-flash")
cs := model.StartChat()

cs.History = []*genai.Content{
	{
		Parts: []genai.Part{
			genai.Text("Hi my name is Bob"),
		},
		Role: "user",
	},
	{
		Parts: []genai.Part{
			genai.Text("Hi Bob!"),
		},
		Role: "model",
	},
}

prompt := "Explain how a computer works to a young child."
resp, err := cs.SendMessage(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

// On the response for SendMessage, use `UsageMetadata` to get
// separate input and output token counts
// (`prompt_token_count` and `candidates_token_count`, respectively),
// as well as the combined token count (`total_token_count`).
fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )

Shell

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [
        {
          "role": "user",
          "parts": [{"text": "Hi, my name is Bob."}]
        },
        {
          "role": "model",
          "parts": [{"text": "Hi Bob"}]
        }
      ]
    }'

Kotlin

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

val chat =
    generativeModel.startChat(
        history =
            listOf(
                content(role = "user") { text("Hello, I have 2 dogs in my house.") },
                content(role = "model") {
                  text("Great to meet you. What would you like to know?")
                }))

val history = chat.history
val messageContent = content { text("This is the message I intend to send") }
val (totalTokens) = generativeModel.countTokens(*history.toTypedArray(), messageContent)
print(totalTokens)

Swift

let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

// Optionally specify existing chat history
let history = [
  ModelContent(role: "user", parts: "Hello, I have 2 dogs in my house."),
  ModelContent(role: "model", parts: "Great to meet you. What would you like to know?"),
]

// Initialize the chat with optional chat history
let chat = generativeModel.startChat(history: history)

let response = try await generativeModel.countTokens(chat.history + [
  ModelContent(role: "user", parts: "This is the message I intend to send"),
])
print("Total Tokens: \(response.totalTokens)")

Dart

// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final chat = model.startChat(history: [
  Content.text('Hi my name is Bob'),
  Content.model([TextPart('Hi Bob!')])
]);
var tokenCount = await model.countTokens(chat.history);
print('Total tokens: ${tokenCount.totalTokens}');

final response = await chat.sendMessage(Content.text(
    'In one sentence, explain how a computer works to a young child.'));
if (response.usageMetadata case final usage?) {
  print('Prompt: ${usage.promptTokenCount}, '
      'Candidates: ${usage.candidatesTokenCount}, '
      'Total: ${usage.totalTokenCount}');
}

tokenCount = await model.countTokens(
    [...chat.history, Content.text('What is the meaning of life?')]);
print('Total tokens: ${tokenCount.totalTokens}');

Java

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

// (optional) Create previous chat history for context
Content.Builder userContentBuilder = new Content.Builder();
userContentBuilder.setRole("user");
userContentBuilder.addText("Hello, I have 2 dogs in my house.");
Content userContent = userContentBuilder.build();

Content.Builder modelContentBuilder = new Content.Builder();
modelContentBuilder.setRole("model");
modelContentBuilder.addText("Great to meet you. What would you like to know?");
Content modelContent = modelContentBuilder.build();

// Use a mutable list so the new message can be appended below.
List<Content> history = new ArrayList<>(Arrays.asList(userContent, modelContent));

// Initialize the chat
ChatFutures chat = model.startChat(history);

Content messageContent =
    new Content.Builder().addText("This is the message I intend to send").build();

Collections.addAll(history, messageContent);

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

ListenableFuture<CountTokensResponse> countTokensResponse =
    model.countTokens(history.toArray(new Content[0]));
Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        System.out.println(result);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Media

Python

from google import genai
import PIL.Image

client = genai.Client()
prompt = "Tell me about this image"
your_image_file = PIL.Image.open(media / "organ.jpg")

# Count tokens for combined text and inline image.
print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=[prompt, your_image_file]
    )
)
# ( e.g., total_tokens: 263 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=[prompt, your_image_file]
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )

JavaScript

// Make sure to include the following imports:
// import fs from "fs";
// import path from "path";
// import {GoogleGenAI, createUserContent, createPartFromBase64} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "Tell me about this image";
const imageBuffer = fs.readFileSync(path.join(media, "organ.jpg"));

// Convert buffer to base64 string.
const imageBase64 = imageBuffer.toString("base64");

// Build contents using createUserContent and createPartFromBase64.
const contents = createUserContent([
  prompt,
  createPartFromBase64(imageBase64, "image/jpeg"),
]);

const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: contents,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: contents,
});
console.log(generateResponse.usageMetadata);

Go

model := client.GenerativeModel("gemini-1.5-flash")
prompt := "Tell me about this image"
imageFile, err := os.ReadFile(filepath.Join(testDataDir, "personWorkingOnComputer.jpg"))
if err != nil {
	log.Fatal(err)
}
// Call `CountTokens` to get the input token count
// of the combined text and file (`total_tokens`).
// An image's display or file size does not affect its token count.
// Optionally, you can call `count_tokens` for the text and file separately.
tokResp, err := model.CountTokens(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 264 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt), genai.ImageData("jpeg", imageFile))
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 264, candidates_token_count: 100, total_token_count: 364 )
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts":[
            {"text": "Tell me about this instrument"},
            {
              "inline_data": {
                "mime_type":"image/jpeg",
                "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
              }
            }
        ]
        }]
       }' 2> /dev/null

Kotlin

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey)

val image1: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image1)
val image2: Bitmap = BitmapFactory.decodeResource(context.resources, R.drawable.image2)

val multiModalContent = content {
  image(image1)
  image(image2)
  text("What's the difference between these pictures?")
}

val (totalTokens) = generativeModel.countTokens(multiModalContent)
print(totalTokens)

Swift

let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default
  )

guard let image1 = UIImage(systemName: "cloud.sun") else { fatalError() }
guard let image2 = UIImage(systemName: "cloud.heavyrain") else { fatalError() }

let prompt = "What's the difference between these pictures?"

let response = try await generativeModel.countTokens(image1, image2, prompt)
print("Total Tokens: \(response.totalTokens)")

Dart

// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
final model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);

Future<DataPart> fileToPart(String mimeType, String path) async {
  return DataPart(mimeType, await File(path).readAsBytes());
}

final prompt = 'Tell me about this image';
final image = await fileToPart('image/jpeg', 'resources/organ.jpg');
final content = Content.multi([TextPart(prompt), image]);

// An image's display size does not affect its token count.
// Optionally, you can call `countTokens` for the prompt and file separately.
final tokenCount = await model.countTokens([content]);
print('Total tokens: ${tokenCount.totalTokens}');

final response = await model.generateContent([content]);
if (response.usageMetadata case final usage?) {
  print('Prompt: ${usage.promptTokenCount}, '
      'Candidates: ${usage.candidatesTokenCount}, '
      'Total: ${usage.totalTokenCount}');
}

Java

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-and-image input
Bitmap image1 = BitmapFactory.decodeResource(context.getResources(), R.drawable.image1);
Bitmap image2 = BitmapFactory.decodeResource(context.getResources(), R.drawable.image2);

Content multiModalContent =
    new Content.Builder()
        .addImage(image1)
        .addImage(image2)
        .addText("What's different between these pictures?")
        .build();

ListenableFuture<CountTokensResponse> countTokensResponse =
    model.countTokens(multiModalContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Video

Python

from google import genai
import time

client = genai.Client()
prompt = "Tell me about this video"
your_file = client.files.upload(file=media / "Big_Buck_Bunny.mp4")

# Wait for the video to be processed.
while your_file.state.name == "PROCESSING":
    print("processing video...")
    time.sleep(5)
    your_file = client.files.get(name=your_file.name)

print(
    client.models.count_tokens(
        model="gemini-2.0-flash", contents=[prompt, your_file]
    )
)
# ( e.g., total_tokens: 300 )

response = client.models.generate_content(
    model="gemini-2.0-flash", contents=[prompt, your_file]
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )

JavaScript

// Make sure to include the following imports:
// import path from "path";
// import {GoogleGenAI, createUserContent, createPartFromUri} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "Tell me about this video";
const prompt = "Tell me about this video";
let videoFile = await ai.files.upload({
  file: path.join(media, "Big_Buck_Bunny.mp4"),
  config: { mimeType: "video/mp4" },
});

// Poll until the video file is completely processed (state becomes ACTIVE).
while (!videoFile.state || videoFile.state.toString() !== "ACTIVE") {
  console.log("Processing video...");
  console.log("File state: ", videoFile.state);
  await sleep(5000);
  videoFile = await ai.files.get({ name: videoFile.name });
}

const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: createUserContent([
    prompt,
    createPartFromUri(videoFile.uri, videoFile.mimeType),
  ]),
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: createUserContent([
    prompt,
    createPartFromUri(videoFile.uri, videoFile.mimeType),
  ]),
});
console.log(generateResponse.usageMetadata);

Go

model := client.GenerativeModel("gemini-1.5-flash")
prompt := "Tell me about this video"
file, err := client.UploadFileFromPath(ctx, filepath.Join(testDataDir, "earth.mp4"), nil)
if err != nil {
	log.Fatal(err)
}
defer client.DeleteFile(ctx, file.Name)

fd := genai.FileData{URI: file.URI}
// Call `CountTokens` to get the input token count
// of the combined text and file (`total_tokens`).
// A video or audio file is converted to tokens at a fixed rate of tokens per
// second.
// Optionally, you can call `count_tokens` for the text and file separately.
tokResp, err := model.CountTokens(ctx, genai.Text(prompt), fd)
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 1481 )

resp, err := model.GenerateContent(ctx, genai.Text(prompt), fd)
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 1481, candidates_token_count: 43, total_token_count: 1524 )

Shell

MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
DISPLAY_NAME="${VIDEO_PATH}"

# Initial resumable request defining metadata.
# The upload URL is in the response headers; dump them to a file.
curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
  -D upload-header.tmp \
  -H "X-Goog-Upload-Protocol: resumable" \
  -H "X-Goog-Upload-Command: start" \
  -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
  -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
  -H "Content-Type: application/json" \
  -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null

upload_url=$(grep -i "x-goog-upload-url: " upload-header.tmp | cut -d" " -f2 | tr -d "\r")
rm upload-header.tmp

# Upload the actual bytes.
curl "${upload_url}" \
  -H "Content-Length: ${NUM_BYTES}" \
  -H "X-Goog-Upload-Offset: 0" \
  -H "X-Goog-Upload-Command: upload, finalize" \
  --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json

file_uri=$(jq ".file.uri" file_info.json)

state=$(jq ".file.state" file_info.json)

name=$(jq -r ".file.name" file_info.json)

while [[ "$state" == *"PROCESSING"* ]];
do
  echo "Processing video..."
  sleep 5
  # Get the file of interest to check its state
  curl "https://generativelanguage.googleapis.com/v1beta/${name}?key=${GEMINI_API_KEY}" > file_info.json 2> /dev/null
  state=$(jq ".file.state" file_info.json)
done

curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
    -H 'Content-Type: application/json' \
    -X POST \
    -d '{
      "contents": [{
        "parts":[
          {"text": "Describe this video clip"},
          {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
        }]
       }'

PDF

Python
from google import genai

client = genai.Client()
sample_pdf = client.files.upload(file=media / "test.pdf")
token_count = client.models.count_tokens(
    model="gemini-2.0-flash",
    contents=["Give me a summary of this document.", sample_pdf],
)
print(f"{token_count=}")

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=["Give me a summary of this document.", sample_pdf],
)
print(response.usage_metadata)

Cache

Python

from google import genai
from google.genai import types
import time

client = genai.Client()
your_file = client.files.upload(file=media / "a11.txt")

cache = client.caches.create(
    model="gemini-1.5-flash-001",
    config={
        "contents": ["Here the Apollo 11 transcript:", your_file],
        "system_instruction": None,
        "tools": None,
    },
)

# Create a prompt.
prompt = "Please give a short summary of this file."

# Count tokens for the prompt (the cached content is not passed here).
print(client.models.count_tokens(model="gemini-2.0-flash", contents=prompt))
# ( e.g., total_tokens: 9 )

response = client.models.generate_content(
    model="gemini-1.5-flash-001",
    contents=prompt,
    config=types.GenerateContentConfig(
        cached_content=cache.name,
    ),
)
print(response.usage_metadata)
# ( e.g., prompt_token_count: ..., cached_content_token_count: ..., candidates_token_count: ... )
client.caches.delete(name=cache.name)

JavaScript

// Make sure to include the following imports:
// import path from "path";
// import {GoogleGenAI, createUserContent, createPartFromUri} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const textFile = await ai.files.upload({
  file: path.join(media, "a11.txt"),
  config: { mimeType: "text/plain" },
});

const cache = await ai.caches.create({
  model: "gemini-1.5-flash-001",
  config: {
    contents: createUserContent([
      "Here is the Apollo 11 transcript:",
      createPartFromUri(textFile.uri, textFile.mimeType),
    ]),
    systemInstruction: null,
    tools: null,
  },
});

const prompt = "Please give a short summary of this file.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(countTokensResponse.totalTokens);

const generateResponse = await ai.models.generateContent({
  model: "gemini-1.5-flash-001",
  contents: prompt,
  config: { cachedContent: cache.name },
});
console.log(generateResponse.usageMetadata);

await ai.caches.delete({ name: cache.name });

Go

txt := strings.Repeat("George Washington was the first president of the United States. ", 3000)
argcc := &genai.CachedContent{
	Model:    "gemini-1.5-flash-001",
	Contents: []*genai.Content{genai.NewUserContent(genai.Text(txt))},
}
cc, err := client.CreateCachedContent(ctx, argcc)
if err != nil {
	log.Fatal(err)
}
defer client.DeleteCachedContent(ctx, cc.Name)

modelWithCache := client.GenerativeModelFromCachedContent(cc)
prompt := "Summarize this statement"
tokResp, err := modelWithCache.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", tokResp.TotalTokens)
// ( total_tokens: 5 )

resp, err := modelWithCache.GenerateContent(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}

fmt.Println("prompt_token_count:", resp.UsageMetadata.PromptTokenCount)
fmt.Println("candidates_token_count:", resp.UsageMetadata.CandidatesTokenCount)
fmt.Println("cached_content_token_count:", resp.UsageMetadata.CachedContentTokenCount)
fmt.Println("total_token_count:", resp.UsageMetadata.TotalTokenCount)
// ( prompt_token_count: 33007,  candidates_token_count: 39, cached_content_token_count: 33002, total_token_count: 33046 )

System instruction

Python

from google import genai
from google.genai import types

client = genai.Client()
prompt = "The quick brown fox jumps over the lazy dog."

base_count = client.models.count_tokens(
    model="gemini-2.0-flash", contents=prompt
)
print("total_tokens (no system instruction):", base_count)
# ( e.g., total_tokens: 10 )

# When using a system instruction, include it in the count tokens config.
# TODO: Uncomment once the API stops failing

# count_with_sys = client.models.count_tokens(
#     model="gemini-2.0-flash",
#     contents=prompt,
#     config=types.CountTokensConfig(system_instruction="You are a cat. Your name is Neko.")
# )
# print("total_tokens (with system instruction):", count_with_sys)
# ( e.g., total_tokens: 21 )

JavaScript

// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt = "The quick brown fox jumps over the lazy dog.";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log(
  "base_count (no system instruction):",
  countTokensResponse.totalTokens,
);

// Uncomment if the API gets support for systemInstruction in count_tokens.
/*
const countTokensWithSysInstResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
  config: {systemInstruction: 'You are a cat. Your name is Neko.'},
});
console.log(
  "total_tokens (with system instruction):",
  countTokensWithSysInstResponse.totalTokens,
);
*/

Go

model := client.GenerativeModel("gemini-1.5-flash")
prompt := "The quick brown fox jumps over the lazy dog"

respNoInstruction, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", respNoInstruction.TotalTokens)
// ( total_tokens: 10 )

// The total token count includes everything sent to the GenerateContent
// request. When you use system instructions, the total token
// count increases.
model.SystemInstruction = genai.NewUserContent(genai.Text("You are a cat. Your name is Neko."))
respWithInstruction, err := model.CountTokens(ctx, genai.Text(prompt))
if err != nil {
	log.Fatal(err)
}
fmt.Println("total_tokens:", respWithInstruction.TotalTokens)
// ( total_tokens: 21 )

Kotlin

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey,
        systemInstruction = content(role = "system") { text("You are a cat. Your name is Neko.")}
    )

// For text-only input
val (totalTokens) = generativeModel.countTokens("What is your name?")
print(totalTokens)

Swift

let generativeModel =
  GenerativeModel(
    // Specify a model that supports system instructions, like a Gemini 1.5 model
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default,
    systemInstruction: ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")
  )

let prompt = "What is your name?"

let response = try await generativeModel.countTokens(prompt)
print("Total Tokens: \(response.totalTokens)")

Dart

// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
var model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'The quick brown fox jumps over the lazy dog.';

// The total token count includes everything sent in the `generateContent`
// request.
var tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
  systemInstruction: Content.system('You are a cat. Your name is Neko.'),
);
tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');

Java

// Create your system instructions
Content systemInstruction =
    new Content.Builder().addText("You are a cat. Your name is Neko.").build();

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey,
        /* generationConfig (optional) */ null,
        /* safetySettings (optional) */ null,
        /* requestOptions (optional) */ new RequestOptions(),
        /* tools (optional) */ null,
        /* toolsConfig (optional) */ null,
        /* systemInstruction (optional) */ systemInstruction);
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent = new Content.Builder().addText("What's your name?").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Tools

Python

from google import genai
from google.genai import types

client = genai.Client()
prompt = (
    "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
)
print(client.models.count_tokens(model="gemini-2.0-flash", contents=prompt))
# ( e.g., total_tokens: 22 )

# Define the function declarations for the arithmetic operations
add_function = types.FunctionDeclaration(
    name="add",
    description="Return the sum of a and b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

subtract_function = types.FunctionDeclaration(
    name="subtract",
    description="Return the difference of a and b (a - b)",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

multiply_function = types.FunctionDeclaration(
    name="multiply",
    description="Return the product of a and b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The first number",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The second number",
            ),
        },
        required=["a", "b"],
    ),
)

divide_function = types.FunctionDeclaration(
    name="divide",
    description="Return the quotient of a divided by b",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "a": types.Schema(
                type="NUMBER",
                description="The numerator",
            ),
            "b": types.Schema(
                type="NUMBER",
                description="The denominator (must not be zero)",
            ),
        },
        required=["a", "b"],
    ),
)
tools = [
    types.Tool(function_declarations=[add_function]),
    types.Tool(function_declarations=[subtract_function]),
    types.Tool(function_declarations=[multiply_function]),
    types.Tool(function_declarations=[divide_function]),
]

# Count tokens when tools are included. Tools increase the token count.
# TODO: Uncomment once the API stops failing
# tokens_with_tools = client.models.count_tokens(
#     model="gemini-2.0-flash",
#     contents=prompt,
#     config=types.CountTokensConfig(tools=tools)
# )
# print(tokens_with_tools)
# ( e.g., total_tokens: 206 )

JavaScript

// Make sure to include the following import:
// import {GoogleGenAI} from '@google/genai';
const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const prompt =
  "I have 57 cats, each owns 44 mittens, how many mittens is that in total?";
const countTokensResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
});
console.log("total_tokens (no tools):", countTokensResponse.totalTokens);

// Define function declarations for arithmetic operations.
const addDeclaration = {
  name: "addNumbers",
  parameters: {
    type: "object",
    description: "Return the result of adding two numbers.",
    properties: {
      firstParam: {
        type: "number",
        description:
          "The first parameter which can be an integer or a floating point number.",
      },
      secondParam: {
        type: "number",
        description:
          "The second parameter which can be an integer or a floating point number.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const subtractDeclaration = {
  name: "subtractNumbers",
  parameters: {
    type: "object",
    description:
      "Return the result of subtracting the second number from the first.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const multiplyDeclaration = {
  name: "multiplyNumbers",
  parameters: {
    type: "object",
    description: "Return the product of two numbers.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const divideDeclaration = {
  name: "divideNumbers",
  parameters: {
    type: "object",
    description:
      "Return the quotient of dividing the first number by the second.",
    properties: {
      firstParam: {
        type: "number",
        description: "The first parameter.",
      },
      secondParam: {
        type: "number",
        description: "The second parameter.",
      },
    },
    required: ["firstParam", "secondParam"],
  },
};

const tools = {
  functionDeclarations: [
    addDeclaration,
    subtractDeclaration,
    multiplyDeclaration,
    divideDeclaration,
  ],
};

// Uncomment if the API gets support for tools in count_tokens.
/*
const countTokensWithToolsResponse = await ai.models.countTokens({
  model: "gemini-2.0-flash",
  contents: prompt,
  config: { tools: tools }
});
console.log("total_tokens (with tools):", countTokensWithToolsResponse.totalTokens);
*/

Kotlin

val multiplyDefinition = defineFunction(
    name = "multiply",
    description = "returns the product of the provided numbers.",
    parameters = listOf(
        Schema.double("a", "First number"),
        Schema.double("b", "Second number")
    )
)
val usableFunctions = listOf(multiplyDefinition)

val generativeModel =
    GenerativeModel(
        // Specify a Gemini model appropriate for your use case
        modelName = "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key" above)
        apiKey = BuildConfig.apiKey,
        tools = listOf(Tool(usableFunctions))
    )

// For text-only input
val (totalTokens) = generativeModel.countTokens("What's the product of 9 and 358?")
print(totalTokens)

Swift

let generativeModel =
  GenerativeModel(
    // Specify a Gemini model appropriate for your use case
    name: "gemini-1.5-flash",
    // Access your API key from your on-demand resource .plist file (see "Set up your API key"
    // above)
    apiKey: APIKey.default,
    tools: [Tool(functionDeclarations: [
      FunctionDeclaration(
        name: "controlLight",
        description: "Set the brightness and color temperature of a room light.",
        parameters: [
          "brightness": Schema(
            type: .number,
            format: "double",
            description: "Light level from 0 to 100. Zero is off and 100 is full brightness."
          ),
          "colorTemperature": Schema(
            type: .string,
            format: "enum",
            description: "Color temperature of the light fixture.",
            enumValues: ["daylight", "cool", "warm"]
          ),
        ],
        requiredParameters: ["brightness", "colorTemperature"]
      ),
    ])]
  )

let prompt = "Dim the lights so the room feels cozy and warm."

let response = try await generativeModel.countTokens(prompt)
print("Total Tokens: \(response.totalTokens)")

Dart

// Make sure to include this import:
// import 'package:google_generative_ai/google_generative_ai.dart';
var model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
);
final prompt = 'I have 57 cats, each owns 44 mittens, '
    'how many mittens is that in total?';

// The total token count includes everything sent in the `generateContent`
// request.
var tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');
final binaryFunction = Schema.object(
  properties: {
    'a': Schema.number(nullable: false),
    'b': Schema.number(nullable: false)
  },
  requiredProperties: ['a', 'b'],
);

model = GenerativeModel(
  model: 'gemini-1.5-flash',
  apiKey: apiKey,
  tools: [
    Tool(functionDeclarations: [
      FunctionDeclaration('add', 'returns a + b', binaryFunction),
      FunctionDeclaration('subtract', 'returns a - b', binaryFunction),
      FunctionDeclaration('multiply', 'returns a * b', binaryFunction),
      FunctionDeclaration('divide', 'returns a / b', binaryFunction)
    ])
  ],
);
tokenCount = await model.countTokens([Content.text(prompt)]);
print('Total tokens: ${tokenCount.totalTokens}');

Java

FunctionDeclaration multiplyDefinition =
    defineFunction(
        /* name  */ "multiply",
        /* description */ "returns a * b.",
        /* parameters */ Arrays.asList(
            Schema.numDouble("a", "First parameter"),
            Schema.numDouble("b", "Second parameter")),
        /* required */ Arrays.asList("a", "b"));

Tool tool = new Tool(Arrays.asList(multiplyDefinition), null);

// Specify a Gemini model appropriate for your use case
GenerativeModel gm =
    new GenerativeModel(
        /* modelName */ "gemini-1.5-flash",
        // Access your API key as a Build Configuration variable (see "Set up your API key"
        // above)
        /* apiKey */ BuildConfig.apiKey,
        /* generationConfig (optional) */ null,
        /* safetySettings (optional) */ null,
        /* requestOptions (optional) */ new RequestOptions(),
        /* tools (optional) */ Arrays.asList(tool));
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Content inputContent = new Content.Builder().addText("What's your name?").build();

// For illustrative purposes only. You should use an executor that fits your needs.
Executor executor = Executors.newSingleThreadExecutor();

// For text-only input
ListenableFuture<CountTokensResponse> countTokensResponse = model.countTokens(inputContent);

Futures.addCallback(
    countTokensResponse,
    new FutureCallback<CountTokensResponse>() {
      @Override
      public void onSuccess(CountTokensResponse result) {
        int totalTokens = result.getTotalTokens();
        System.out.println("TotalTokens = " + totalTokens);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    },
    executor);

Response body

A response from models.countTokens.

It returns the model's tokenCount for the prompt.

If successful, the response body contains data with the following structure:

Fields
totalTokens integer

The number of tokens that the Model tokenizes the prompt into. Always non-negative.

cachedContentTokenCount integer

The number of tokens in the cached part of the prompt (the cached content).

promptTokensDetails[] object ( ModalityTokenCount )

Output only. A list of the modalities that were processed in the request input.

cacheTokensDetails[] object ( ModalityTokenCount )

Output only. A list of the modalities that were processed in the cached content.

JSON representation
{
  "totalTokens": integer,
  "cachedContentTokenCount": integer,
  "promptTokensDetails": [
    {
      object (ModalityTokenCount)
    }
  ],
  "cacheTokensDetails": [
    {
      object (ModalityTokenCount)
    }
  ]
}
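
As a rough sketch of how these fields surface in client code, assuming the google-genai Python SDK (which maps the JSON fields above to snake_case attributes):

from google import genai

client = genai.Client()
resp = client.models.count_tokens(
    model="gemini-2.0-flash", contents="Why is the sky blue?"
)
print(resp.total_tokens)                 # totalTokens; always non-negative
print(resp.cached_content_token_count)  # cachedContentTokenCount; unset when no cache is used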

GenerateContentRequest

Request to generate a completion from the model.

Fields
model string

Required. The name of the Model to use for generating the completion.

Format: models/{model}

contents[] object ( Content )

Required. The content of the current conversation with the model.

For single-turn queries, this is a single instance. For multi-turn queries like chat, this is a repeated field that contains the conversation history and the latest request.

tools[] object ( Tool )

Optional. A list of Tools the Model may use to generate the next response.

A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside the Model's knowledge and scope. Supported Tools are Function and codeExecution. Refer to the function calling and code execution guides to learn more.

toolConfig object ( ToolConfig )

Optional. Tool configuration for any Tool specified in the request. Refer to the function calling guide for a usage example.

safetySettings[] object ( SafetySetting )

Optional. A list of unique SafetySetting instances for blocking unsafe content.

This will be enforced on GenerateContentRequest.contents and GenerateContentResponse.candidates. There should not be more than one setting for each SafetyCategory type. The API will block any content and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each SafetyCategory specified in safetySettings. If there is no SafetySetting for a given SafetyCategory in the list, the API will use the default safety setting for that category. The harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, and HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the guide for detailed information on available safety settings, and see the safety guidance to learn how to incorporate safety considerations into your AI applications. A sample setting is sketched after this field list.

systemInstruction object ( Content )

Optional. Developer-set system instruction(s). Currently, text only.

generationConfig object ( GenerationConfig )

Optional. Configuration options for model generation and outputs.

cachedContent string

Optional. The name of the cached content to use as context to serve the prediction. Format: cachedContents/{cachedContent}
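
For illustration, a single safetySettings entry could look like the sketch below, written as a Python dict mirroring the JSON shape; the enum names are real, but the chosen values are arbitrary examples.

# One entry in the safetySettings list of a GenerateContentRequest.
safety_setting = {
    "category": "HARM_CATEGORY_HARASSMENT",
    "threshold": "BLOCK_ONLY_HIGH",
}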

JSON representation
{
  "model": string,
  "contents": [
    {
      object (Content)
    }
  ],
  "tools": [
    {
      object (Tool)
    }
  ],
  "toolConfig": {
    object (ToolConfig)
  },
  "safetySettings": [
    {
      object (SafetySetting)
    }
  ],
  "systemInstruction": {
    object (Content)
  },
  "generationConfig": {
    object (GenerationConfig)
  },
  "cachedContent": string
}
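
Putting the fields together, here is a hedged sketch of a complete request body assembled as a Python dict that follows the JSON representation above; all values are illustrative.

request_body = {
    "model": "models/gemini-2.0-flash",
    "contents": [
        {"role": "user", "parts": [{"text": "Write a haiku about the sea."}]}
    ],
    "safetySettings": [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}
    ],
    "systemInstruction": {"parts": [{"text": "You are a poet."}]},
    "generationConfig": {"temperature": 0.7},
}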