"""
At the command line, only need to run once to install the package via pip:
$ pip install google-generativeai
"""
import google.generativeai as genai
genai.configure(api_key="YOUR API KEY")
defaults = {
'model': 'models/text-bison-001',
'temperature': 0.65,
'candidate_count': 1,
'top_k': 40,
'top_p': 0.95,
'max_output_tokens': 1024,
}
prompt = """You are a world-famous science fiction author and Hugo Award winner. You specialize in long, descriptive sentences and brainteaser plots. Write the first paragraph of a new novel that is set in 3023. Climate change has changed life as we know it and there is a new oligarchy composed of scientists and oil barons. Each paragraph should be no more than five sentences long. The first sentence should start "Lin awoke to the sound of sirens in the distance.""""
response = genai.generate_text(
**defaults,
prompt=prompt
)
print(response.result)
// Legacy PaLM text-generation sample using the Generative Language Node client.
// Requires: npm install @google-ai/generativelanguage google-auth-library
const { TextServiceClient } = require("@google-ai/generativelanguage");
const { GoogleAuth } = require("google-auth-library");

const MODEL_NAME = "models/text-bison-001";
const API_KEY = "YOUR API KEY";

// API-key auth; for production prefer Application Default Credentials.
const client = new TextServiceClient({
  authClient: new GoogleAuth().fromAPIKey(API_KEY),
});

const promptString = "You are a world-famous science fiction author and Hugo Award winner. You specialize in long, descriptive sentences and brainteaser plots. Write the first paragraph of a new novel that is set in 3023. Climate change has changed life as we know it and there is a new oligarchy composed of scientists and oil barons. Each paragraph should be no more than five sentences long. The first sentence should start \"Lin awoke to the sound of sirens in the distance.\"";

client.generateText({
  // required, which model to use to generate the result
  model: MODEL_NAME,
  // optional, 0.0 always uses the highest-probability result
  temperature: 0.65,
  // optional, how many candidate results to generate
  candidateCount: 1,
  // optional, number of most probable tokens to consider for generation.
  // NOTE: the protobuf-generated Node client uses camelCase field names;
  // snake_case keys (top_k, top_p, max_output_tokens) are silently ignored.
  topK: 40,
  // optional, for nucleus sampling decoding strategy
  topP: 0.95,
  // optional, maximum number of output tokens to generate
  maxOutputTokens: 1024,
  prompt: {
    text: promptString,
  },
}).then((result) => {
  console.log(JSON.stringify(result, null, 2));
}).catch((err) => {
  // Surface API/auth failures instead of leaving an unhandled rejection.
  console.error("generateText failed:", err);
  process.exitCode = 1;
});