View source on GitHub |
Provides methods for getting metadata information about Generative Models.
google.ai.generativelanguage.ModelServiceAsyncClient(
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ModelServiceTransport] = 'grpc_asyncio',
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO
) -> None
Raises | |
---|---|
google.auth.exceptions.MutualTLSChannelError
|
If mutual TLS transport creation failed for any reason. |
Attributes | |
---|---|
transport
|
Returns the transport used by the client instance. |
Methods
create_tuned_model
create_tuned_model(
request=None,
*,
tuned_model=None,
tuned_model_id=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Creates a tuned model. Intermediate tuning progress (if any) is accessed through the [google.longrunning.Operations] service.
Status and results can be accessed through the Operations service. Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_create_tuned_model():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
tuned_model = generativelanguage_v1beta.TunedModel()
tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
tuned_model.tuning_task.training_data.examples.examples.output = "output_value"
request = generativelanguage_v1beta.CreateTunedModelRequest(
tuned_model=tuned_model,
)
# Make the request
operation = client.create_tuned_model(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.CreateTunedModelRequest, dict]]
tuned_model (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.api_core.operation_async.AsyncOperation
|
An object representing a long-running operation.
The result type for the operation will be
:class: |
delete_tuned_model
delete_tuned_model(
request=None,
*,
name=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Deletes a tuned model.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_delete_tuned_model():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
request = generativelanguage_v1beta.DeleteTunedModelRequest(
name="name_value",
)
# Make the request
await client.delete_tuned_model(request=request)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.DeleteTunedModelRequest, dict]]
name (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
from_service_account_file
@classmethod
from_service_account_file( filename: str, *args, **kwargs )
Creates an instance of this client using the provided credentials file.
Args | |
---|---|
filename
|
str
The path to the service account private key json file. |
args
|
Additional arguments to pass to the constructor. |
kwargs
|
Additional arguments to pass to the constructor. |
Returns | |
---|---|
ModelServiceAsyncClient
|
The constructed client. |
from_service_account_info
@classmethod
from_service_account_info( info: dict, *args, **kwargs )
Creates an instance of this client using the provided credentials info.
Args | |
---|---|
info
|
dict
The service account private key info. |
args
|
Additional arguments to pass to the constructor. |
kwargs
|
Additional arguments to pass to the constructor. |
Returns | |
---|---|
ModelServiceAsyncClient
|
The constructed client. |
from_service_account_json
@classmethod
from_service_account_json( filename: str, *args, **kwargs )
Creates an instance of this client using the provided credentials file.
Args | |
---|---|
filename
|
str
The path to the service account private key json file. |
args
|
Additional arguments to pass to the constructor. |
kwargs
|
Additional arguments to pass to the constructor. |
Returns | |
---|---|
ModelServiceAsyncClient
|
The constructed client. |
get_model
get_model(
request=None,
*,
name=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Gets information about a specific Model.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_get_model():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
request = generativelanguage_v1beta.GetModelRequest(
name="name_value",
)
# Make the request
response = await client.get_model(request=request)
# Handle the response
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.GetModelRequest, dict]]
name (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.ai.generativelanguage.Model
|
Information about a Generative Language Model. |
get_mtls_endpoint_and_cert_source
@classmethod
get_mtls_endpoint_and_cert_source( client_options: Optional[ClientOptions] = None )
Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if GOOGLE_API_USE_CLIENT_CERTIFICATE
environment variable is not "true", the
client cert source is None.
(2) if client_options.client_cert_source
is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if client_options.api_endpoint
is provided, use the provided one.
(2) if GOOGLE_API_USE_MTLS_ENDPOINT
environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114
Args | |
---|---|
client_options
|
google.api_core.client_options.ClientOptions
Custom options for the
client. Only the |
Returns | |
---|---|
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. |
Raises | |
---|---|
google.auth.exceptions.MutualTLSChannelError
|
If any errors happen. |
get_transport_class
get_transport_class()
partial(func, *args, **keywords) - new function with partial application of the given arguments and keywords.
get_tuned_model
get_tuned_model(
request=None,
*,
name=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Gets information about a specific TunedModel.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_get_tuned_model():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
request = generativelanguage_v1beta.GetTunedModelRequest(
name="name_value",
)
# Make the request
response = await client.get_tuned_model(request=request)
# Handle the response
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.GetTunedModelRequest, dict]]
name (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.ai.generativelanguage.TunedModel
|
A fine-tuned model created using ModelService.CreateTunedModel. |
list_models
list_models(
request=None,
*,
page_size=None,
page_token=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Lists models available through the API.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_list_models():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
request = generativelanguage_v1beta.ListModelsRequest(
)
# Make the request
page_result = client.list_models(request=request)
# Handle the response
async for response in page_result:
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.ListModelsRequest, dict]]
page_size (:class:
page_token (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.ai.generativelanguage_v1beta.services.model_service.pagers.ListModelsAsyncPager
|
Response from ListModel containing a paginated list of
Models.
Iterating over this object will yield results and resolve additional pages automatically. |
list_tuned_models
list_tuned_models(
request=None,
*,
page_size=None,
page_token=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Lists tuned models owned by the user.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_list_tuned_models():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
request = generativelanguage_v1beta.ListTunedModelsRequest(
)
# Make the request
page_result = client.list_tuned_models(request=request)
# Handle the response
async for response in page_result:
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.ListTunedModelsRequest, dict]]
page_size (:class:
page_token (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.ai.generativelanguage_v1beta.services.model_service.pagers.ListTunedModelsAsyncPager
|
Response from ListTunedModels containing a paginated
list of Models.
Iterating over this object will yield results and resolve additional pages automatically. |
update_tuned_model
update_tuned_model(
request=None,
*,
tuned_model=None,
update_mask=None,
retry=<_MethodDefault._DEFAULT_VALUE: <object object>>,
timeout=<_MethodDefault._DEFAULT_VALUE: <object object>>,
metadata=()
)
Updates a tuned model.
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.ai import generativelanguage_v1beta
async def sample_update_tuned_model():
# Create a client
client = generativelanguage_v1beta.ModelServiceAsyncClient()
# Initialize request argument(s)
tuned_model = generativelanguage_v1beta.TunedModel()
tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
tuned_model.tuning_task.training_data.examples.examples.output = "output_value"
request = generativelanguage_v1beta.UpdateTunedModelRequest(
tuned_model=tuned_model,
)
# Make the request
response = await client.update_tuned_model(request=request)
# Handle the response
print(response)
Args | |
---|---|
request
|
Optional[Union[google.ai.generativelanguage.UpdateTunedModelRequest, dict]]
tuned_model (:class:
|
retry
|
google.api_core.retry_async.AsyncRetry
Designation of what errors, if any, should be retried. |
timeout
|
float
The timeout for this request. |
metadata
|
Sequence[Tuple[str, str]]
Strings which should be sent along with the request as metadata. |
Returns | |
---|---|
google.ai.generativelanguage.TunedModel
|
A fine-tuned model created using ModelService.CreateTunedModel. |
Class Variables | |
---|---|
DEFAULT_ENDPOINT |
'generativelanguage.googleapis.com'
|
DEFAULT_MTLS_ENDPOINT |
'generativelanguage.mtls.googleapis.com'
|