```
GenerativeModel(
    model_name: str,
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[google.cloud.aiplatform_v1beta1.types.content.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
)
```

Initializes GenerativeModel.

Usage:

```python
model = GenerativeModel("gemini-pro")
print(model.generate_content("Hello"))
```
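The optional constructor arguments can also be supplied at creation time. A minimal sketch, assuming the public import path `vertexai.generative_models` and placeholder project and location values that you would replace with your own:

```python
import vertexai
from vertexai.generative_models import GenerationConfig, GenerativeModel

# Placeholder project and location; substitute your own values.
vertexai.init(project="my-project", location="us-central1")

# generation_config accepts either a GenerationConfig object or a plain dict.
model = GenerativeModel(
    "gemini-pro",
    generation_config=GenerationConfig(temperature=0.2, max_output_tokens=256),
)
print(model.generate_content("Hello").text)
```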
| Parameter | Description |
|---|---|
| `model_name` | `str`. Model Garden model resource name. |
Methods
count_tokens
```
count_tokens(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse
```

Counts tokens.
| Parameter | Description |
|---|---|
| `contents` | `Union[List[Content], List[Dict[str, Any]], str, Image, Part, List[Union[str, Image, Part]]]`. Contents to send to the model. Supports either a list of `Content` objects (a multi-turn conversation) or a value that can be converted to a single `Content` object (a single message): `str`, `Image`, `Part`, `List[Union[str, Image, Part]]`, or `List[Content]`. |
| Type | Description |
|---|---|
| `CountTokensResponse` | A `CountTokensResponse` object with the following attributes: `total_tokens`, the total number of tokens counted across all instances from the request; `total_billable_characters`, the total number of billable characters counted across all instances from the request. |
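A short sketch of reading the counts off the response, assuming `model` is the `GenerativeModel` instance from the usage example above:

```python
# CountTokensResponse exposes total_tokens and total_billable_characters.
response = model.count_tokens("How many tokens is this sentence?")
print(response.total_tokens)
print(response.total_billable_characters)
```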
count_tokens_async
```
count_tokens_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse
```

Counts tokens asynchronously.
| Parameter | Description |
|---|---|
| `contents` | `Union[List[Content], List[Dict[str, Any]], str, Image, Part, List[Union[str, Image, Part]]]`. Contents to send to the model. Supports either a list of `Content` objects (a multi-turn conversation) or a value that can be converted to a single `Content` object (a single message): `str`, `Image`, `Part`, `List[Union[str, Image, Part]]`, or `List[Content]`. |
| Type | Description |
|---|---|
| Awaitable of `CountTokensResponse` | An awaitable for a `CountTokensResponse` object with the following attributes: `total_tokens`, the total number of tokens counted across all instances from the request; `total_billable_characters`, the total number of billable characters counted across all instances from the request. |
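Because the method returns an awaitable, it is typically driven from an event loop. A sketch under the same assumptions as the earlier examples:

```python
import asyncio

async def main():
    # Accepts the same contents argument as count_tokens.
    response = await model.count_tokens_async("How many tokens is this sentence?")
    print(response.total_tokens)

asyncio.run(main())
```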
generate_content
```
generate_content(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[google.cloud.aiplatform_v1beta1.types.content.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]
```

Generates content.
| Parameter | Description |
|---|---|
| `contents` | `Union[List[Content], List[Dict[str, Any]], str, Image, Part, List[Union[str, Image, Part]]]`. Contents to send to the model. Supports either a list of `Content` objects (a multi-turn conversation) or a value that can be converted to a single `Content` object (a single message): `str`, `Image`, `Part`, `List[Union[str, Image, Part]]`, or `List[Content]`. |
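With `stream=True` the method returns an iterable of `GenerationResponse` chunks rather than a single response. A sketch assuming the `model` instance from the usage example above:

```python
# The dict form of generation_config is also accepted.
responses = model.generate_content(
    "Write a short poem about the ocean.",
    generation_config={"temperature": 0.8, "max_output_tokens": 256},
    stream=True,
)
for chunk in responses:
    # Each chunk is a GenerationResponse carrying a piece of the generated text.
    print(chunk.text, end="")
print()
```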
generate_content_async
```
generate_content_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[google.cloud.aiplatform_v1beta1.types.content.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.AsyncIterable[
        vertexai.generative_models._generative_models.GenerationResponse
    ],
]
```

Generates content asynchronously.
| Parameter | Description |
|---|---|
| `contents` | `Union[List[Content], List[Dict[str, Any]], str, Image, Part, List[Union[str, Image, Part]]]`. Contents to send to the model. Supports either a list of `Content` objects (a multi-turn conversation) or a value that can be converted to a single `Content` object (a single message): `str`, `Image`, `Part`, `List[Union[str, Image, Part]]`, or `List[Content]`. |
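The async variant is awaited to obtain either a single response or, with `stream=True`, an `AsyncIterable` of chunks. A sketch under the same assumptions as the earlier examples:

```python
import asyncio

async def main():
    # Awaiting the streaming call yields an AsyncIterable of GenerationResponse chunks.
    responses = await model.generate_content_async(
        "Summarize the plot of Hamlet in two sentences.",
        stream=True,
    )
    async for chunk in responses:
        print(chunk.text, end="")
    print()

asyncio.run(main())
```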
start_chat
```
start_chat(
    *,
    history: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Content]
    ] = None,
    response_validation: bool = True,
    responder: typing.Optional[
        vertexai.generative_models._generative_models.AutomaticFunctionCallingResponder
    ] = None
) -> vertexai.generative_models._generative_models.ChatSession
```

Creates a stateful chat session.
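A short sketch of a chat session, assuming the `model` instance from the usage example above; `send_message` is a `ChatSession` method not documented in this excerpt, so treat the calls as illustrative:

```python
# The ChatSession keeps the conversation history between calls.
chat = model.start_chat()
print(chat.send_message("Hello, who are you?").text)
print(chat.send_message("What did I just ask you?").text)
```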