2121from typing_extensions import TypedDict
2222
2323import google .ai .generativelanguage as glm
24+ from google .generativeai import protos
2425
2526from google .generativeai .client import (
2627 get_default_generative_client ,
3536
3637DEFAULT_ANSWER_MODEL = "models/aqa"
3738
38- AnswerStyle = glm .GenerateAnswerRequest .AnswerStyle
39+ AnswerStyle = protos .GenerateAnswerRequest .AnswerStyle
3940
4041AnswerStyleOptions = Union [int , str , AnswerStyle ]
4142
@@ -66,28 +67,30 @@ def to_answer_style(x: AnswerStyleOptions) -> AnswerStyle:
6667
6768
6869GroundingPassageOptions = (
69- Union [glm .GroundingPassage , tuple [str , content_types .ContentType ], content_types .ContentType ],
70+ Union [
71+ protos .GroundingPassage , tuple [str , content_types .ContentType ], content_types .ContentType
72+ ],
7073)
7174
7275GroundingPassagesOptions = Union [
73- glm .GroundingPassages ,
76+ protos .GroundingPassages ,
7477 Iterable [GroundingPassageOptions ],
7578 Mapping [str , content_types .ContentType ],
7679]
7780
7881
79- def _make_grounding_passages (source : GroundingPassagesOptions ) -> glm .GroundingPassages :
82+ def _make_grounding_passages (source : GroundingPassagesOptions ) -> protos .GroundingPassages :
8083 """
81- Converts the `source` into a `glm .GroundingPassage`. A `GroundingPassages` contains a list of
82- `glm .GroundingPassage` objects, which each contain a `glm .Contant` and a string `id`.
84+ Converts the `source` into a `protos.GroundingPassages`. A `GroundingPassages` contains a list of
85+ `protos.GroundingPassage` objects, each of which contains a `protos.Content` and a string `id`.
8386
8487 Args:
85- source: `Content` or a `GroundingPassagesOptions` that will be converted to glm .GroundingPassages.
88+ source: `Content` or a `GroundingPassagesOptions` that will be converted to protos .GroundingPassages.
8689
8790 Return:
88- `glm .GroundingPassages` to be passed into `glm .GenerateAnswer`.
91+ `protos .GroundingPassages` to be passed into `protos .GenerateAnswer`.
8992 """
90- if isinstance (source , glm .GroundingPassages ):
93+ if isinstance (source , protos .GroundingPassages ):
9194 return source
9295
9396 if not isinstance (source , Iterable ):
@@ -100,19 +103,19 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingP
100103 source = source .items ()
101104
102105 for n , data in enumerate (source ):
103- if isinstance (data , glm .GroundingPassage ):
106+ if isinstance (data , protos .GroundingPassage ):
104107 passages .append (data )
105108 elif isinstance (data , tuple ):
106109 id , content = data # tuple must have exactly 2 items.
107110 passages .append ({"id" : id , "content" : content_types .to_content (content )})
108111 else :
109112 passages .append ({"id" : str (n ), "content" : content_types .to_content (data )})
110113
111- return glm .GroundingPassages (passages = passages )
114+ return protos .GroundingPassages (passages = passages )
112115
113116
114117SourceNameType = Union [
115- str , retriever_types .Corpus , glm .Corpus , retriever_types .Document , glm .Document
118+ str , retriever_types .Corpus , protos .Corpus , retriever_types .Document , protos .Document
116119]
117120
118121
@@ -127,15 +130,15 @@ class SemanticRetrieverConfigDict(TypedDict):
127130SemanticRetrieverConfigOptions = Union [
128131 SourceNameType ,
129132 SemanticRetrieverConfigDict ,
130- glm .SemanticRetrieverConfig ,
133+ protos .SemanticRetrieverConfig ,
131134]
132135
133136
134137def _maybe_get_source_name (source ) -> str | None :
135138 if isinstance (source , str ):
136139 return source
137140 elif isinstance (
138- source , (retriever_types .Corpus , glm .Corpus , retriever_types .Document , glm .Document )
141+ source , (retriever_types .Corpus , protos .Corpus , retriever_types .Document , protos .Document )
139142 ):
140143 return source .name
141144 else :
@@ -145,8 +148,8 @@ def _maybe_get_source_name(source) -> str | None:
145148def _make_semantic_retriever_config (
146149 source : SemanticRetrieverConfigOptions ,
147150 query : content_types .ContentsType ,
148- ) -> glm .SemanticRetrieverConfig :
149- if isinstance (source , glm .SemanticRetrieverConfig ):
151+ ) -> protos .SemanticRetrieverConfig :
152+ if isinstance (source , protos .SemanticRetrieverConfig ):
150153 return source
151154
152155 name = _maybe_get_source_name (source )
@@ -156,7 +159,7 @@ def _make_semantic_retriever_config(
156159 source ["source" ] = _maybe_get_source_name (source ["source" ])
157160 else :
158161 raise TypeError (
159- f"Invalid input: Failed to create a 'glm .SemanticRetrieverConfig' from the provided source. "
162+ f"Invalid input: Failed to create a 'protos .SemanticRetrieverConfig' from the provided source. "
160163 f"Received type: { type (source ).__name__ } , "
161164 f"Received value: { source } "
162165 )
@@ -166,7 +169,7 @@ def _make_semantic_retriever_config(
166169 elif isinstance (source ["query" ], str ):
167170 source ["query" ] = content_types .to_content (source ["query" ])
168171
169- return glm .SemanticRetrieverConfig (source )
172+ return protos .SemanticRetrieverConfig (source )
170173
171174
172175def _make_generate_answer_request (
@@ -178,26 +181,26 @@ def _make_generate_answer_request(
178181 answer_style : AnswerStyle | None = None ,
179182 safety_settings : safety_types .SafetySettingOptions | None = None ,
180183 temperature : float | None = None ,
181- ) -> glm .GenerateAnswerRequest :
184+ ) -> protos .GenerateAnswerRequest :
182185 """
183- constructs a glm .GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
186+ Constructs a protos.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
184187
185188 Args:
186189 model: Name of the model used to generate the grounded response.
187190 contents: Content of the current conversation with the model. For single-turn query, this is a
188191 single question to answer. For multi-turn queries, this is a repeated field that contains
189192 conversation history and the last `Content` in the list containing the question.
190193 inline_passages: Grounding passages (a list of `Content`-like objects or `(id, content)` pairs,
191- or a `glm .GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
194+ or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
192195 one must be set, but not both.
193- semantic_retriever: A Corpus, Document, or `glm .SemanticRetrieverConfig` to use for grounding. Exclusive with
196+ semantic_retriever: A Corpus, Document, or `protos .SemanticRetrieverConfig` to use for grounding. Exclusive with
194197 `inline_passages`, one must be set, but not both.
195198 answer_style: Style for grounded answers.
196199 safety_settings: Safety settings for generated output.
197200 temperature: The temperature for randomness in the output.
198201
199202 Returns:
200- Call for glm .GenerateAnswerRequest().
203+ The constructed `protos.GenerateAnswerRequest` object.
201204 """
202205 model = model_types .make_model_name (model )
203206
@@ -224,7 +227,7 @@ def _make_generate_answer_request(
224227 if answer_style :
225228 answer_style = to_answer_style (answer_style )
226229
227- return glm .GenerateAnswerRequest (
230+ return protos .GenerateAnswerRequest (
228231 model = model ,
229232 contents = contents ,
230233 inline_passages = inline_passages ,
@@ -273,9 +276,9 @@ def generate_answer(
273276 contents: The question to be answered by the model, grounded in the
274277 provided source.
275278 inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
276- or a `glm .GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
279+ or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
277280 one must be set, but not both.
278- semantic_retriever: A Corpus, Document, or `glm .SemanticRetrieverConfig` to use for grounding. Exclusive with
281+ semantic_retriever: A Corpus, Document, or `protos .SemanticRetrieverConfig` to use for grounding. Exclusive with
279282 `inline_passages`, one must be set, but not both.
280283 answer_style: Style in which the grounded answer should be returned.
281284 safety_settings: Safety settings for generated output. Defaults to None.
@@ -327,9 +330,9 @@ async def generate_answer_async(
327330 contents: The question to be answered by the model, grounded in the
328331 provided source.
329332 inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
330- or a `glm .GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
333+ or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
331334 one must be set, but not both.
332- semantic_retriever: A Corpus, Document, or `glm .SemanticRetrieverConfig` to use for grounding. Exclusive with
335+ semantic_retriever: A Corpus, Document, or `protos .SemanticRetrieverConfig` to use for grounding. Exclusive with
333336 `inline_passages`, one must be set, but not both.
334337 answer_style: Style in which the grounded answer should be returned.
335338 safety_settings: Safety settings for generated output. Defaults to None.
0 commit comments