3 files changed, +5 -0 lines changed.

The `ModelSettings` dataclass gains a `metadata` field:

```diff
@@ -40,6 +40,9 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
+    metadata: dict[str, str] | None = None
+    """Metadata to include with the model response call."""
+
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
     Defaults to True if not provided."""
```
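For illustration, here is how a caller could set the new field. This is a minimal usage sketch, not code from this PR; it assumes the surrounding Agents SDK API (the `Agent` class and the `agents` import path are not part of this diff):

```python
# A minimal usage sketch, assuming `ModelSettings` and `Agent` are
# exported from the top-level `agents` package.
from agents import Agent, ModelSettings

agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
    model_settings=ModelSettings(
        # Per the new field's type, keys and values are plain strings.
        metadata={"run_id": "abc-123", "env": "staging"},
    ),
)
```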
The Chat Completions model's `_fetch_response` now forwards the new setting:

```diff
@@ -537,6 +537,7 @@ async def _fetch_response(
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
             store=store,
             extra_headers=_HEADERS,
+            metadata=model_settings.metadata,
         )
 
         if isinstance(ret, ChatCompletion):
```
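The kwarg is passed through verbatim, so it reaches the OpenAI client roughly as below. A sketch of the resulting call, not the SDK's actual code; the model name, messages, and metadata values are placeholders:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    # `metadata` rides along with the stored completion, so stored runs
    # can later be identified and filtered by these key/value pairs.
    resp = await client.chat.completions.create(
        model="gpt-4o",  # placeholder model
        messages=[{"role": "user", "content": "Hello"}],
        store=True,
        metadata={"run_id": "abc-123"},
    )
    print(resp.choices[0].message.content)

asyncio.run(main())
```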
And the Responses API model's `_fetch_response` gets the same one-line change:

```diff
@@ -247,6 +247,7 @@ async def _fetch_response(
             extra_headers=_HEADERS,
             text=response_format,
             store=self._non_null_or_not_given(model_settings.store),
+            metadata=model_settings.metadata,
         )
 
     def _get_client(self) -> AsyncOpenAI:
```
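Same pattern on the Responses API side; again a sketch with placeholder values, not code from this PR:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    # The Responses API accepts the same string-to-string metadata dict,
    # attached to the stored response.
    resp = await client.responses.create(
        model="gpt-4o",  # placeholder model
        input="Hello",
        metadata={"run_id": "abc-123"},
    )
    print(resp.output_text)

asyncio.run(main())
```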