Description
I want to use Groq models via LiteLlm. Here is my code:

# Imports added for completeness; exact module paths may vary by google-adk version.
from google.adk.agents import LlmAgent
from google.adk.models.lite_llm import LiteLlm
from google.adk.runners import InMemoryRunner
from google.adk.tools.mcp_tool.mcp_toolset import McpToolset
from google.adk.tools.mcp_tool.mcp_session_manager import StdioConnectionParams
from mcp import StdioServerParameters

kaggle_mcp_server = McpToolset(
    connection_params=StdioConnectionParams(
        server_params=StdioServerParameters(
            command='npx',
            args=[
                '-y',
                'mcp-remote',
                'https://www.kaggle.com/mcp',
            ],
        ),
        timeout=1000,
    )
)
kaggle_agent = LlmAgent(
    name="kaggle_agent",
    model=LiteLlm(model="groq/openai/gpt-oss-20b"),
    instruction="Use the MCP tool to search, download, and perform multiple actions for databases in Kaggle",
    tools=[kaggle_mcp_server],
)
kaggle_runner = InMemoryRunner(agent=kaggle_agent)
response = await kaggle_runner.run_debug(
    "I need a large dataset for weapon detection in multiple scenarios",
    verbose=True,
)
I get the following error:
---------------------------------------------------------------------------
ValidationError Traceback (most recent call last)
Cell In[7], line 2
1 kaggle_runner = InMemoryRunner(agent=kaggle_agent)
----> 2 response= await kaggle_runner.run_debug("I need large dataset for weapon detection in multiple scenerious", verbose = True)
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/runners.py:1023, in Runner.run_debug(self, user_messages, user_id, session_id, run_config, quiet, verbose)
1020 if not quiet:
1021 print(f'\nUser > {message}')
-> 1023 async for event in self.run_async(
1024 user_id=user_id,
1025 session_id=session.id,
1026 new_message=types.UserContent(parts=[types.Part(text=message)]),
1027 run_config=run_config,
1028 ):
1029 if not quiet:
1030 print_event(event, verbose=verbose)
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/runners.py:443, in Runner.run_async(self, user_id, session_id, invocation_id, new_message, state_delta, run_config)
436 asyncio.create_task(
437 _run_compaction_for_sliding_window(
438 self.app, session, self.session_service
439 )
440 )
442 async with Aclosing(_run_with_trace(new_message, invocation_id)) as agen:
--> 443 async for event in agen:
444 yield event
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/runners.py:427, in Runner.run_async.<locals>._run_with_trace(new_message, invocation_id)
417 yield event
419 async with Aclosing(
420 self._exec_with_plugin(
421 invocation_context=invocation_context,
(...) 425 )
426 ) as agen:
--> 427 async for event in agen:
428 yield event
429 # Run compaction after all events are yielded from the agent.
430 # (We don't compact in the middle of an invocation, we only compact at the end of an invocation.)
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/runners.py:653, in Runner._exec_with_plugin(self, invocation_context, session, execute_fn, is_live_call)
650 else:
651 # Step 2: Otherwise continue with normal execution
652 async with Aclosing(execute_fn(invocation_context)) as agen:
--> 653 async for event in agen:
654 if not event.partial:
655 if self._should_append_event(event, is_live_call):
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/runners.py:416, in Runner.run_async.<locals>._run_with_trace.<locals>.execute(ctx)
414 async def execute(ctx: InvocationContext) -> AsyncGenerator[Event]:
415 async with Aclosing(ctx.agent.run_async(ctx)) as agen:
--> 416 async for event in agen:
417 yield event
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/agents/base_agent.py:294, in BaseAgent.run_async(self, parent_context)
291 return
293 async with Aclosing(self._run_async_impl(ctx)) as agen:
--> 294 async for event in agen:
295 yield event
297 if ctx.end_invocation:
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/agents/llm_agent.py:435, in LlmAgent._run_async_impl(self, ctx)
433 should_pause = False
434 async with Aclosing(self._llm_flow.run_async(ctx)) as agen:
--> 435 async for event in agen:
436 self.__maybe_save_output_to_state(event)
437 yield event
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:356, in BaseLlmFlow.run_async(self, invocation_context)
354 last_event = None
355 async with Aclosing(self._run_one_step_async(invocation_context)) as agen:
--> 356 async for event in agen:
357 last_event = event
358 yield event
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:433, in BaseLlmFlow._run_one_step_async(self, invocation_context)
422 model_response_event = Event(
423 id=Event.new_id(),
424 invocation_id=invocation_context.invocation_id,
425 author=invocation_context.agent.name,
426 branch=invocation_context.branch,
427 )
428 async with Aclosing(
429 self._call_llm_async(
430 invocation_context, llm_request, model_response_event
431 )
432 ) as agen:
--> 433 async for llm_response in agen:
434 # Postprocess after calling the LLM.
435 async with Aclosing(
436 self._postprocess_async(
437 invocation_context,
(...) 441 )
442 ) as agen:
443 async for event in agen:
444 # Update the mutable event id to avoid conflict
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:804, in BaseLlmFlow._call_llm_async(self, invocation_context, llm_request, model_response_event)
801 yield llm_response
803 async with Aclosing(_call_llm_with_tracing()) as agen:
--> 804 async for event in agen:
805 yield event
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:788, in BaseLlmFlow._call_llm_async.<locals>._call_llm_with_tracing()
775 responses_generator = llm.generate_content_async(
776 llm_request,
777 stream=invocation_context.run_config.streaming_mode
778 == StreamingMode.SSE,
779 )
780 async with Aclosing(
781 self._run_and_handle_error(
782 responses_generator,
(...) 786 )
787 ) as agen:
--> 788 async for llm_response in agen:
789 trace_call_llm(
790 invocation_context,
791 model_response_event.id,
792 llm_request,
793 llm_response,
794 )
795 # Runs after_model_callback if it exists.
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:998, in BaseLlmFlow._run_and_handle_error(self, response_generator, invocation_context, llm_request, model_response_event)
996 yield error_response
997 else:
--> 998 raise model_error
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/flows/llm_flows/base_llm_flow.py:982, in BaseLlmFlow._run_and_handle_error(self, response_generator, invocation_context, llm_request, model_response_event)
980 try:
981 async with Aclosing(response_generator) as agen:
--> 982 async for response in agen:
983 yield response
984 except Exception as model_error:
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/models/lite_llm.py:960, in LiteLlm.generate_content_async(self, llm_request, stream)
956 _append_fallback_user_content_if_missing(llm_request)
957 logger.debug(_build_request_log(llm_request))
959 messages, tools, response_format, generation_params = (
--> 960 _get_completion_inputs(llm_request)
961 )
963 if "functions" in self._additional_args:
964 # LiteLLM does not support both tools and functions together.
965 tools = None
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/models/lite_llm.py:718, in _get_completion_inputs(llm_request)
712 tools: Optional[List[Dict]] = None
713 if (
714 llm_request.config
715 and llm_request.config.tools
716 and llm_request.config.tools[0].function_declarations
717 ):
--> 718 tools = [
719 _function_declaration_to_tool_param(tool)
720 for tool in llm_request.config.tools[0].function_declarations
721 ]
723 # 3. Handle response format
724 response_format: Optional[types.SchemaUnion] = None
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/models/lite_llm.py:719, in <listcomp>(.0)
712 tools: Optional[List[Dict]] = None
713 if (
714 llm_request.config
715 and llm_request.config.tools
716 and llm_request.config.tools[0].function_declarations
717 ):
718 tools = [
--> 719 _function_declaration_to_tool_param(tool)
720 for tool in llm_request.config.tools[0].function_declarations
721 ]
723 # 3. Handle response format
724 response_format: Optional[types.SchemaUnion] = None
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/models/lite_llm.py:501, in _function_declaration_to_tool_param(function_declaration)
496 if (
497 function_declaration.parameters
498 and function_declaration.parameters.properties
499 ):
500 for key, value in function_declaration.parameters.properties.items():
--> 501 properties[key] = _schema_to_dict(value)
503 tool_params = {
504 "type": "function",
505 "function": {
(...) 512 },
513 }
515 if (
516 function_declaration.parameters
517 and function_declaration.parameters.required
518 ):
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/google/adk/models/lite_llm.py:467, in _schema_to_dict(schema)
464 for key, value in schema_dict["properties"].items():
465 # value is a dict → rebuild a Schema object and recurse
466 if isinstance(value, dict):
--> 467 new_props[key] = _schema_to_dict(types.Schema.model_validate(value))
468 # value is already a Schema instance
469 elif isinstance(value, types.Schema):
File ~/programs/personal-programs/5dgai/.venv/lib/python3.11/site-packages/pydantic/main.py:716, in BaseModel.model_validate(cls, obj, strict, extra, from_attributes, context, by_alias, by_name)
710 if by_alias is False and by_name is not True:
711 raise PydanticUserError(
712 'At least one of `by_alias` or `by_name` must be set to True.',
713 code='validate-by-alias-and-name-false',
714 )
--> 716 return cls.__pydantic_validator__.validate_python(
717 obj,
718 strict=strict,
719 extra=extra,
720 from_attributes=from_attributes,
721 context=context,
722 by_alias=by_alias,
723 by_name=by_name,
724 )
ValidationError: 1 validation error for Schema
enum.9
Input should be a valid string [type=string_type, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.12/v/string_type
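Judging from the last frame, the failure happens when _schema_to_dict re-validates a nested property of a tool schema: one of the Kaggle MCP tools apparently declares an enum list whose tenth entry (enum.9) is null, and google.genai.types.Schema requires every enum value to be a string. A minimal sketch of just the validation step, with hypothetical enum values chosen to mirror the error (the real offending schema comes from the MCP server):

from google.genai import types

# A property schema whose enum list contains a null entry, as the Kaggle MCP
# server presumably returns one. Schema.enum is typed list[str], so the None
# entry fails pydantic validation exactly as in the traceback above (the index
# differs only because this toy enum is shorter).
types.Schema.model_validate({'type': 'STRING', 'enum': ['csv', None]})
# ValidationError: 1 validation error for Schema
# enum.1
#   Input should be a valid string [type=string_type, input_value=None, input_type=NoneType]

Until ADK sanitizes such schemas (for example, by dropping null enum entries before calling types.Schema.model_validate), one possible workaround is to exclude the offending tool via McpToolset's tool_filter argument, assuming the installed version supports it. The tool name below is hypothetical; I have not yet identified which Kaggle tool carries the bad enum:

kaggle_mcp_server = McpToolset(
    connection_params=...,  # same StdioConnectionParams as above
    tool_filter=['search_datasets'],  # hypothetical: keep only tools with clean schemas
)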