Skill tools let LLMs read skill files without a sandbox. For skills with executable scripts, use Sandbox Tools with mounted skills instead.
## Available Tools

| Tool | Description |
|---|---|
| get_skill | Get skill metadata and file index |
| get_skill_file | Read a file from a skill |
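
For orientation, here is a rough sketch of how these two tools might look in OpenAI function-calling format. This is an illustration only, not the library's actual output: in practice the schema is generated by `SKILL_TOOLS.to_openai_tool_schema()` (see the example below), and the parameter names here are assumed from the example arguments at the end of this page.

```python
# Illustrative sketch only; the real schema comes from SKILL_TOOLS.to_openai_tool_schema().
# Parameter names (skill_name, file_path) mirror the example tool arguments below.
sketch_tools = [
    {
        "type": "function",
        "function": {
            "name": "get_skill",
            "description": "Get skill metadata and file index",
            "parameters": {
                "type": "object",
                "properties": {"skill_name": {"type": "string"}},
                "required": ["skill_name"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_skill_file",
            "description": "Read a file from a skill",
            "parameters": {
                "type": "object",
                "properties": {
                    "skill_name": {"type": "string"},
                    "file_path": {"type": "string"},
                },
                "required": ["skill_name", "file_path"],
            },
        },
    },
]
```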
The example below preloads skills by UUID, injects the skill context into the system prompt, and runs an agent loop that answers the model's tool calls with `SKILL_TOOLS.execute_tool`:
```python
import json
import os

from acontext import AcontextClient
from acontext.agent.skill import SKILL_TOOLS
from openai import OpenAI

client = AcontextClient(api_key=os.getenv("ACONTEXT_API_KEY"))
openai_client = OpenAI()

# Preload skills by UUID
skill_ids = ["uuid-of-skill-1", "uuid-of-skill-2"]
ctx = SKILL_TOOLS.format_context(client, skill_ids)
tools = SKILL_TOOLS.to_openai_tool_schema()
skills_context = ctx.get_context_prompt()

messages = [
    {"role": "system", "content": f"You have skill access.\n\n{skills_context}"},
    {"role": "user", "content": "What guidelines are in the internal-comms skill?"},
]

# Agent loop
while True:
    response = openai_client.chat.completions.create(
        model="gpt-4.1", messages=messages, tools=tools
    )
    message = response.choices[0].message
    messages.append(message)

    # No tool calls means the model has produced its final answer
    if not message.tool_calls:
        print(f"Assistant: {message.content}")
        break

    # Execute each requested skill tool and return the result as a tool message
    for tc in message.tool_calls:
        result = SKILL_TOOLS.execute_tool(ctx, tc.function.name, json.loads(tc.function.arguments))
        messages.append({"role": "tool", "tool_call_id": tc.id, "content": result})
```
The skills context from `ctx.get_context_prompt()` lists each preloaded skill, for example:

```xml
<available_skills>
  <skill>
    <name>data-extraction</name>
    <description>Extract structured data from documents</description>
  </skill>
</available_skills>
```
{"skill_name": "data-extraction"}
{"skill_name": "data-extraction", "file_path": "SKILL.md"}