forked from Alishahryar1/free-claude-code
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
86 lines (60 loc) · 2.37 KB
/
.env.example
File metadata and controls
86 lines (60 loc) · 2.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# NVIDIA NIM Config
NVIDIA_NIM_API_KEY=""
NVIDIA_NIM_BASE_URL="https://integrate.api.nvidia.com/v1"
# OpenRouter Config
OPENROUTER_API_KEY=""
# LM Studio Config (local provider, no API key required)
LM_STUDIO_BASE_URL="http://localhost:1234/v1"
# Llama.cpp Config (local provider, no API key required)
LLAMACPP_BASE_URL="http://localhost:8080/v1"
# All Claude model requests are mapped to these models; the plain MODEL entry is the fallback.
# Format: provider_type/model_name (the model name itself may contain slashes)
# Valid providers: "nvidia_nim" | "open_router" | "lmstudio" | "llamacpp"
MODEL_OPUS="nvidia_nim/z-ai/glm4.7"
MODEL_SONNET="open_router/arcee-ai/trinity-large-preview:free"
MODEL_HAIKU="open_router/stepfun/step-3.5-flash:free"
MODEL="nvidia_nim/z-ai/glm4.7"
# NIM Settings
# Enable chat_template_kwargs + reasoning_budget for thinking models (kimi, nemotron).
# Leave false for models that don't support it (e.g. Mistral).
NIM_ENABLE_THINKING=false
# Provider config
PROVIDER_RATE_LIMIT=40
PROVIDER_RATE_WINDOW=60
PROVIDER_MAX_CONCURRENCY=5
# HTTP client timeouts (seconds) for provider API requests
HTTP_READ_TIMEOUT=120
HTTP_WRITE_TIMEOUT=10
HTTP_CONNECT_TIMEOUT=2
# Optional server API key (Anthropic-style)
ANTHROPIC_AUTH_TOKEN=
# Messaging Platform: "telegram" | "discord"
MESSAGING_PLATFORM="discord"
MESSAGING_RATE_LIMIT=1
MESSAGING_RATE_WINDOW=1
# Voice Note Transcription
VOICE_NOTE_ENABLED=false
# WHISPER_DEVICE: "cpu" | "cuda" | "nvidia_nim"
# - "cpu"/"cuda": Hugging Face transformers Whisper (offline, free; install with: uv sync --extra voice_local)
# - "nvidia_nim": NVIDIA NIM Whisper via Riva gRPC (requires NVIDIA_NIM_API_KEY; install with: uv sync --extra voice)
WHISPER_DEVICE="nvidia_nim"
# WHISPER_MODEL:
# - For cpu/cuda: Hugging Face ID or short name (tiny, base, small, medium, large-v2, large-v3, large-v3-turbo)
# - For nvidia_nim: NVIDIA NIM model (e.g., "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3")
# - For nvidia_nim, this defaults to "openai/whisper-large-v3" for best performance
WHISPER_MODEL="openai/whisper-large-v3"
HF_TOKEN=""
# Telegram Config
TELEGRAM_BOT_TOKEN=""
ALLOWED_TELEGRAM_USER_ID=""
# Discord Config
DISCORD_BOT_TOKEN=""
ALLOWED_DISCORD_CHANNELS=""
# Agent Config
CLAUDE_WORKSPACE="./agent_workspace"
ALLOWED_DIR=""
FAST_PREFIX_DETECTION=true
ENABLE_NETWORK_PROBE_MOCK=true
ENABLE_TITLE_GENERATION_SKIP=true
ENABLE_SUGGESTION_MODE_SKIP=true
ENABLE_FILEPATH_EXTRACTION_MOCK=true