Skip to content

Configuration Reference

Claudex searches for config files in this order:

  1. $CLAUDEX_CONFIG environment variable
  2. ./claudex.toml (current directory)
  3. ./.claudex/config.toml (current directory)
  4. Parent directories (up to 10 levels), checking both patterns
  5. ~/.config/claudex/config.toml (XDG — checked before platform-specific paths)

See Configuration for full details.

# Top-level settings. These are root-table keys, so in a real config file
# they must appear before the first [[profiles]] (or any other) table header.

# Path to claude binary (default: "claude" from PATH)
claude_binary = "claude"
# Proxy server bind port
proxy_port = 13456
# Proxy server bind address
proxy_host = "127.0.0.1"
# Log level: trace, debug, info, warn, error
log_level = "info"
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `claude_binary` | string | `"claude"` | Path to the Claude Code CLI binary |
| `proxy_port` | integer | `13456` | Port the translation proxy listens on |
| `proxy_host` | string | `"127.0.0.1"` | Address the proxy binds to |
| `log_level` | string | `"info"` | Minimum log level |

Define shorthand names for model identifiers:

# Shorthand names usable wherever a model identifier is expected (e.g. `-m grok3`).
[model_aliases]
grok3 = "grok-3-beta"
gpt4o = "gpt-4o"
ds3 = "deepseek-chat"
claude = "claude-sonnet-4-20250514"

Use aliases with -m:

```sh
claudex run grok -m grok3
```
# Full-field profile example: xAI Grok via the OpenAI-compatible endpoint.
[[profiles]]
# Unique identifier, used by `claudex run <name>` and in backup_providers lists.
name = "grok"
provider_type = "OpenAICompatible"
base_url = "https://api.x.ai/v1"
# Plaintext key; prefer api_key_keyring (below) to keep secrets out of the file —
# when set, the keyring entry overrides api_key.
api_key = "xai-..."
# api_key_keyring = "grok-api-key"
default_model = "grok-3-beta"
auth_type = "api-key" # "api-key" (default) or "oauth"
# oauth_provider = "openai" # required when auth_type = "oauth"
# Profile names to fail over to, tried in order.
backup_providers = ["deepseek"]
custom_headers = {}
extra_env = {}
# Smart-routing weight: higher = preferred.
priority = 100
enabled = true
# Model slot mapping (optional)
[profiles.models]
haiku = "grok-3-mini-beta"
sonnet = "grok-3-beta"
opus = "grok-3-beta"
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `name` | string | required | Unique profile identifier |
| `provider_type` | string | `"DirectAnthropic"` | `"DirectAnthropic"`, `"OpenAICompatible"`, or `"OpenAIResponses"` |
| `base_url` | string | required | Provider API endpoint URL |
| `api_key` | string | `""` | API key in plaintext |
| `api_key_keyring` | string | — | OS keychain entry name (overrides `api_key`) |
| `default_model` | string | required | Model identifier to use by default |
| `auth_type` | string | `"api-key"` | Authentication method: `"api-key"` or `"oauth"` |
| `oauth_provider` | string | — | OAuth provider name (required when `auth_type = "oauth"`). One of: claude, openai, google, qwen, kimi, github |
| `backup_providers` | string[] | `[]` | Profile names for failover, tried in order |
| `custom_headers` | map | `{}` | Additional HTTP headers sent with every request |
| `extra_env` | map | `{}` | Environment variables set when launching Claude |
| `priority` | integer | `100` | Priority weight for smart routing (higher = preferred) |
| `enabled` | boolean | `true` | Whether this profile is active |

The optional [profiles.models] table maps Claude Code’s /model switcher slots to provider-specific model names. When you switch models inside Claude Code (e.g., /model opus), Claudex translates the request to the mapped model.

# Maps Claude Code's /model switcher slots to provider-specific model names.
[profiles.models]
haiku = "grok-3-mini-beta" # maps /model haiku
sonnet = "grok-3-beta" # maps /model sonnet
opus = "grok-3-beta" # maps /model opus
| Field | Type | Description |
| --- | --- | --- |
| `haiku` | string | Model to use when Claude Code selects haiku |
| `sonnet` | string | Model to use when Claude Code selects sonnet |
| `opus` | string | Model to use when Claude Code selects opus |
# API-key provider examples. DirectAnthropic profiles pass requests through
# unchanged; OpenAICompatible profiles go through the translation proxy.

# Anthropic (DirectAnthropic — no translation)
[[profiles]]
name = "anthropic"
provider_type = "DirectAnthropic"
base_url = "https://api.anthropic.com"
api_key = "sk-ant-..."
default_model = "claude-sonnet-4-20250514"
# MiniMax (DirectAnthropic — no translation)
[[profiles]]
name = "minimax"
provider_type = "DirectAnthropic"
base_url = "https://api.minimax.io/anthropic"
api_key = "..."
default_model = "claude-sonnet-4-20250514"
backup_providers = ["anthropic"]
# OpenRouter (OpenAICompatible — needs translation)
[[profiles]]
name = "openrouter"
provider_type = "OpenAICompatible"
base_url = "https://openrouter.ai/api/v1"
api_key = "..."
default_model = "anthropic/claude-sonnet-4"
# Grok (OpenAICompatible — needs translation)
# NOTE(review): grok and deepseek list each other as backups — confirm the
# failover logic guards against cycles before using this pairing.
[[profiles]]
name = "grok"
provider_type = "OpenAICompatible"
base_url = "https://api.x.ai/v1"
api_key = "xai-..."
default_model = "grok-3-beta"
backup_providers = ["deepseek"]
# OpenAI (OpenAICompatible — needs translation)
[[profiles]]
name = "chatgpt"
provider_type = "OpenAICompatible"
base_url = "https://api.openai.com/v1"
api_key = "sk-..."
default_model = "gpt-4o"
# DeepSeek (OpenAICompatible — needs translation)
[[profiles]]
name = "deepseek"
provider_type = "OpenAICompatible"
base_url = "https://api.deepseek.com"
api_key = "..."
default_model = "deepseek-chat"
backup_providers = ["grok"]
# Kimi / Moonshot (OpenAICompatible — needs translation)
[[profiles]]
name = "kimi"
provider_type = "OpenAICompatible"
base_url = "https://api.moonshot.cn/v1"
api_key = "..."
default_model = "moonshot-v1-128k"
# GLM / Zhipu (OpenAICompatible — needs translation)
[[profiles]]
name = "glm"
provider_type = "OpenAICompatible"
base_url = "https://open.bigmodel.cn/api/paas/v4"
api_key = "..."
default_model = "glm-4-plus"
# Ollama (local, no API key needed)
[[profiles]]
name = "local-qwen"
provider_type = "OpenAICompatible"
base_url = "http://localhost:11434/v1"
api_key = ""
default_model = "qwen2.5:72b"
enabled = false
# vLLM / LM Studio (local)
[[profiles]]
name = "local-llama"
provider_type = "OpenAICompatible"
base_url = "http://localhost:8000/v1"
api_key = ""
default_model = "llama-3.3-70b"
enabled = false
# OAuth / subscription examples. These profiles set no api_key; credentials
# come from the provider's OAuth login (e.g. Codex CLI ~/.codex/auth.json,
# Claude's ~/.claude).

# ChatGPT/Codex subscription (OpenAIResponses — Responses API translation)
[[profiles]]
name = "codex-sub"
provider_type = "OpenAIResponses"
base_url = "https://chatgpt.com/backend-api/codex"
default_model = "gpt-4o"
auth_type = "oauth"
oauth_provider = "openai"
# OpenAI via OAuth (reads token from Codex CLI ~/.codex/auth.json)
[[profiles]]
name = "chatgpt-oauth"
provider_type = "OpenAICompatible"
base_url = "https://api.openai.com/v1"
default_model = "gpt-4o"
auth_type = "oauth"
oauth_provider = "openai"
[profiles.models]
haiku = "gpt-4o-mini"
sonnet = "gpt-4o"
opus = "o1"
# Claude subscription (skips proxy, uses Claude's native OAuth from ~/.claude)
[[profiles]]
name = "claude-sub"
provider_type = "DirectAnthropic"
base_url = "https://api.anthropic.com"
default_model = "claude-sonnet-4-20250514"
auth_type = "oauth"
oauth_provider = "claude"
[profiles.models]
haiku = "claude-haiku-4-20250514"
sonnet = "claude-sonnet-4-20250514"
opus = "claude-opus-4-20250514"
# Google Gemini via OAuth
[[profiles]]
name = "gemini"
provider_type = "OpenAICompatible"
base_url = "https://generativelanguage.googleapis.com/v1beta/openai"
default_model = "gemini-2.5-pro"
auth_type = "oauth"
oauth_provider = "google"
# Kimi via OAuth
[[profiles]]
name = "kimi-oauth"
provider_type = "OpenAICompatible"
base_url = "https://api.moonshot.cn/v1"
default_model = "moonshot-v1-128k"
auth_type = "oauth"
oauth_provider = "kimi"
# Qwen via OAuth
[[profiles]]
name = "qwen-oauth"
provider_type = "OpenAICompatible"
base_url = "https://chat.qwenlm.ai/api/chat/v1"
default_model = "qwen-max"
auth_type = "oauth"
oauth_provider = "qwen"
# GitHub Copilot via OAuth
[[profiles]]
name = "github-copilot"
provider_type = "OpenAICompatible"
base_url = "https://api.githubcopilot.com"
default_model = "gpt-4o"
auth_type = "oauth"
oauth_provider = "github"
# ChatGPT/Codex subscription via OAuth (OpenAIResponses)
# NOTE(review): duplicate profile name "codex-sub" (also defined above) —
# profile names must be unique; rename one if both appear in the same config.
[[profiles]]
name = "codex-sub"
provider_type = "OpenAIResponses"
base_url = "https://chatgpt.com/backend-api/codex"
default_model = "gpt-4o"
auth_type = "oauth"
oauth_provider = "openai"
[profiles.models]
haiku = "gpt-4o-mini"
sonnet = "gpt-4o"
opus = "o1-pro"
# Smart routing: classify each request and dispatch it to a profile according
# to [router.rules]. Disabled by default.
[router]
enabled = false
profile = "local-qwen" # reuse a profile's base_url + api_key
model = "qwen2.5:3b" # override model (optional)
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `enabled` | boolean | `false` | Enable smart routing |
| `profile` | string | `""` | Profile name to reuse for classification (uses its `base_url` + `api_key`) |
| `model` | string | `""` | Model override for classification (defaults to profile's `default_model`) |
# Intent -> profile mapping for smart routing; `default` is the fallback
# when a request cannot be classified.
[router.rules]
code = "deepseek"
analysis = "grok"
creative = "chatgpt"
search = "kimi"
math = "deepseek"
default = "grok"
| Key | Description |
| --- | --- |
| `code` | Profile for coding tasks |
| `analysis` | Profile for analysis and reasoning |
| `creative` | Profile for creative writing |
| `search` | Profile for search and research |
| `math` | Profile for math and logic |
| `default` | Fallback when intent is unclassified |
# Conversation compression: summarize older messages once the token count
# exceeds threshold_tokens, always keeping the last keep_recent messages
# uncompressed.
[context.compression]
enabled = false
threshold_tokens = 50000
keep_recent = 10
profile = "local-qwen" # reuse a profile's base_url + api_key
model = "qwen2.5:3b" # override model (optional)
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `enabled` | boolean | `false` | Enable conversation compression |
| `threshold_tokens` | integer | `50000` | Compress when token count exceeds this |
| `keep_recent` | integer | `10` | Always keep the last N messages uncompressed |
| `profile` | string | `""` | Profile name to reuse for summarization (uses its `base_url` + `api_key`) |
| `model` | string | `""` | Model override for summarization (defaults to profile's `default_model`) |
# Cross-profile context sharing; max_context_size is a token budget for
# context injected from other profiles.
[context.sharing]
enabled = false
max_context_size = 2000
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `enabled` | boolean | `false` | Enable cross-profile context sharing |
| `max_context_size` | integer | `2000` | Max tokens to inject from other profiles |
# Local RAG: index the listed directories and inject the top_k retrieved
# chunks into context. `profile`/`model` select the embedding backend.
[context.rag]
enabled = false
index_paths = ["./src", "./docs"]
profile = "local-qwen" # reuse a profile's base_url + api_key
model = "nomic-embed-text" # embedding model
chunk_size = 512
top_k = 5
| Field | Type | Default | Description |
| --- | --- | --- | --- |
| `enabled` | boolean | `false` | Enable local RAG |
| `index_paths` | string[] | `[]` | Directories to index |
| `profile` | string | `""` | Profile name to reuse for embeddings (uses its `base_url` + `api_key`) |
| `model` | string | `""` | Embedding model name (defaults to profile's `default_model`) |
| `chunk_size` | integer | `512` | Text chunk size in tokens |
| `top_k` | integer | `5` | Number of results to inject |