-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstack.env.example
More file actions
88 lines (77 loc) · 3.56 KB
/
stack.env.example
File metadata and controls
88 lines (77 loc) · 3.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
# Copy this file to `../stack.env` before running the workspace stack scripts.
# NOTE(review): replace every `replace-with-...` placeholder with a real secret
# before first boot; the 32-char minimums below are stated by the placeholders.
ENCRYPTION_KEY=replace-with-32-char-minimum-secret
ENGINE_API_KEY=replace-with-server-api-key
JWT_SECRET=replace-with-32-char-minimum-jwt-secret
# Publishable key seeded into Core on first boot (sample value, safe to replace).
CORE_BOOTSTRAP_PUBLISHABLE_KEY=vkey_0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
# Optional host port overrides when 3000/8787 are already in use
CORE_HOST_PORT=3000
ENGINE_HOST_PORT=8787
ECHOLINE_HOST_PORT=8000
# Echoline (Self-Hosted STT/TTS) - Optional
# Enable fully self-hosted speech by running: docker compose --profile echoline up
# When echoline is running, set STT_PROVIDER and TTS_PROVIDER to 'openai-compatible'
# to route audio processing to the local echoline container instead of Deepgram.
#
# Echoline requires an LLM backend for its Realtime API chat completions feature.
# By default it points back to the engine's OpenAI-compatible endpoint.
ECHOLINE_CHAT_COMPLETION_BASE_URL=http://host.docker.internal:8787/v1
# NOTE(review): `${ENGINE_API_KEY}` expansion only works if the loader performs
# variable interpolation (docker compose `env_file` does NOT expand `${...}`;
# compose-level interpolation does) — confirm how the stack scripts load this file.
ECHOLINE_CHAT_COMPLETION_API_KEY=${ENGINE_API_KEY}
ECHOLINE_LOG_LEVEL=INFO
# HuggingFace token for downloading private models (optional)
# Get a token at: https://huggingface.co/settings/tokens
HF_TOKEN=
# Engine runtime config is persisted by the engine itself in the Docker volume at /app/data/config/runtime.yaml.
# The env vars below are bootstrap defaults used to create the initial YAML file on first boot.
RUNTIME_CONFIG_PATH=/app/data/config/runtime.yaml
# Optional provider/runtime overrides
TEST_MODE=true
LLM_PROVIDER=openrouter
GROQ_API_KEY=
GROQ_MODEL=openai/gpt-oss-20b
GROQ_WHISPER_MODEL=whisper-large-v3
OPENROUTER_API_KEY=
OPENROUTER_MODEL=openrouter/healer-alpha
# NOTE(review): OPENAI_COMPATIBLE_BASE_URL and OPENAI_COMPATIBLE_API_KEY are
# assigned AGAIN in the audio section below with a different URL
# (http://echoline:8000/v1). Most dotenv loaders keep the LAST assignment, so
# these LLM-oriented values are likely clobbered — if the LLM and audio
# endpoints must differ, one of the two sections needs distinct variable names.
# TODO confirm intended precedence against the consuming services.
OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:8000/v1
OPENAI_COMPATIBLE_API_KEY=
OPENAI_COMPATIBLE_MODEL=gpt-4o-mini
OPENAI_COMPATIBLE_NAME=openai-compatible
# STT/TTS Provider Selection
# Options:
# - deepgram: Hosted Deepgram API (requires DEEPGRAM_API_KEY)
# - openai-compatible: Self-hosted Echoline or other OpenAI-compatible service
#
# For fully self-hosted speech (no external STT/TTS dependencies):
# 1. Set STT_PROVIDER=openai-compatible and TTS_PROVIDER=openai-compatible
# 2. Ensure the echoline service is running: docker compose --profile echoline up
# 3. Configure OPENAI_COMPATIBLE_BASE_URL to point to echoline (default: http://echoline:8000/v1)
STT_PROVIDER=deepgram
TTS_PROVIDER=deepgram
# NOTE(review): `${STT_PROVIDER}`/`${TTS_PROVIDER}` expansion depends on the
# loader supporting interpolation — see the ECHOLINE_CHAT_COMPLETION_API_KEY
# note above; verify these do not end up as the literal string "${STT_PROVIDER}".
DEFAULT_STT_PROVIDER=${STT_PROVIDER}
DEFAULT_TTS_PROVIDER=${TTS_PROVIDER}
# Deepgram Configuration (when STT_PROVIDER or TTS_PROVIDER is 'deepgram')
DEEPGRAM_API_KEY=
DEEPGRAM_STT_MODEL=nova-3
DEEPGRAM_STT_LANGUAGE=en-US
DEEPGRAM_TTS_MODEL=aura-2-thalia-en
# OpenAI-Compatible Audio Provider Configuration (when using Echoline)
# These settings route STT/TTS requests to an OpenAI-compatible audio service.
# Default points to the echoline container within the Docker network.
# NOTE(review): duplicate keys — these re-assign OPENAI_COMPATIBLE_BASE_URL and
# OPENAI_COMPATIBLE_API_KEY already set in the LLM section above. With
# last-wins dotenv semantics this echoline URL overrides the earlier
# host.docker.internal value; confirm that is intended for BOTH the LLM and
# audio code paths, or split the variables.
OPENAI_COMPATIBLE_BASE_URL=http://echoline:8000/v1
OPENAI_COMPATIBLE_API_KEY=
# Echoline-specific model and voice configuration
# These are used when STT_PROVIDER=openai-compatible or TTS_PROVIDER=openai-compatible
ECHOLINE_STT_MODEL=Systran/faster-whisper-tiny
ECHOLINE_TTS_MODEL=onnx-community/Kokoro-82M-v1.0-ONNX
ECHOLINE_TTS_VOICE=af_heart
DEFAULT_VOICE=af_heart
CEREBRAS_API_KEY=
# Voice-activity detection settings (silero VAD, enabled by default).
VAD_PROVIDER=silero
VAD_ENABLED=true
CORE_ENABLE_DEV_VOICE_OVERRIDES=false
# Optional Core bootstrap overrides
CORE_BOOTSTRAP_APP_ID=default
CORE_BOOTSTRAP_APP_NAME=Local Stack App
CORE_BOOTSTRAP_APP_DESCRIPTION=Bootstrap app for the self-hosted Docker stack
CORE_BOOTSTRAP_API_KEY_LABEL=Local Stack Key
CORE_BOOTSTRAP_SCOPES=mint_ephemeral
CORE_BOOTSTRAP_ALLOWED_PROVIDERS=vowel-prime