forked from presenton/presenton
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
124 lines (121 loc) · 3.79 KB
/
docker-compose.yml
File metadata and controls
124 lines (121 loc) · 3.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
# Shared configuration (Docker Compose extension fields).
# All four services consume the identical environment list, and both *-gpu
# variants use the identical GPU reservation, so each is defined once here
# and referenced below via YAML anchors. Compose expands aliases before
# validation, so the resulting config is byte-equivalent to inlining them.

# Environment passed through from the host shell (or a .env file).
# Unset variables expand to empty strings inside the container.
x-presenton-env: &presenton-env
  - CAN_CHANGE_KEYS=${CAN_CHANGE_KEYS}
  - LLM=${LLM}
  - OPENAI_API_KEY=${OPENAI_API_KEY}
  - OPENAI_MODEL=${OPENAI_MODEL}
  - GOOGLE_API_KEY=${GOOGLE_API_KEY}
  - GOOGLE_MODEL=${GOOGLE_MODEL}
  - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
  - ANTHROPIC_MODEL=${ANTHROPIC_MODEL}
  - OLLAMA_URL=${OLLAMA_URL}
  - OLLAMA_MODEL=${OLLAMA_MODEL}
  - CUSTOM_LLM_URL=${CUSTOM_LLM_URL}
  - CUSTOM_LLM_API_KEY=${CUSTOM_LLM_API_KEY}
  - CUSTOM_MODEL=${CUSTOM_MODEL}
  - PEXELS_API_KEY=${PEXELS_API_KEY}
  - EXTENDED_REASONING=${EXTENDED_REASONING}
  - DATABASE_URL=${DATABASE_URL}

# Reserve one NVIDIA GPU for the *-gpu service variants.
# Requires the NVIDIA Container Toolkit on the host.
x-gpu-deploy: &gpu-deploy
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: 1
          capabilities: [gpu]

services:
  # NOTE: all four services publish host port 5000, so start exactly one
  # at a time, e.g. `docker compose up production`.

  # Production image built from the repository Dockerfile.
  production:
    # image: ghcr.io/presenton/presenton:latest
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      # You can replace 5000 with any other port number of your choice to run Presenton on a different port number.
      - "5000:80"
    volumes:
      - ./app_data:/app_data
    environment: *presenton-env

  # Production image with one NVIDIA GPU reserved.
  production-gpu:
    # image: ghcr.io/presenton/presenton:latest
    build:
      context: .
      dockerfile: Dockerfile
    deploy: *gpu-deploy
    ports:
      # You can replace 5000 with any other port number of your choice to run Presenton on a different port number.
      - "5000:80"
    volumes:
      - ./app_data:/app_data
    environment: *presenton-env

  # Development image (Dockerfile.dev); bind-mounts the source tree at /app.
  development:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "5000:80"
    volumes:
      - .:/app
      - ./app_data:/app_data
    environment: *presenton-env

  # Development image with one NVIDIA GPU reserved.
  development-gpu:
    build:
      context: .
      dockerfile: Dockerfile.dev
    deploy: *gpu-deploy
    ports:
      - "5000:80"
    volumes:
      - .:/app
      - ./app_data:/app_data
    environment: *presenton-env