-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathMakefile
More file actions
375 lines (306 loc) · 16 KB
/
Makefile
File metadata and controls
375 lines (306 loc) · 16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
# All command targets are phony (they name actions, not files); a stray file
# named e.g. "clean" or "test" would otherwise shadow the target.
# Fixed: added previously-missing targets (dev-clear-cache, setup-claude-creds-mac,
# backend-dev, frontend-dev, k8s-apply, k8s-delete, test-k8s, test-loop,
# test-k8s-components, test-k8s-job, test-worker-e2e).
.PHONY: help dev dev-stop dev-reset dev-clear-cache dev-logs dev-shell dev-legacy install clean lint lint-all fmt fmt-all setup-claude-creds setup-claude-creds-mac setup-claude-creds-k8s backend-dev frontend-dev build-backend build-frontend build-agent-controller build-all push-backend push-frontend push-agent-controller push-all build-all-parallel push-all-parallel deploy deploy-loop deploy-loop-all deploy-backend deploy-agent deploy-frontend-k8s deploy-manifests prod-reset k8s-apply k8s-delete kind-create kind-delete kind-load kind-secrets kind-deploy kind-reset kind-logs kind-shell test-k8s test-loop test-k8s-components test-k8s-job test-worker-e2e test test-run test-reset test-ci debug-tasks debug-task debug-retry debug-logs debug-db
# Load .env file if it exists (provides GHCR_USER, CLAUDE_CREDENTIALS, API_URL, ...).
# The leading '-' suppresses the error when .env is absent.
-include .env
# Bare 'export' exports ALL make variables to recipe sub-shells,
# so .env values are visible to the scripts invoked below.
export
# Configuration
# Set GHCR_USER in .env file (see .env.example)
GHCR_REGISTRY := ghcr.io
GHCR_USER ?= yourusername
IMAGE_TAG ?= latest
# Fully-qualified image names pushed to GitHub Container Registry.
BACKEND_IMAGE := $(GHCR_REGISTRY)/$(GHCR_USER)/mainloop-backend:$(IMAGE_TAG)
FRONTEND_IMAGE := $(GHCR_REGISTRY)/$(GHCR_USER)/mainloop-frontend:$(IMAGE_TAG)
AGENT_CONTROLLER_IMAGE := $(GHCR_REGISTRY)/$(GHCR_USER)/mainloop-agent-controller:$(IMAGE_TAG)
help: ## Show this help message
	# Fixed: character class now includes digits — previously targets containing
	# a digit (setup-claude-creds-k8s, test-k8s, deploy-frontend-k8s, ...) were
	# silently missing from the help listing.
	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
# =============================================================================
# Development (DevSpace + Kind) - Hot reload via file sync
# =============================================================================
# KIND_CLUSTER_NAME is assigned further down (?= mainloop-test); that is fine
# because make expands recipe variables at run time, not at parse time.
dev: ## Start dev environment with hot reload (DevSpace + Kind)
	devspace dev --kube-context kind-$(KIND_CLUSTER_NAME) -n mainloop
dev-stop: ## Stop DevSpace and purge resources
	devspace purge --kube-context kind-$(KIND_CLUSTER_NAME) -n mainloop
dev-reset: ## Reset ALL data (database + task namespaces + restart backend)
	@./scripts/kind/reset-data.sh
dev-clear-cache: ## Clear Vite cache (fixes stale HMR issues)
	# Best-effort: '|| true' keeps the target green when the pod is not running.
	kubectl --context kind-$(KIND_CLUSTER_NAME) -n mainloop exec deployment/mainloop-frontend-devspace -- rm -rf /app/node_modules/.vite 2>/dev/null || true
	@echo "Vite cache cleared. Refresh browser."
dev-logs: ## Tail backend logs
	devspace logs -f --kube-context kind-$(KIND_CLUSTER_NAME) -n mainloop
dev-shell: ## Open shell in backend pod
	devspace enter --kube-context kind-$(KIND_CLUSTER_NAME) -n mainloop
dev-legacy: ## Start with docker compose (no hot reload)
	docker compose up --build --watch
# =============================================================================
# Dependencies
# =============================================================================
install: ## Install all dependencies
	# Each recipe line runs in its own shell, so each 'cd' is independent.
	pnpm install
	cd backend && uv sync
	cd models && uv sync
clean: ## Clean build artifacts
	rm -rf frontend/.svelte-kit frontend/build
	rm -rf backend/.venv models/.venv
	# Fixed: -prune stops find from descending into the directories it is about
	# to delete; without it find emits "No such file or directory" for the
	# just-removed contents and exits nonzero, failing the target.
	find . -type d -name "__pycache__" -prune -exec rm -rf {} +
# Linting/formatting via trunk; the plain variants only touch files changed
# relative to origin/main, the -all variants cover the whole repo.
lint: ## Lint files changed since main
	trunk check --upstream origin/main
lint-all: ## Lint all files
	trunk check -a
fmt: ## Format and fix files changed since main
	trunk fmt --upstream origin/main
	# -y auto-applies trunk's suggested fixes.
	trunk check --upstream origin/main -y
fmt-all: ## Format and fix all files
	trunk fmt -a
	trunk check -a -y
setup-claude-creds: ## Login to Claude inside Linux container, credentials saved to shared volume
	@echo "=== Claude Container Login ==="
	@echo "This will start a container where you can login to Claude."
	@echo "Credentials will be saved to the 'claude-config' Docker volume."
	@echo ""
	# Build quietly; output is discarded so only the prompts below are shown.
	@docker build -q -t mainloop-claude-agent ./claude-agent > /dev/null
	# Idempotent setup: volume may already exist, stale login container may linger.
	@docker volume create claude-config > /dev/null 2>&1 || true
	@docker rm -f claude-login-tmp > /dev/null 2>&1 || true
	# Keep the container alive for up to an hour while the user logs in;
	# the claude-config volume is mounted where the CLI writes credentials.
	@docker run -d --entrypoint "" --name claude-login-tmp \
		-v claude-config:/home/claude/.claude \
		mainloop-claude-agent sleep 3600 > /dev/null
	@echo "Container started with shared claude-config volume."
	@echo "Run: claude login"
	@echo "Complete the browser OAuth flow, then type 'exit'"
	@echo ""
	# Interactive shell for the OAuth flow; after the user exits, verify the
	# credentials file exists, then always clean up the temp container.
	@docker exec -it claude-login-tmp bash; \
	echo ""; \
	echo "Checking credentials..."; \
	if docker exec claude-login-tmp test -f /home/claude/.claude/.credentials.json; then \
		echo "✓ Credentials saved to claude-config volume"; \
		echo "  All containers mounting this volume will have access."; \
	else \
		echo "⚠ No credentials found. Did you complete the login?"; \
	fi; \
	docker rm -f claude-login-tmp > /dev/null
setup-claude-creds-mac: ## Extract Claude credentials from macOS Keychain (Mac only)
	@echo "Extracting Claude credentials from macOS Keychain..."
	# Reads the "Claude Code-credentials" Keychain item for the current user,
	# then rewrites only the CLAUDE_CREDENTIALS line of .env, preserving the
	# rest of the file. NOTE(review): assumes $(USER) matches the Keychain
	# account name used by Claude Code — confirm on shared machines.
	@CREDS=$$(security find-generic-password -s "Claude Code-credentials" -a "$(USER)" -w 2>/dev/null); \
	if [ -z "$$CREDS" ]; then \
		echo "Error: Claude credentials not found in Keychain."; \
		echo "Make sure you're logged in to Claude Code on this Mac."; \
		exit 1; \
	fi; \
	if [ -f .env ]; then \
		echo "Updating CLAUDE_CREDENTIALS in .env (preserving other variables)..."; \
		grep -v "^CLAUDE_CREDENTIALS=" .env > .env.tmp || true; \
		mv .env.tmp .env; \
	else \
		echo "Creating .env file..."; \
		touch .env; \
	fi; \
	echo "CLAUDE_CREDENTIALS=$$CREDS" >> .env; \
	echo "✓ Claude credentials updated in .env"
setup-claude-creds-k8s: ## Push Claude credentials from Docker volume to 1Password for k8s
	@echo "Extracting credentials from Docker volume and pushing to 1Password..."
	# Copies .credentials.json out of the claude-config volume and stores it as a
	# 1Password document named claude-credentials in the 'kubernetes' vault.
	# Fixed: credentials now go through mktemp (mode 0600, unpredictable name)
	# instead of a fixed world-readable /tmp/claude-creds.json path.
	@if ! command -v op >/dev/null 2>&1; then \
		echo "Error: 1Password CLI (op) not found."; \
		echo "Install it: brew install --cask 1password-cli"; \
		exit 1; \
	fi; \
	tmpfile=$$(mktemp); \
	docker run --rm -v claude-config:/config:ro alpine cat /config/.credentials.json > "$$tmpfile" 2>/dev/null; \
	if [ ! -s "$$tmpfile" ]; then \
		echo "Error: No credentials in claude-config volume"; \
		echo "Run 'make setup-claude-creds' first"; \
		rm -f "$$tmpfile"; \
		exit 1; \
	fi; \
	echo "Creating/updating claude-credentials item in kubernetes vault..."; \
	op item get claude-credentials --vault kubernetes >/dev/null 2>&1 && \
		op item delete claude-credentials --vault kubernetes || true; \
	op document create "$$tmpfile" --title=claude-credentials --vault=kubernetes >/dev/null; \
	rm -f "$$tmpfile"; \
	echo "✓ Claude credentials pushed to 1Password vault 'kubernetes'"
# Backend commands
backend-dev: ## Run backend in development mode
	# uvicorn --reload restarts the server on source changes.
	cd backend && uv run uvicorn mainloop.api:app --reload --host 0.0.0.0 --port 8000
# Frontend commands
frontend-dev: ## Run frontend in development mode
	cd frontend && pnpm dev
# Docker image commands
# Backend and frontend build from the repo root (their Dockerfiles COPY shared
# files); the agent controller builds from its own directory.
build-backend: ## Build backend Docker image
	docker build --file backend/Dockerfile --tag $(BACKEND_IMAGE) .
build-frontend: ## Build frontend Docker image
	docker build --file frontend/Dockerfile --tag $(FRONTEND_IMAGE) .
build-agent-controller: ## Build agent controller Docker image
	docker build --file claude-agent/Dockerfile --tag $(AGENT_CONTROLLER_IMAGE) ./claude-agent
build-all: build-backend build-frontend build-agent-controller ## Build all Docker images
# Each push target depends on its build target, so images are always fresh.
push-backend: build-backend ## Push backend to GHCR
	docker push $(BACKEND_IMAGE)
push-frontend: build-frontend ## Push frontend to GHCR
	docker push $(FRONTEND_IMAGE)
push-agent-controller: build-agent-controller ## Push agent controller to GHCR
	docker push $(AGENT_CONTROLLER_IMAGE)
push-all: push-backend push-frontend push-agent-controller ## Push all images to GHCR
# Parallel build + push (much faster)
# Fixed: bare 'wait' (no operands) always exits 0, so a failed background
# docker build/push was silently ignored and "complete" was printed anyway.
# We now wait on each PID and propagate any failure.
build-all-parallel: ## Build all Docker images in parallel
	@echo "Building all images in parallel..."
	@docker build -f backend/Dockerfile -t $(BACKEND_IMAGE) . & b=$$!; \
	docker build -f frontend/Dockerfile -t $(FRONTEND_IMAGE) . & f=$$!; \
	docker build -f claude-agent/Dockerfile -t $(AGENT_CONTROLLER_IMAGE) ./claude-agent & a=$$!; \
	status=0; \
	wait $$b || status=1; \
	wait $$f || status=1; \
	wait $$a || status=1; \
	exit $$status
	@echo "All builds complete"
push-all-parallel: build-all-parallel ## Build and push all images in parallel
	@echo "Pushing all images in parallel..."
	@docker push $(BACKEND_IMAGE) & b=$$!; \
	docker push $(FRONTEND_IMAGE) & f=$$!; \
	docker push $(AGENT_CONTROLLER_IMAGE) & a=$$!; \
	status=0; \
	wait $$b || status=1; \
	wait $$f || status=1; \
	wait $$a || status=1; \
	exit $$status
	@echo "All pushes complete"
# Deployment
deploy: push-all-parallel ## Full deployment to k8s (parallel builds + pushes)
	@echo "Applying Kubernetes manifests..."
	kubectl apply -k k8s/apps/mainloop/overlays/prod --server-side --force-conflicts
	@echo "Restarting Kubernetes deployments in parallel..."
	# Fixed: bare 'wait' always exits 0, so a failed rollout restart went
	# unnoticed; wait on each PID and fail the target if any restart failed.
	@kubectl rollout restart deployment/mainloop-backend -n mainloop & b=$$!; \
	kubectl rollout restart deployment/mainloop-agent-controller -n mainloop & a=$$!; \
	kubectl rollout restart deployment/mainloop-frontend -n mainloop & f=$$!; \
	status=0; \
	wait $$b || status=1; \
	wait $$a || status=1; \
	wait $$f || status=1; \
	exit $$status
	@echo "Rollouts triggered"
deploy-loop: ## Pull and deploy every 10 seconds
	@echo "Starting deploy loop (Ctrl+C to stop)..."
	# Deliberately loops forever; a failed pull/deploy is swallowed by the ';'
	# so one bad iteration does not stop the loop.
	@while true; do \
		git pull && $(MAKE) deploy; \
		sleep 10; \
	done
deploy-loop-all: ## Watch all and redeploy everything (old behavior)
	# NOTE(review): watchexec -e takes file *extensions*; 'Dockerfile' here
	# matches *.Dockerfile, not files literally named Dockerfile — confirm intent.
	watchexec --poll 1000 -w backend -w frontend/src -w k8s -w models -w claude-agent \
		-e py,ts,svelte,yaml,toml,Dockerfile \
		-i 'test*' -i '*_test.py' -i 'tests/' -i '__pycache__/' -i '.pytest_cache/' -i 'scripts/' \
		--on-busy-update restart \
		-- $(MAKE) deploy || true
# Selective deploy targets (faster - skip kubectl apply, just build+push+restart)
# Each target rebuilds a single image, pushes it, and bounces its deployment.
deploy-backend: ## Build, push, and restart backend only
	docker build --file backend/Dockerfile --tag $(BACKEND_IMAGE) .
	docker push $(BACKEND_IMAGE)
	kubectl rollout restart deployment/mainloop-backend -n mainloop
deploy-agent: ## Build, push, and restart agent controller only
	docker build --file claude-agent/Dockerfile --tag $(AGENT_CONTROLLER_IMAGE) ./claude-agent
	docker push $(AGENT_CONTROLLER_IMAGE)
	kubectl rollout restart deployment/mainloop-agent-controller -n mainloop
deploy-frontend-k8s: ## Build, push, and restart frontend only (k8s version)
	docker build --file frontend/Dockerfile --tag $(FRONTEND_IMAGE) .
	docker push $(FRONTEND_IMAGE)
	kubectl rollout restart deployment/mainloop-frontend -n mainloop
deploy-manifests: ## Apply k8s manifests only (no image builds)
	kubectl apply -k k8s/apps/mainloop/overlays/prod --server-side --force-conflicts
# Kube context for production; override via .env or the command line.
PROD_CONTEXT ?= admin@internal-01
prod-reset: ## Reset prod database + task namespaces + restart backend
	@echo "=== Using context: $(PROD_CONTEXT) ==="
	@echo "=== Cleaning up k8s task namespaces ==="
	# Best-effort deletion of every task-* namespace; --wait=false so the loop
	# doesn't block on namespace finalizers.
	@for ns in $$(kubectl --context $(PROD_CONTEXT) get ns -o name 2>/dev/null | grep "^namespace/task-" | cut -d/ -f2); do \
		echo "Deleting namespace: $$ns"; \
		kubectl --context $(PROD_CONTEXT) delete ns "$$ns" --wait=false 2>/dev/null || true; \
	done
	@echo "=== Deleting CNPG Database CR ==="
	# Deleting and re-applying the CloudNativePG Database CR drops and recreates
	# the database. --wait=true ensures the old CR is gone before re-apply.
	kubectl --context $(PROD_CONTEXT) delete database mainloop-db-database -n mainloop --wait=true
	@echo "=== Recreating Database CR ==="
	kubectl --context $(PROD_CONTEXT) apply -k k8s/apps/mainloop/overlays/prod --server-side --force-conflicts
	@echo "=== Waiting for database to be ready ==="
	# Polls .status.applied until the operator reports the database exists.
	# NOTE(review): no timeout — this will spin forever if the operator is down.
	@until kubectl --context $(PROD_CONTEXT) get database mainloop-db-database -n mainloop -o jsonpath='{.status.applied}' 2>/dev/null | grep -q true; do sleep 1; done
	@echo "=== Restarting backend to reinitialize DBOS ==="
	kubectl --context $(PROD_CONTEXT) rollout restart deployment/mainloop-backend -n mainloop
	kubectl --context $(PROD_CONTEXT) rollout status deployment/mainloop-backend -n mainloop --timeout=60s
	@echo "=== Reset complete ==="
# K8s commands (for local testing before moving to infrastructure repo)
# These use the *current* kube context — no --context flag — so double-check
# which cluster you are pointed at before running them.
k8s-apply: ## Apply K8s manifests locally
	kubectl apply -k k8s/apps/mainloop/overlays/prod --server-side
k8s-delete: ## Delete K8s resources
	kubectl delete -k k8s/apps/mainloop/overlays/prod
# Kind (local Kubernetes testing)
# Cluster name also determines the kube context name: kind-$(KIND_CLUSTER_NAME).
KIND_CLUSTER_NAME ?= mainloop-test
kind-create: ## Create Kind cluster for local testing
	@./scripts/kind/create-cluster.sh
kind-delete: ## Delete Kind cluster
	@kind delete cluster --name $(KIND_CLUSTER_NAME)
kind-load: ## Build and load images into Kind
	@./scripts/kind/load-images.sh
kind-secrets: ## Create K8s secrets from .env
	@./scripts/kind/create-secrets.sh
kind-deploy: ## Deploy mainloop to Kind
	@./scripts/kind/deploy.sh
kind-reset: dev-reset ## Alias for dev-reset
kind-logs: ## Tail backend logs (Kind test cluster)
	@kubectl --context=kind-$(KIND_CLUSTER_NAME) logs -n mainloop deployment/mainloop-backend -f
kind-shell: ## Open shell in backend pod (Kind test cluster)
	@kubectl --context=kind-$(KIND_CLUSTER_NAME) exec -it -n mainloop deployment/mainloop-backend -- bash
# Full local environment bring-up: create cluster, load images, create
# secrets, deploy. TEST_*_URL variables are defined later in the file, which
# is fine — make expands recipe variables at run time.
test-k8s: kind-create kind-load kind-secrets kind-deploy ## Start local K8s test environment
	@echo ""
	@echo "=== Local K8s environment ready ==="
	@echo "Frontend: $(TEST_FRONTEND_URL)"
	@echo "Backend:  $(TEST_API_URL)"
	@echo ""
	@echo "Run 'make kind-logs' to tail backend logs"
	@echo "Run 'make kind-reset' to reset data between tests"
	@echo "Run 'make test-loop' for auto-redeploy on changes"
test-loop: ## Watch for changes and auto-redeploy to Kind
	@echo "Starting Kind deploy loop (Ctrl+C to stop)..."
	@echo "Watching: backend/, frontend/src/, claude-agent/"
	# Three watchexec processes in the background; 'trap kill 0' tears the whole
	# process group down on Ctrl+C so no watcher is left running.
	@trap 'kill 0' INT; \
	watchexec -w backend/src -w models -e py \
		--on-busy-update restart -- bash -c 'make kind-load && make kind-deploy' & \
	watchexec -w frontend/src -e ts,svelte,css \
		--on-busy-update restart -- bash -c 'make kind-load && make kind-deploy' & \
	watchexec -w claude-agent -e py \
		--on-busy-update restart -- bash -c 'make kind-load && make kind-deploy' & \
	wait
# E2E Testing
test-k8s-components: ## Test K8s namespace/secret creation (quick)
	cd backend && uv run python scripts/test_k8s_components.py
test-k8s-job: ## Test K8s job creation (creates a real job)
	cd backend && uv run python scripts/test_k8s_components.py --job
test-worker-e2e: ## Run full worker E2E test (requires running backend + k8s)
	# REPO_URL can be overridden on the command line; $(or ...) falls back to
	# the mainloop repo when it is unset or empty.
	cd backend && REPO_URL="$(or $(REPO_URL),https://github.com/oldsj/mainloop)" uv run python scripts/test_worker_e2e.py
# =============================================================================
# Testing (DevSpace + Playwright)
# =============================================================================
# Local URLs for the test cluster — presumably the ports mapped by the Kind
# config in scripts/kind/create-cluster.sh; verify if they change.
TEST_API_URL := http://localhost:8081
TEST_FRONTEND_URL := http://localhost:5173
test: ## Deploy to Kind + open Playwright UI
	@./scripts/test-guard.sh
	devspace deploy --profile test --kube-context kind-$(KIND_CLUSTER_NAME) -n mainloop
	@echo "Waiting for backend..."
	# NOTE(review): no timeout — hangs forever if the deploy never becomes healthy.
	@until curl -sf $(TEST_API_URL)/health > /dev/null 2>&1; do sleep 2; done
	@cd frontend && PLAYWRIGHT_BASE_URL=$(TEST_FRONTEND_URL) API_URL=$(TEST_API_URL) pnpm exec playwright test --ui
test-run: ## Run tests headless (after make test or make dev)
	@./scripts/wait-for-ready.sh
	# TEST_ARGS lets callers pass extra playwright arguments (e.g. a test file).
	@cd frontend && PLAYWRIGHT_BASE_URL=$(TEST_FRONTEND_URL) API_URL=$(TEST_API_URL) pnpm exec playwright test $(TEST_ARGS)
test-reset: dev-reset ## Alias for dev-reset
test-ci: ## Run tests in CI (uses legacy kind scripts, no DevSpace)
	# Fail fast when the Claude token is missing — the agent cannot run without it.
	@if [ -z "$(CLAUDE_CODE_OAUTH_TOKEN)" ]; then \
		echo "Error: CLAUDE_CODE_OAUTH_TOKEN not set"; \
		exit 1; \
	fi
	# Reuse an existing cluster if one matches KIND_CLUSTER_NAME exactly.
	@if ! kind get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
		$(MAKE) kind-create; \
	fi
	@$(MAKE) kind-load
	@$(MAKE) kind-secrets
	@$(MAKE) kind-deploy
	# NOTE(review): no timeout on the health poll — relies on the CI job timeout.
	@until curl -sf $(TEST_API_URL)/health > /dev/null 2>&1; do sleep 2; done
	@cd frontend && PLAYWRIGHT_BASE_URL=$(TEST_FRONTEND_URL) API_URL=$(TEST_API_URL) pnpm exec playwright test
# Debugging commands
# Set API_URL in .env file or override: make debug-tasks API_URL=https://your-api.example.com
API_URL ?= https://mainloop-api.example.com
debug-tasks: ## Show all tasks with workflow status
	@curl -s $(API_URL)/debug/tasks | jq '.[] | {id: .task.id, status: .task.status, workflow: .workflow_status, error: .workflow_error, namespace: .namespace_exists, pr: .task.pr_url}'
debug-task: ## Show detailed info for a specific task (usage: make debug-task TASK_ID=xxx)
	# Fixed: an unset TASK_ID expanded to startswith("") and matched EVERY task;
	# require the variable explicitly.
	@test -n "$(TASK_ID)" || { echo "Error: TASK_ID is required (usage: make debug-task TASK_ID=xxx)"; exit 1; }
	@curl -s $(API_URL)/debug/tasks | jq '.[] | select(.task.id | startswith("$(TASK_ID)"))'
debug-retry: ## Retry a failed task (usage: make debug-retry TASK_ID=xxx)
	# Fixed: an unset TASK_ID produced a POST to /debug/tasks//retry.
	@test -n "$(TASK_ID)" || { echo "Error: TASK_ID is required (usage: make debug-retry TASK_ID=xxx)"; exit 1; }
	@curl -s -X POST $(API_URL)/debug/tasks/$(TASK_ID)/retry | jq
debug-logs: ## Show backend logs
	kubectl logs -n mainloop deployment/mainloop-backend --tail=100 -f
# Fixed: the previous backslash-continued `python3 -c "...;"` collapsed the
# whole script onto ONE line, and compound statements (`async with ...:`,
# `for ...:`) are not allowed in a single-line suite — the target always died
# with a SyntaxError. The script now lives in a multi-line define and is
# passed to python3 through an exported environment variable.
define DEBUG_DB_SCRIPT
import asyncio
from mainloop.db import db

async def q():
    await db.connect()
    async with db.connection() as c:
        rows = await c.fetch('SELECT id, status, pr_url, error FROM worker_tasks ORDER BY created_at DESC LIMIT 5')
        for r in rows:
            print(dict(r))
    await db.disconnect()

asyncio.run(q())
endef
export DEBUG_DB_SCRIPT
debug-db: ## Query tasks directly from database
	@kubectl exec -n mainloop deployment/mainloop-backend -- python3 -c "$$DEBUG_DB_SCRIPT"