Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 11 additions & 11 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ jobs:
uses: bufbuild/buf-setup-action@v1

- name: Generate runner protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Get pnpm store directory
id: pnpm-store
Expand Down Expand Up @@ -56,7 +56,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Type-check platform server
run: pnpm --filter @agyn/platform-server run typecheck
Expand Down Expand Up @@ -96,7 +96,7 @@ jobs:
uses: bufbuild/buf-setup-action@v1

- name: Generate runner protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Get pnpm store directory
id: pnpm-store
Expand Down Expand Up @@ -126,7 +126,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Apply platform-server migrations
env:
Expand Down Expand Up @@ -159,7 +159,7 @@ jobs:
uses: bufbuild/buf-setup-action@v1

- name: Generate runner protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Get pnpm store directory
id: pnpm-store
Expand Down Expand Up @@ -189,7 +189,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Start LiteLLM docker compose stack
run: docker compose -f packages/platform-server/test/litellm/docker-compose.yml up -d
Expand Down Expand Up @@ -228,7 +228,7 @@ jobs:
uses: bufbuild/buf-setup-action@v1

- name: Generate runner protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Get pnpm store directory
id: pnpm-store
Expand Down Expand Up @@ -258,7 +258,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Run UI tests (@agyn/platform-ui)
run: pnpm --filter @agyn/platform-ui test
Expand Down Expand Up @@ -313,7 +313,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Build Storybook (@agyn/platform-ui)
run: pnpm --filter @agyn/platform-ui run build-storybook
Expand Down Expand Up @@ -363,7 +363,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

- name: Approve necessary build scripts
run: pnpm approve-builds @prisma/client esbuild msw
Expand Down Expand Up @@ -417,7 +417,7 @@ jobs:
run: pnpm install --frozen-lockfile

- name: Generate protobufs
run: pnpm proto:generate
run: pnpm proto:gen

# Build all workspaces in topological order so library outputs exist
- name: Build UI workspace (topo order)
Expand Down
8 changes: 8 additions & 0 deletions buf.gen.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,14 @@ plugins:
out: packages/platform-server/src/proto/gen
opt:
- target=ts
- plugin: buf.build/bufbuild/es
out: packages/notifications/src/proto/gen
opt:
- target=ts
- plugin: buf.build/bufbuild/connect-es
out: packages/notifications/src/proto/gen
opt:
- target=ts
- plugin: buf.build/bufbuild/es
out: packages/docker-runner/src/proto/gen
opt:
Expand Down
32 changes: 32 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,38 @@ services:
start_period: 10s
networks:
- agents_net

redis:
image: redis:7-alpine
container_name: redis
restart: unless-stopped
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- agents_net

notifications:
build:
context: .
dockerfile: packages/notifications/Dockerfile
restart: unless-stopped
depends_on:
redis:
condition: service_healthy
environment:
NOTIFICATIONS_HOST: 0.0.0.0
NOTIFICATIONS_GRPC_PORT: 50051
NOTIFICATIONS_REDIS_URL: redis://redis:6379
NOTIFICATIONS_CHANNEL: notifications.v1
ports:
- "50051:50051"
networks:
- agents_net

volumes:
vault-file:
Expand Down
7 changes: 5 additions & 2 deletions docs/product-spec.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ Table of contents
- Glossary and changelog templates (pointers)

Overview and personas
- Graph-driven AI agent platform composing agents, tools, triggers, memory, and MCP servers into a live-updatable LangGraph runtime. The server exposes HTTP APIs and Socket.IO to manage the graph, provision nodes, execute tools, and observe runs. UI offers a builder to configure graphs and view checkpoints/status.
- Graph-driven AI agent platform composing agents, tools, triggers, memory, and MCP servers into a live-updatable LangGraph runtime. The platform-server exposes HTTP APIs and gRPC notifications, while a dedicated notifications service bridges those notifications to Socket.IO for the UI. UI offers a builder to configure graphs and view checkpoints/status.
- Personas
- Agent Builder (developer)
- Platform Operator (SRE)
Expand All @@ -32,7 +32,7 @@ Architecture and components
- Unknown-key handling and retries: apply strips unknown config keys on schema validation errors and retries up to 3 times.
- Checkpointing via Postgres (default); streaming UI integration planned.
- Server
- HTTP APIs and Socket.IO for management and status streaming.
- HTTP APIs and a gRPC notifications publisher (Socket.IO bridge implemented by a separate notifications service).
- Endpoints manage graph templates, graph state, node lifecycle/actions, dynamic-config schema, reminders, runs, vault proxy, and Nix proxy (when enabled).
- Persistence
- Graph store: filesystem dataset (format: 2) with deterministic edge IDs, dataset-level file locks, and staged working-tree swaps. Each upsert builds a full graph tree in a sibling directory, fsyncs it, and atomically swaps it into place (conflict/timeout/persist error modes preserved).
Expand Down Expand Up @@ -109,6 +109,7 @@ Configuration matrix (server env vars)
- LLM_PROVIDER (litellm | openai)
- If `LLM_PROVIDER=litellm`: LITELLM_BASE_URL and LITELLM_MASTER_KEY
- If `LLM_PROVIDER=openai`: OPENAI_API_KEY (OPENAI_BASE_URL optional)
- NOTIFICATIONS_GRPC_ADDR (gRPC endpoint for the notifications bridge)
- Optional
- GRAPH_REPO_PATH (default ./data/graph)
- GRAPH_BRANCH (default main)
Expand All @@ -117,6 +118,7 @@ Configuration matrix (server env vars)
- VAULT_ADDR, VAULT_TOKEN
- DOCKER_MIRROR_URL (default http://registry-mirror:5000)
- DOCKER_RUNNER_GRPC_HOST, DOCKER_RUNNER_GRPC_PORT (or DOCKER_RUNNER_PORT), DOCKER_RUNNER_SHARED_SECRET (required for docker-runner), plus optional DOCKER_RUNNER_TIMEOUT_MS (default 30000), DOCKER_RUNNER_OPTIONAL (default true; set false to fail-fast), and DOCKER_RUNNER_CONNECT_* knobs (RETRY_BASE_DELAY_MS=500, RETRY_MAX_DELAY_MS=30000, RETRY_JITTER_MS=250, PROBE_INTERVAL_MS=30000, MAX_RETRIES=0 for unlimited background retries).
- NOTIFICATIONS_GRPC_DEADLINE_MS (default 3000)
- MCP_TOOLS_STALE_TIMEOUT_MS
- LANGGRAPH_CHECKPOINTER: postgres (default)
- POSTGRES_URL (postgres connection string)
Expand All @@ -139,6 +141,7 @@ Runbooks
- Verify: curl http://localhost:3010/api/templates; open UI; connect socket to observe node_status when provisioning.
- Docker Compose stack
- Services: postgres, vault (auto-init), registry-mirror.
- Notifications: gRPC-only service publishes to Redis (`NOTIFICATIONS_REDIS_URL`, default `redis://redis:6379`) on channel `NOTIFICATIONS_CHANNEL` (default `notifications.v1`); Socket.IO fan-out is handled by the gateway.
- Observability: Tracing services have been removed; follow upcoming observability docs for replacements.
- Vault init: vault/auto-init.sh populates root token/unseal keys; set VAULT_ENABLED=true and VAULT_ADDR/VAULT_TOKEN.
- Postgres checkpointer: LANGGRAPH_CHECKPOINTER defaults to postgres; configure POSTGRES_URL for the checkpointer connection.
Expand Down
6 changes: 4 additions & 2 deletions docs/technical-overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,15 @@ Design principles
- Container isolation per thread: Tools and MCP operations run in per-thread containers to isolate state.

Layers
- Application server: wires services, loads persisted graph, exposes minimal REST (templates/graph) and a Socket.IO stream for checkpoints.
- Application server: wires services, loads persisted graph, and publishes realtime graph events over gRPC.
- Notifications service: receives gRPC notifications from the platform-server and publishes them to Redis; the notifications gateway then fans the Redis channel out to Socket.IO clients.
- Graph runtime: live diff/apply engine enforcing reversible edges via ports and template registries.
- Templates: declarative registration of node factories and their ports.
- Triggers: external event sources (Slack, PR polling) that push messages into agents.
- Nodes: graph components like LLM invocation and memory.
- Tools: actions callable by the LLM (bash, GitHub clone, Slack message) and adapters.
- MCP: local server inside a workspace container with transport over docker exec.
- Services: infra clients and helpers (config, docker container provision, Prisma/Postgres, Slack, GitHub, checkpointer, sockets).
- Services: infra clients and helpers (config, docker container provision, Prisma/Postgres, Slack, GitHub, checkpointer, notifications publisher).

Workspace container platform
- containerProvider.staticConfig.platform: Optional; enum of `linux/amd64` or `linux/arm64`.
Expand Down Expand Up @@ -84,4 +85,5 @@ Defaults and toggles
How to Develop & Test
- Prereqs: Node.js 20+, pnpm 9+, Docker, Postgres
- Run server: pnpm --filter @agyn/platform-server dev
- Run notifications bridge: pnpm --filter @agyn/notifications dev
- Tests: pnpm --filter @agyn/platform-server test
5 changes: 3 additions & 2 deletions docs/ui/graph/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,14 @@
Data flow
- TemplatesProvider loads templates from `/graph/templates` (alias of `/api/templates`). Components consume capabilities to render controls.
- Initial node status fetched via `GET /graph/nodes/:id/status`.
- Realtime updates: listen to Socket.IO on the default namespace for `node_status` events. Do not poll when sockets are available.
- Realtime updates: consume Socket.IO events from the notifications service (default namespace). Do not poll when sockets are available.
- For dynamic-configurable nodes (e.g., MCP server), fetch JSON Schema via `GET /graph/nodes/:id/dynamic-config/schema` and render a dynamic form when `dynamicConfigReady` is true.
- Refer to docs/graph/status-updates.md for event shapes and sequencing.

Configuration
- Required environment variables:
- VITE_API_BASE_URL: Agents API base URL (use the origin only; the UI appends `/api` for REST calls and `/socket.io` for websockets)
- VITE_API_BASE_URL: Agents API base URL (use the origin only; the UI appends `/api` for REST calls)
- VITE_SOCKET_BASE_URL (optional): override the notifications Socket.IO origin; defaults to the API origin
- Tracing configuration has been removed; span data is no longer rendered in the builder sidebar.

Related docs
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
"test": "pnpm -r --workspace-concurrency=1 run --if-present test",
"convert-graphs": "pnpm --filter @agyn/graph-converter exec graph-converter",
"postinstall": "pnpm -r --if-present run prepare",
"proto:generate": "buf generate buf.build/agynio/api",
"proto:gen": "buf generate buf.build/agynio/api",
"deps:up:podman": "podman compose up -d"
},
"keywords": [],
Expand Down
22 changes: 22 additions & 0 deletions packages/notifications/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# syntax=docker/dockerfile:1.7

FROM node:20-alpine AS base
WORKDIR /app
# corepack supplies the pnpm version pinned by the repo's "packageManager" field.
RUN corepack enable

# Full install (dev deps included) so the build stage has its compiler toolchain.
# The previous --prod install here broke `pnpm build`, which needs devDependencies.
FROM base AS deps
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY tsconfig.base.json tsconfig.json ./
COPY packages/notifications/package.json packages/notifications/
RUN pnpm install --filter @agyn/notifications --frozen-lockfile

# Compile the service; emits packages/notifications/dist.
FROM deps AS build
COPY packages/notifications packages/notifications
RUN pnpm --filter @agyn/notifications build

# Production-only dependency tree for the runtime image.
FROM base AS prod-deps
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY packages/notifications/package.json packages/notifications/
RUN pnpm install --filter @agyn/notifications --prod --frozen-lockfile

FROM base AS runtime
ENV NODE_ENV=production
# NOTE(review): pnpm workspaces link package deps under
# packages/notifications/node_modules; confirm runtime module resolution works
# when only /app/node_modules is copied — `pnpm deploy` may be more robust.
COPY --from=prod-deps /app/node_modules node_modules
COPY --from=build /app/packages/notifications/dist dist
COPY packages/notifications/package.json package.json
CMD ["node", "dist/main.js"]
127 changes: 127 additions & 0 deletions packages/notifications/__tests__/grpc.server.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
import { randomUUID } from 'node:crypto';
import pino from 'pino';
import { describe, expect, it } from 'vitest';
import { create } from '@bufbuild/protobuf';
import type { HandlerContext } from '@connectrpc/connect';
import { GrpcServer } from '../src/grpc';
import type { NotificationFanout } from '../src/redis-notifications';
import type { PublishedNotification } from '../src/types';
import {
PublishRequestSchema,
SubscribeRequestSchema,
} from '../src/proto/gen/agynio/api/notifications/v1/notifications_pb.js';

/**
 * In-memory NotificationFanout test double: records everything passed to
 * publish() and lets tests deliver notifications to subscribers directly,
 * without a real Redis backend.
 */
class StubFanout implements NotificationFanout {
  /** Every notification passed to publish(), in call order. */
  readonly published: PublishedNotification[] = [];
  private readonly listeners = new Set<(notification: PublishedNotification) => void>();

  /** Records the notification, then delivers it to current subscribers. */
  async publish(notification: PublishedNotification): Promise<void> {
    this.published.push(notification);
    // Reuse emit() instead of duplicating the listener fan-out loop.
    this.emit(notification);
  }

  /** Registers a listener; the returned callback unsubscribes it. */
  subscribe(listener: (notification: PublishedNotification) => void): () => void {
    this.listeners.add(listener);
    return () => {
      this.listeners.delete(listener);
    };
  }

  /** Test hook: deliver a notification to subscribers without recording it. */
  emit(notification: PublishedNotification): void {
    for (const listener of this.listeners) {
      listener(notification);
    }
  }
}

/**
 * Constructs a GrpcServer bound to an ephemeral localhost port, wired to the
 * given stub fanout and a silent logger so tests produce no output.
 */
const makeServer = (fanout: StubFanout) => {
  const silentLogger = pino({ level: 'silent' });
  return new GrpcServer({
    host: '127.0.0.1',
    port: 0, // let the OS pick a free port
    notifications: fanout,
    logger: silentLogger,
  });
};

/**
 * Builds a minimal HandlerContext for invoking unary handlers directly.
 *
 * @param signal - abort signal for the simulated call; defaults to a
 *   never-aborted signal, preserving the original zero-argument behavior.
 */
const makeContext = (signal: AbortSignal = new AbortController().signal): HandlerContext => ({
  signal,
  values: new Map(),
  requestHeader: new Headers(),
});

describe('GrpcServer publish', () => {
  it('rejects invalid publish requests', async () => {
    const stub = new StubFanout();
    const grpc = makeServer(stub);
    // An empty rooms list makes the request invalid.
    const badRequest = create(PublishRequestSchema, {
      event: 'agent.updated',
      rooms: [],
      source: 'platform-server',
    });

    await expect(grpc.publish(badRequest, makeContext())).rejects.toThrowError(
      'invalid publish request',
    );
    await grpc.close();
  });

  it('publishes notifications with generated identifiers', async () => {
    const stub = new StubFanout();
    const grpc = makeServer(stub);
    const request = create(PublishRequestSchema, {
      event: 'agent.updated',
      rooms: ['graph'],
      source: 'platform-server',
      payload: { status: 'ready' },
    });

    const response = await grpc.publish(request, makeContext());

    // The server mints a UUID per published notification.
    expect(response.id).toMatch(/[0-9a-f-]{36}/i);
    expect(stub.published).toHaveLength(1);
    expect(stub.published[0]).toMatchObject({
      event: 'agent.updated',
      rooms: ['graph'],
      source: 'platform-server',
      payload: { status: 'ready' },
    });
    // The id returned to the caller matches the one fanned out.
    expect(stub.published[0]?.id).toBe(response.id);

    await grpc.close();
  });
});

describe('GrpcServer subscribe', () => {
  it('streams redis-delivered notifications to subscribers', async () => {
    const fanout = new StubFanout();
    const server = makeServer(fanout);
    const abortController = new AbortController();
    // Minimal context: subscribe only reads the abort signal here.
    const context = { signal: abortController.signal };
    const request = create(SubscribeRequestSchema, {});

    const iterable = server.subscribe(request, context);
    const iterator = iterable[Symbol.asyncIterator]();

    // Request the next item BEFORE emitting, so the stream is already
    // awaiting delivery when the notification arrives.
    const pending = iterator.next();
    const notification: PublishedNotification = {
      id: randomUUID(),
      event: 'agent.updated',
      rooms: ['graph'],
      source: 'platform-server',
      payload: { status: 'ready' },
      createdAt: new Date('2025-01-02T03:04:05Z'),
    };
    fanout.emit(notification);

    const result = await pending;
    expect(result.done).toBe(false);
    expect(result.value?.envelope?.event).toBe('agent.updated');
    expect(result.value?.envelope?.rooms).toEqual(['graph']);
    expect(result.value?.envelope?.source).toBe('platform-server');

    // Aborting the call should terminate the stream gracefully
    // (done: true, not a rejection).
    abortController.abort();
    const final = await iterator.next();
    expect(final.done).toBe(true);

    await server.close();
  });
});
Loading
Loading