+
+
+**Agent Kit Generation** is an AI-powered content generation system built with [Lamatic.ai](https://lamatic.ai). It uses intelligent workflows to generate text, images, and JSON content through a modern Next.js interface with markdown rendering support.
+
+[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/Lamatic/AgentKit&root-directory=kits/agentic/generation&env=AGENTIC_GENERATE_CONTENT,LAMATIC_API_URL,LAMATIC_PROJECT_ID,LAMATIC_API_KEY&envDescription=Your%20Lamatic%20Generation%20keys%20are%20required.&envLink=https://lamatic.ai/templates/agentkits/agentic/agent-kit-generation)
+
+---
+
+## Lamatic Setup (Pre and Post)
+
+Before running this project, you must build and deploy the flow in Lamatic, then wire its config into this codebase.
+
+Pre: Build in Lamatic
+1. Sign in or sign up at https://lamatic.ai
+2. Create a project (if you don't have one yet)
+3. Click "+ New Flow" and select "Templates"
+4. Select the 'Generation' agent kit
+5. Configure providers/tools/inputs as prompted
+6. Deploy the kit in Lamatic and obtain your .env keys
+7. Copy the keys from your studio
+
+Post: Wire into this repo
+1. Create a .env file and set the keys
+2. Install and run locally:
+ - npm install
+ - npm run dev
+3. Deploy (Vercel recommended):
+ - Import your repo, set the project's Root Directory (if applicable)
+ - Add env vars in Vercel (same as your .env)
+ - Deploy and test your live URL
+
+Notes
+- Coming soon: single-click export and "Connect Git" in Lamatic to push config directly to your repo.
+
+---
+
+## 🚀 Setup
+## Required Keys and Config
+
+You'll need these things to run this project locally:
+
+1. **.env Keys** — get them from your [Lamatic account](https://lamatic.ai) post kit deployment.
+
+
+| Item | Purpose | Where to Get It |
+| ----------------- | -------------------------------------------- | ----------------------------------------------- |
+| .env Key | Authentication for Lamatic AI APIs and Orchestration | [lamatic.ai](https://lamatic.ai) |
+
+### 1. Environment Variables
+
+Create `.env.local` with:
+
+```bash
+# Lamatic
+AGENTIC_GENERATE_CONTENT = "AGENTIC_GENERATE_CONTENT Flow ID"
+LAMATIC_API_URL = "LAMATIC_API_URL"
+LAMATIC_PROJECT_ID = "LAMATIC_PROJECT_ID"
+LAMATIC_API_KEY = "LAMATIC_API_KEY"
+```
+
+### 2. Install & Run
+
+```bash
+npm install
+npm run dev
+# Open http://localhost:3000
+```
+---
+
+## 📁 Repo Structure
+
+```
+/actions
+ └── orchestrate.ts # Lamatic workflow orchestration
+/app
+ └── page.tsx # Main generation form UI
+/components
+ ├── header.tsx # Header component with navigation
+ └── ui # shadcn/ui components
+/lib
+ └── lamatic-client.ts # Lamatic SDK client
+/public
+ └── lamatic-logo.png # Lamatic branding
+/flows
+ └── ... # Lamatic Flows
+/package.json # Dependencies & scripts
+```
+
+---
+
+## 🤝 Contributing
+
+We welcome contributions! Open an issue or PR in this repo.
+
+---
+
+## 📄 License
+
+MIT License — see [LICENSE](./LICENSE).
diff --git a/kits/agentic/mockai/actions/orchestrate.ts b/kits/agentic/mockai/actions/orchestrate.ts
new file mode 100644
index 00000000..927824c3
--- /dev/null
+++ b/kits/agentic/mockai/actions/orchestrate.ts
@@ -0,0 +1,125 @@
+"use server"
+
+import { lamaticClient } from "@/lib/lamatic-client"
+import { config } from "../orchestrate.js"
+
+/**
+ * Generates tailored interview questions via the Lamatic "question" flow.
+ *
+ * @param jobTitle - Title of the role the candidate is interviewing for.
+ * @param yearsOfExp - Candidate's years of experience; fractional values are
+ *   truncated toward zero before being sent to the flow.
+ * @param jobDesc - Free-form job description used to tailor the questions
+ *   (may be an empty string).
+ * @returns `{ success: true, questions }` on success, otherwise
+ *   `{ success: false, error }` with a user-presentable message. Never throws.
+ */
+export async function generateQuestions(
+  jobTitle: string,
+  yearsOfExp: number,
+  jobDesc: string,
+): Promise<{
+  success: boolean
+  questions?: string[]
+  error?: string
+}> {
+  try {
+    console.log("[v0] Generating questions with:", { jobTitle, yearsOfExp, jobDesc })
+
+    const flow = config.flows.question
+
+    if (!flow.workflowId) {
+      throw new Error("Workflow ID not found in config for question flow.")
+    }
+
+    const inputs = {
+      jobTitle,
+      // Truncate directly instead of the previous number -> string -> parseInt
+      // round trip (radix-less parseInt mis-parses large numbers rendered in
+      // e-notation, e.g. 1e21 -> "1e+21" -> 1).
+      yearsOfExp: Math.trunc(yearsOfExp),
+      jobDesc,
+    }
+
+    console.log("[v0] Sending inputs:", inputs)
+
+    const resData = await lamaticClient.executeFlow(flow.workflowId, inputs)
+    console.log("[v0] Raw response:", resData)
+
+    const questions = resData?.result?.data
+
+    // The flow must return a non-empty array of question strings.
+    if (!questions || !Array.isArray(questions) || questions.length === 0) {
+      throw new Error("No questions found in response")
+    }
+
+    return {
+      success: true,
+      questions,
+    }
+  } catch (error) {
+    console.error("[v0] Generation error:", error)
+
+    // Map low-level failures to user-friendly messages.
+    let errorMessage = "Unknown error occurred"
+    if (error instanceof Error) {
+      errorMessage = error.message
+      if (error.message.includes("fetch failed")) {
+        errorMessage =
+          "Network error: Unable to connect to the service. Please check your internet connection and try again."
+      } else if (error.message.includes("API key")) {
+        errorMessage = "Authentication error: Please check your API configuration."
+      }
+    }
+
+    return {
+      success: false,
+      error: errorMessage,
+    }
+  }
+}
+
+/**
+ * Sends the candidate's question/answer pairs to the Lamatic "feedback" flow
+ * and returns structured feedback: strengths, weaknesses, and a numeric rating.
+ *
+ * @param candidateResponses - One entry per interview question, pairing the
+ *   question text with the candidate's answer.
+ * @returns `{ success: true, feedback }` on success, otherwise
+ *   `{ success: false, error }` with a user-presentable message. Never throws.
+ */
+export async function evaluateAnswers(
+  candidateResponses: { question: string, answers: string }[]
+): Promise<{
+  success: boolean
+  feedback?: { positives: string[], negatives: string[], rating: number }
+  error?: string
+}> {
+  try {
+    console.log("[v0] Evaluating answers with:", { candidateResponses })
+
+    const feedbackFlow = config.flows.feedback
+    if (!feedbackFlow.workflowId) {
+      throw new Error("Workflow ID not found in config for feedback flow.")
+    }
+
+    // Pass the Q&A pairs through unchanged; the flow expects this exact shape.
+    const payload = {
+      candidateResponses
+    }
+    console.log("[v0] Sending inputs:", JSON.stringify(payload))
+
+    const response = await lamaticClient.executeFlow(feedbackFlow.workflowId, payload)
+    console.log("[v0] Raw response:", response)
+
+    // A numeric rating is the minimum we need to treat the result as valid.
+    const evaluation = response?.result
+    if (!evaluation || typeof evaluation.rating !== 'number') {
+      throw new Error("No feedback found in response")
+    }
+
+    return {
+      success: true,
+      feedback: {
+        positives: evaluation.positives || [],
+        negatives: evaluation.negatives || [],
+        rating: evaluation.rating,
+      },
+    }
+  } catch (error) {
+    console.error("[v0] Evaluation error:", error)
+
+    // Translate known low-level failures into user-friendly messages.
+    const toFriendlyMessage = (e: unknown): string => {
+      if (!(e instanceof Error)) return "Unknown error occurred"
+      if (e.message.includes("fetch failed")) {
+        return "Network error: Unable to connect to the service. Please check your internet connection and try again."
+      }
+      if (e.message.includes("API key")) {
+        return "Authentication error: Please check your API configuration."
+      }
+      return e.message
+    }
+
+    return {
+      success: false,
+      error: toFriendlyMessage(error),
+    }
+  }
+}
diff --git a/kits/agentic/mockai/app/globals.css b/kits/agentic/mockai/app/globals.css
new file mode 100644
index 00000000..dc2aea17
--- /dev/null
+++ b/kits/agentic/mockai/app/globals.css
@@ -0,0 +1,125 @@
+/*
+ * Global stylesheet: Tailwind v4 setup plus shadcn/ui design tokens.
+ * Colors are OKLCH CSS variables defined for light (:root) and dark (.dark)
+ * palettes, then mapped to Tailwind theme slots via @theme inline.
+ */
+@import 'tailwindcss';
+@import 'tw-animate-css';
+
+/* Enables the `dark:` variant whenever an ancestor carries the .dark class. */
+@custom-variant dark (&:is(.dark *));
+
+/* Light-mode design tokens. */
+:root {
+ --background: oklch(1 0 0);
+ --foreground: oklch(0.145 0 0);
+ --card: oklch(1 0 0);
+ --card-foreground: oklch(0.145 0 0);
+ --popover: oklch(1 0 0);
+ --popover-foreground: oklch(0.145 0 0);
+ --primary: oklch(0.205 0 0);
+ --primary-foreground: oklch(0.985 0 0);
+ --secondary: oklch(0.97 0 0);
+ --secondary-foreground: oklch(0.205 0 0);
+ --muted: oklch(0.97 0 0);
+ --muted-foreground: oklch(0.556 0 0);
+ --accent: oklch(0.97 0 0);
+ --accent-foreground: oklch(0.205 0 0);
+ --destructive: oklch(0.577 0.245 27.325);
+ /* NOTE(review): destructive-foreground equals destructive here — text on a
+    destructive background would be invisible; confirm this is intentional. */
+ --destructive-foreground: oklch(0.577 0.245 27.325);
+ --border: oklch(0.922 0 0);
+ --input: oklch(0.922 0 0);
+ --ring: oklch(0.708 0 0);
+ --chart-1: oklch(0.646 0.222 41.116);
+ --chart-2: oklch(0.6 0.118 184.704);
+ --chart-3: oklch(0.398 0.07 227.392);
+ --chart-4: oklch(0.828 0.189 84.429);
+ --chart-5: oklch(0.769 0.188 70.08);
+ --radius: 0.625rem;
+ --sidebar: oklch(0.985 0 0);
+ --sidebar-foreground: oklch(0.145 0 0);
+ --sidebar-primary: oklch(0.205 0 0);
+ --sidebar-primary-foreground: oklch(0.985 0 0);
+ --sidebar-accent: oklch(0.97 0 0);
+ --sidebar-accent-foreground: oklch(0.205 0 0);
+ --sidebar-border: oklch(0.922 0 0);
+ --sidebar-ring: oklch(0.708 0 0);
+}
+
+/* Dark-mode overrides for the same token set. */
+.dark {
+ --background: oklch(0.145 0 0);
+ --foreground: oklch(0.985 0 0);
+ --card: oklch(0.145 0 0);
+ --card-foreground: oklch(0.985 0 0);
+ --popover: oklch(0.145 0 0);
+ --popover-foreground: oklch(0.985 0 0);
+ --primary: oklch(0.985 0 0);
+ --primary-foreground: oklch(0.205 0 0);
+ --secondary: oklch(0.269 0 0);
+ --secondary-foreground: oklch(0.985 0 0);
+ --muted: oklch(0.269 0 0);
+ --muted-foreground: oklch(0.708 0 0);
+ --accent: oklch(0.269 0 0);
+ --accent-foreground: oklch(0.985 0 0);
+ --destructive: oklch(0.396 0.141 25.723);
+ --destructive-foreground: oklch(0.637 0.237 25.331);
+ --border: oklch(0.269 0 0);
+ --input: oklch(0.269 0 0);
+ --ring: oklch(0.439 0 0);
+ --chart-1: oklch(0.488 0.243 264.376);
+ --chart-2: oklch(0.696 0.17 162.48);
+ --chart-3: oklch(0.769 0.188 70.08);
+ --chart-4: oklch(0.627 0.265 303.9);
+ --chart-5: oklch(0.645 0.246 16.439);
+ --sidebar: oklch(0.205 0 0);
+ --sidebar-foreground: oklch(0.985 0 0);
+ --sidebar-primary: oklch(0.488 0.243 264.376);
+ --sidebar-primary-foreground: oklch(0.985 0 0);
+ --sidebar-accent: oklch(0.269 0 0);
+ --sidebar-accent-foreground: oklch(0.985 0 0);
+ --sidebar-border: oklch(0.269 0 0);
+ --sidebar-ring: oklch(0.439 0 0);
+}
+
+/* Expose the raw variables to Tailwind as theme colors, fonts, and radii. */
+@theme inline {
+ --font-sans: 'Geist', 'Geist Fallback';
+ --font-mono: 'Geist Mono', 'Geist Mono Fallback';
+ --color-background: var(--background);
+ --color-foreground: var(--foreground);
+ --color-card: var(--card);
+ --color-card-foreground: var(--card-foreground);
+ --color-popover: var(--popover);
+ --color-popover-foreground: var(--popover-foreground);
+ --color-primary: var(--primary);
+ --color-primary-foreground: var(--primary-foreground);
+ --color-secondary: var(--secondary);
+ --color-secondary-foreground: var(--secondary-foreground);
+ --color-muted: var(--muted);
+ --color-muted-foreground: var(--muted-foreground);
+ --color-accent: var(--accent);
+ --color-accent-foreground: var(--accent-foreground);
+ --color-destructive: var(--destructive);
+ --color-destructive-foreground: var(--destructive-foreground);
+ --color-border: var(--border);
+ --color-input: var(--input);
+ --color-ring: var(--ring);
+ --color-chart-1: var(--chart-1);
+ --color-chart-2: var(--chart-2);
+ --color-chart-3: var(--chart-3);
+ --color-chart-4: var(--chart-4);
+ --color-chart-5: var(--chart-5);
+ /* Radius scale derived from the single --radius token. */
+ --radius-sm: calc(var(--radius) - 4px);
+ --radius-md: calc(var(--radius) - 2px);
+ --radius-lg: var(--radius);
+ --radius-xl: calc(var(--radius) + 4px);
+ --color-sidebar: var(--sidebar);
+ --color-sidebar-foreground: var(--sidebar-foreground);
+ --color-sidebar-primary: var(--sidebar-primary);
+ --color-sidebar-primary-foreground: var(--sidebar-primary-foreground);
+ --color-sidebar-accent: var(--sidebar-accent);
+ --color-sidebar-accent-foreground: var(--sidebar-accent-foreground);
+ --color-sidebar-border: var(--sidebar-border);
+ --color-sidebar-ring: var(--sidebar-ring);
+}
+
+/* App-wide element defaults. */
+@layer base {
+ * {
+ @apply border-border outline-ring/50;
+ }
+ body {
+ @apply bg-background text-foreground;
+ }
+}
diff --git a/kits/agentic/mockai/app/layout.tsx b/kits/agentic/mockai/app/layout.tsx
new file mode 100644
index 00000000..7653455a
--- /dev/null
+++ b/kits/agentic/mockai/app/layout.tsx
@@ -0,0 +1,27 @@
+import type { Metadata } from 'next'
+import { Geist, Geist_Mono } from 'next/font/google'
+import { Analytics } from '@vercel/analytics/next'
+import './globals.css'
+
+// Instantiate the Geist font faces. Underscore-prefixed: created here but not
+// visibly applied in this view — presumably their classNames were on the
+// stripped markup below; TODO confirm against the original file.
+const _geist = Geist({ subsets: ["latin"] });
+const _geistMono = Geist_Mono({ subsets: ["latin"] });
+
+// App-wide <head> metadata consumed by Next.js.
+export const metadata: Metadata = {
+ title: 'Mockai: Your personalized mock interviewer',
+ description: 'Practice mock interviews cost-free and effortlessly at one place',
+}
+
+// Root layout wrapping every route; renders the page tree passed as children.
+// NOTE(review): the returned JSX appears to have been stripped during
+// extraction — the usual <html>/<body> wrappers (and the imported <Analytics>)
+// are missing here. Restore from the original file; do not ship as-is.
+export default function RootLayout({
+ children,
+}: Readonly<{
+ children: React.ReactNode
+}>) {
+ return (
+
+
+ {children}
+
+
+
+ )
+}
diff --git a/kits/agentic/mockai/app/page.tsx b/kits/agentic/mockai/app/page.tsx
new file mode 100644
index 00000000..3b68cb5d
--- /dev/null
+++ b/kits/agentic/mockai/app/page.tsx
@@ -0,0 +1,579 @@
+"use client"
+import type React from "react"
+import { useState, useEffect, useRef } from "react"
+import { Button } from "@/components/ui/button"
+import { Textarea } from "@/components/ui/textarea"
+import { Card } from "@/components/ui/card"
+import { Input } from "@/components/ui/input"
+import { Loader2, Sparkles, Presentation, CheckCircle2, ChevronRight, RefreshCw, Briefcase, ThumbsUp, ThumbsDown, Target, Mic, MicOff, Activity } from "lucide-react"
+import { generateQuestions, evaluateAnswers } from "@/actions/orchestrate"
+import { Header } from "@/components/header"
+
+// The three screens of the mock-interview flow, in order of progression.
+type Step = "setup" | "interview" | "feedback"
+
+// Upper bound on the length of a single answer (typed or dictated).
+const MAX_CHARS = 1500
+
+export default function InterviewPrepPage() {
+ const [step, setStep] = useState("setup")
+ const [isLoading, setIsLoading] = useState(false)
+ const [error, setError] = useState("")
+
+ // Form State
+ const [jobTitle, setJobTitle] = useState("")
+ const [jobDesc, setJobDesc] = useState("")
+ const [yearsOfExp, setYearsOfExp] = useState("")
+
+ // Interview State
+ const [questions, setQuestions] = useState([])
+ const [currentQuestionIndex, setCurrentQuestionIndex] = useState(0)
+ const [currentAnswer, setCurrentAnswer] = useState("")
+ const [candidateResponses, setCandidateResponses] = useState<{ question: string; answers: string }[]>([])
+
+ // Feedback State
+ const [feedback, setFeedback] = useState<{ positives: string[]; negatives: string[]; rating: number } | null>(null)
+
+ // Mic & Audio Analyzer State
+ const [isRecording, setIsRecording] = useState(false)
+ const [interimResult, setInterimResult] = useState("")
+ const [volume, setVolume] = useState(0)
+
+ const recognitionRef = useRef(null)
+ const audioContextRef = useRef(null)
+ const analyserRef = useRef(null)
+ const mediaStreamRef = useRef(null)
+ const animationRef = useRef(null)
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return () => {
+ stopRecording()
+ }
+ }, [])
+
+ const startRecording = async () => {
+ setError("")
+
+ try {
+ // 1. Setup Audio Visualizer
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+ mediaStreamRef.current = stream
+
+ const AudioContext = window.AudioContext || (window as any).webkitAudioContext
+ const audioCtx = new AudioContext()
+ audioContextRef.current = audioCtx
+
+ const analyser = audioCtx.createAnalyser()
+ analyser.fftSize = 256
+ analyserRef.current = analyser
+
+ const source = audioCtx.createMediaStreamSource(stream)
+ source.connect(analyser)
+
+ const dataArray = new Uint8Array(analyser.frequencyBinCount)
+
+ const updateVolume = () => {
+ if (!analyserRef.current) return
+ analyserRef.current.getByteFrequencyData(dataArray)
+ let sum = 0
+ for (let i = 0; i < dataArray.length; i++) {
+ sum += dataArray[i]
+ }
+ const avg = sum / dataArray.length
+ setVolume(avg) // 0 to ~100
+ animationRef.current = requestAnimationFrame(updateVolume)
+ }
+ updateVolume()
+
+ // 2. Setup Native Web Speech API
+ const SpeechRecognition = window.SpeechRecognition || (window as any).webkitSpeechRecognition
+ if (SpeechRecognition) {
+ const recognition = new SpeechRecognition()
+ recognition.continuous = true
+ recognition.interimResults = true // Crucial for real-time updates!
+ recognition.lang = "en-US"
+
+ recognition.onstart = () => {
+ setIsRecording(true)
+ }
+
+ recognition.onresult = (event: any) => {
+ let interim = ""
+ let finalTr = ""
+
+ for (let i = event.resultIndex; i < event.results.length; ++i) {
+ if (event.results[i].isFinal) {
+ finalTr += event.results[i][0].transcript
+ } else {
+ interim += event.results[i][0].transcript
+ }
+ }
+
+ if (finalTr) {
+ setCurrentAnswer((prev) => {
+ const newAns = prev + (prev.length > 0 && !prev.endsWith(" ") ? " " : "") + finalTr
+ return newAns.slice(0, MAX_CHARS)
+ })
+ }
+ setInterimResult(interim) // Pushes interim results instantly
+ }
+
+ recognition.onerror = (event: any) => {
+ console.error("Speech recognition error payload:", event.error)
+ if (event.error === 'no-speech') {
+ // Ignore silent periods
+ return;
+ }
+
+ if (event.error === 'not-allowed') {
+ setError("Mic blocked: Must use localhost or HTTPS, and allow permissions.")
+ } else if (event.error === 'network') {
+ setError("Network error: Try using localhost instead of your IP address.")
+ } else if (event.error !== 'aborted') {
+ setError(`Speech API error: ${event.error}`)
+ }
+
+ if (event.error !== 'no-speech' && event.error !== 'aborted') {
+ stopRecording()
+ }
+ }
+
+ recognition.onend = () => {
+ stopRecording()
+ }
+
+ recognitionRef.current = recognition
+ recognition.start()
+ } else {
+ stopRecording()
+ setError("Your browser does not support native Speech Recognition. Please try Google Chrome.")
+ }
+ } catch (err) {
+ console.error(err)
+ setError("Failed to access your microphone.")
+ stopRecording()
+ }
+ }
+
+ const stopRecording = () => {
+ setIsRecording(false)
+ setInterimResult("")
+
+ if (recognitionRef.current) {
+ recognitionRef.current.stop()
+ recognitionRef.current = null
+ }
+
+ if (animationRef.current) {
+ cancelAnimationFrame(animationRef.current)
+ animationRef.current = null
+ }
+
+ if (mediaStreamRef.current) {
+ mediaStreamRef.current.getTracks().forEach((track: any) => track.stop())
+ mediaStreamRef.current = null
+ }
+
+ if (audioContextRef.current && audioContextRef.current.state !== "closed") {
+ audioContextRef.current.close().catch(console.error)
+ audioContextRef.current = null
+ }
+
+ setVolume(0)
+ }
+
+ const toggleRecording = () => {
+ if (isRecording) {
+ stopRecording()
+ } else {
+ startRecording()
+ }
+ }
+
+ const handleStartInterview = async (e: React.FormEvent) => {
+ e.preventDefault()
+
+ // ✨ Job Description is now optional
+ if (!jobTitle.trim() || !yearsOfExp.trim()) {
+ setError("Please fill in Job Title and Years of Experience.")
+ return
+ }
+
+ setIsLoading(true)
+ setError("")
+
+ try {
+ const exp = parseInt(yearsOfExp) || 0
+ const response = await generateQuestions(jobTitle, exp, jobDesc)
+
+ if (response.success && response.questions) {
+ setQuestions(response.questions)
+ setStep("interview")
+ } else {
+ setError(response.error || "Failed to generate questions. Check Lamatic configuration.")
+ }
+ } catch (err) {
+ setError(err instanceof Error ? err.message : "An error occurred")
+ } finally {
+ setIsLoading(false)
+ }
+ }
+
+ const handleNextQuestion = async () => {
+ const finalAnswer = currentAnswer + (interimResult ? (currentAnswer.endsWith(" ") ? "" : " ") + interimResult : "")
+
+ if (!finalAnswer.trim()) {
+ setError("Please provide an answer before moving on.")
+ return
+ }
+ setError("")
+
+ if (isRecording) {
+ stopRecording()
+ }
+
+ const newResponses = [...candidateResponses, { question: questions[currentQuestionIndex], answers: finalAnswer }]
+
+ if (currentQuestionIndex < questions.length - 1) {
+ setCandidateResponses(newResponses)
+ setCurrentAnswer("")
+ setCurrentQuestionIndex((prev) => prev + 1)
+ } else {
+ setCandidateResponses(newResponses)
+ setIsLoading(true)
+
+ try {
+ const response = await evaluateAnswers(newResponses)
+ if (response.success && response.feedback) {
+ setFeedback(response.feedback)
+ setStep("feedback")
+ } else {
+ setError(response.error || "Failed to evaluate answers.")
+ }
+ } catch (err) {
+ setError(err instanceof Error ? err.message : "An error occurred")
+ } finally {
+ setIsLoading(false)
+ }
+ }
+ }
+
+ const handleReset = () => {
+ setStep("setup")
+ setJobTitle("")
+ setJobDesc("")
+ setYearsOfExp("")
+ setQuestions([])
+ setCurrentQuestionIndex(0)
+ setCurrentAnswer("")
+ setCandidateResponses([])
+ setFeedback(null)
+ setError("")
+ if (isRecording) {
+ stopRecording()
+ }
+ }
+
+ const renderSetup = () => (
+
+
+
+
+
+
+ Ace Your Next Interview
+
+
+ Tell us about the role. Our AI agent will dynamically generate customized questions, guide you through a mock interview, and provide actionable feedback.
+
+ Here is a detailed breakdown of your performance from our AI evaluator.
+
+
+
+
+
+
+
+ Global Assessment
+
+
+ Your answers have been analyzed based on technical accuracy, clarity, and professionalism. Review your key strengths and areas for improvement below.
+