= {
+ inline: "inline",
+ gallery: "gallery",
+ stacked: "stacked",
};
/** Tool name to language mapping for syntax highlighting */
@@ -122,6 +137,18 @@ function ToolExecutionStepComponent({
[execution.inputArtifacts]
);
+ // Extract display selection metadata for display_artifacts tool
+ const displayMeta = useMemo(() => {
+ if (execution.toolName !== "display_artifacts") return null;
+ const sel = execution.outputArtifacts.find((a) => a.type === "display_selection");
+ if (!sel) return null;
+ const data = sel.data as DisplaySelectionData;
+ return {
+ count: data.artifactIds.length,
+ layout: LAYOUT_LABELS[data.layout] || data.layout,
+ };
+ }, [execution.toolName, execution.outputArtifacts]);
+
return (
{/* Timeline connector line */}
@@ -164,6 +191,14 @@ function ToolExecutionStepComponent({
)}
+ {/* Display artifacts metadata */}
+ {displayMeta && (
+
+ Showing {displayMeta.count} {displayMeta.count === 1 ? "artifact" : "artifacts"}
+ {displayMeta.layout !== "inline" && <> · {displayMeta.layout}</>}
+
+ )}
+
{/* Inline code preview - always visible */}
{inlineCode && (
diff --git a/ui/src/components/ToolIcons/ToolIcons.tsx b/ui/src/components/ToolIcons/ToolIcons.tsx
index 0ba0fc4..e0804c5 100644
--- a/ui/src/components/ToolIcons/ToolIcons.tsx
+++ b/ui/src/components/ToolIcons/ToolIcons.tsx
@@ -1,7 +1,7 @@
/**
* ToolIcons - Custom icons for tool types
*
- * These icons are used across ToolsMenu, ToolCallIndicator, ExecutionSummaryBar,
+ * These icons are used across ToolsMenu, ExecutionSummaryBar,
* and ArtifactThumbnail for consistent tool identification.
*/
@@ -228,3 +228,23 @@ export const TOOL_SHORT_NAMES: Record<string, string> = {
export function getToolShortName(toolId: string): string {
return TOOL_SHORT_NAMES[toolId] || toolId;
}
+
+/** Tool status labels for in-progress display */
+const TOOL_STATUS_LABELS: Record<string, string> = {
+ file_search: "Searching documents",
+ code_interpreter: "Running Python",
+ js_code_interpreter: "Running JavaScript",
+ sql_query: "Running SQL",
+ chart_render: "Rendering chart",
+ web_search: "Searching web",
+ web_fetch: "Fetching URL",
+ wikipedia: "Searching Wikipedia",
+ wikidata: "Querying Wikidata",
+ sub_agent: "Running agent",
+ mcp: "Calling tool",
+};
+
+/** Get human-readable status label for a running tool */
+export function getToolStatusLabel(toolId: string, toolName?: string): string {
+ return TOOL_STATUS_LABELS[toolId] || (toolName ? `Calling ${toolName}` : "Calling function");
+}
diff --git a/ui/src/components/ToolIcons/index.ts b/ui/src/components/ToolIcons/index.ts
index 4e6d6cf..a8e743f 100644
--- a/ui/src/components/ToolIcons/index.ts
+++ b/ui/src/components/ToolIcons/index.ts
@@ -6,5 +6,6 @@ export {
TOOL_SHORT_NAMES,
getToolIcon,
getToolShortName,
+ getToolStatusLabel,
type ToolIconComponent,
} from "./ToolIcons";
diff --git a/ui/src/components/chat-types.ts b/ui/src/components/chat-types.ts
index a1f5355..33bcf03 100644
--- a/ui/src/components/chat-types.ts
+++ b/ui/src/components/chat-types.ts
@@ -29,6 +29,13 @@ export type ToolExecutionStatus = ToolExecutionStatusImport;
export type ToolExecution = ToolExecutionImport;
export type ToolExecutionRound = ToolExecutionRoundImport;
+/** A completed round of multi-round tool execution, bundling reasoning, content, and tool execution */
+export interface CompletedRound {
+ reasoning?: string;
+ content?: string;
+ toolExecution?: ToolExecutionRound;
+}
+
/** History mode for conversation context sent to models */
export type HistoryMode = "all" | "same-model";
@@ -396,7 +403,7 @@ export interface MessageUsage {
cachedTokens?: number;
/** Reasoning tokens count (if applicable) */
reasoningTokens?: number;
- /** Reasoning content (extended thinking output) */
+ /** Reasoning content (extended thinking output — last/only round) */
reasoningContent?: string;
// Timing stats (captured client-side during streaming)
@@ -823,6 +830,8 @@ export interface ChatMessage {
artifacts?: Artifact[];
/** Tool execution timeline for multi-turn tool calling (assistant messages only) */
toolExecutionRounds?: ToolExecutionRound[];
+ /** Completed rounds bundling reasoning, content, and tool execution (multi-round tool execution) */
+ completedRounds?: CompletedRound[];
/** Debug message ID for looking up debug info in debugStore (assistant messages only) */
debugMessageId?: string;
}
diff --git a/ui/src/pages/chat/useChat.ts b/ui/src/pages/chat/useChat.ts
index eb8bc3c..d6bfba2 100644
--- a/ui/src/pages/chat/useChat.ts
+++ b/ui/src/pages/chat/useChat.ts
@@ -9,6 +9,7 @@ import {
} from "@/stores/conversationStore";
import { useDebugStore } from "@/stores/debugStore";
import type {
+ CompletedRound,
ConversationMode,
ModeConfig,
MessageModeMetadata,
@@ -39,6 +40,7 @@ import {
createMCPToolName,
type ToolExecutorContext,
} from "./utils/toolExecutors";
+import { getToolStatusLabel } from "@/components/ToolIcons";
import { useMCPStore } from "@/stores/mcpStore";
import {
sendChainedMode,
@@ -202,8 +204,12 @@ const MAX_TOOL_ITERATIONS = 5;
/** Result from streaming a response, including any tool calls */
interface StreamResponseResult {
content: string;
+ /** Whether any output_text deltas were received (vs reasoning-only fallback) */
+ hasOutputText: boolean;
usage?: MessageUsage;
reasoningContent?: string;
+ /** Per-round reasoning, content, and tool execution for multi-round tool execution */
+ completedRounds?: CompletedRound[];
/** Tool calls detected during streaming (only when clientSideToolExecution is enabled) */
toolCalls?: ParsedToolCall[];
/** Tool execution timeline for progressive disclosure UI */
@@ -554,9 +560,10 @@ export function useChat({
name: "display_artifacts",
description:
"After executing tools that produce outputs (code, charts, tables, images), " +
- "call this to select which artifacts to display prominently to the user. " +
+ "call this to select which artifacts to display prominently to the user inline at this point in the conversation. " +
"Artifacts not selected will be available in a collapsed 'more outputs' section. " +
- "Always call this after your tool executions complete to curate the user's view. " +
+ "Call this each time you have outputs to show rather than waiting until the end — " +
+ "artifacts appear where you call this function, so call it right after the relevant tools complete. " +
"Choose the most relevant and interesting outputs - typically final results rather than intermediate steps.",
parameters: {
type: "object",
@@ -800,6 +807,7 @@ export function useChat({
let usage: MessageUsage | undefined;
// Fallback: extract tool calls from response.completed if not captured during streaming
let completedToolCalls: ParsedToolCall[] = [];
+ let hasOutputText = false;
// Capture response output for debugging
let responseOutput: unknown[] | undefined;
@@ -856,6 +864,7 @@ export function useChat({
// Handle different Responses API event types
if (event.type === "response.output_text.delta" && event.delta) {
+ hasOutputText = true;
content += event.delta;
streamingStore.appendContent(storeKey, event.delta);
} else if (
@@ -980,10 +989,11 @@ export function useChat({
const outputText =
event.response.output_text ||
event.response.output
- ?.flatMap((item) =>
- item.content
- ?.filter((c) => c.type === "output_text")
- .map((c) => c.text || "")
+ ?.flatMap(
+ (item) =>
+ item.content
+ ?.filter((c) => c.type === "output_text")
+ .map((c) => c.text || "") ?? []
)
.join("\n\n---\n\n");
@@ -1129,6 +1139,7 @@ export function useChat({
return {
content,
+ hasOutputText,
usage,
reasoningContent: reasoningContent || undefined,
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
@@ -1271,9 +1282,9 @@ export function useChat({
}
let currentInputItems = [...initialInputItems];
- let accumulatedContent = "";
let accumulatedUsage: MessageUsage | undefined;
let lastReasoningContent: string | undefined;
+ const allCompletedRounds: CompletedRound[] = [];
let iterations = 0;
// Track execution rounds locally (also mirrored in store for real-time UI)
@@ -1311,9 +1322,14 @@ export function useChat({
return iterations === 1
? null
: {
- content: accumulatedContent,
+ content: allCompletedRounds
+ .map((r) => r.content)
+ .filter(Boolean)
+ .join("\n\n---\n\n"),
+ hasOutputText: true,
usage: accumulatedUsage,
reasoningContent: lastReasoningContent,
+ completedRounds: allCompletedRounds.length > 0 ? allCompletedRounds : undefined,
toolExecutionRounds: executionRounds.length > 0 ? executionRounds : undefined,
};
}
@@ -1328,17 +1344,16 @@ export function useChat({
}
}
- // Accumulate content across rounds with separator.
- // Skip reasoning-only rounds (where content was set from reasoning fallback
- // rather than actual output text — e.g., rounds that only call display_artifacts).
- const isActualOutput = result.content && result.content !== result.reasoningContent;
- if (isActualOutput) {
- if (accumulatedContent) {
- accumulatedContent += "\n\n---\n\n" + result.content;
- } else {
- accumulatedContent = result.content;
- }
- }
+ // Track per-round data for interleaved reasoning/content display.
+ // Only count content as meaningful if it has non-whitespace text —
+ // models sometimes emit trivial whitespace before tool calls.
+ const hasNonTrivialContent = result.hasOutputText && !!result.content?.trim();
+ const roundData: CompletedRound = {};
+ if (result.reasoningContent) roundData.reasoning = result.reasoningContent;
+ if (hasNonTrivialContent) roundData.content = result.content;
+ allCompletedRounds.push(roundData);
+ // Push to streaming store so the UI can render interleaved rounds during streaming
+ streamingStore.pushCompletedRound(storeKey, roundData);
lastReasoningContent = result.reasoningContent;
// Accumulate usage (sum tokens across iterations)
@@ -1368,6 +1383,10 @@ export function useChat({
break;
}
+ // Resume streaming state so the UI shows between-round indicators
+ // (completeStream set isStreaming=false, but we have more rounds coming)
+ streamingStore.resumeStreaming(storeKey);
+
// Capture tool calls for debug
if (messageId) {
debugStore.setRoundToolCalls(
@@ -1416,6 +1435,7 @@ export function useChat({
inputArtifacts: [],
outputArtifacts: [],
round: roundNumber,
+ statusMessage: getToolStatusLabel(tc.name, tc.name),
}));
// Add executions to store for real-time UI updates
@@ -1494,6 +1514,13 @@ export function useChat({
};
executionRounds.push(round);
+ // Attach tool execution to the current completed round
+ if (allCompletedRounds.length > 0) {
+ const lastIdx = allCompletedRounds.length - 1;
+ allCompletedRounds[lastIdx] = { ...allCompletedRounds[lastIdx], toolExecution: round };
+ streamingStore.setCompletedRoundToolExecution(storeKey, round);
+ }
+
// Build continuation input with tool results
const toolResultItems = buildToolResultInputItems(result.toolCalls, toolResults);
@@ -1550,13 +1577,7 @@ export function useChat({
// Clear tool calls from streaming store before next iteration
streamingStore.clearToolCalls(storeKey);
- // Add separator to streaming store so the next round's appendContent
- // builds on top of the accumulated content with a visual break.
- // Only add if this round had actual text output (avoid double separators
- // from rounds that only had tool calls with no text).
- if (isActualOutput) {
- streamingStore.appendContent(storeKey, "\n\n---\n\n");
- }
+ // Content for the next round will stream fresh (pushCompletedRound resets it)
}
// Complete debug capture successfully
@@ -1564,10 +1585,17 @@ export function useChat({
debugStore.completeDebugCapture(messageId, model, true);
}
+ const flatContent = allCompletedRounds
+ .map((r) => r.content)
+ .filter(Boolean)
+ .join("\n\n---\n\n");
+
return {
- content: accumulatedContent,
+ content: flatContent,
+ hasOutputText: true,
usage: accumulatedUsage,
reasoningContent: lastReasoningContent,
+ completedRounds: allCompletedRounds.length > 0 ? allCompletedRounds : undefined,
toolExecutionRounds: executionRounds.length > 0 ? executionRounds : undefined,
};
},
@@ -1711,6 +1739,7 @@ export function useChat({
citations?: Citation[];
artifacts?: Artifact[];
toolExecutionRounds?: ToolExecutionRound[];
+ completedRounds?: CompletedRound[];
/** Debug message ID for looking up debug info */
debugMessageId?: string;
}> = [];
@@ -1740,6 +1769,7 @@ export function useChat({
citations: stream?.citations,
artifacts: stream?.artifacts,
toolExecutionRounds: stream?.toolExecutionRounds,
+ completedRounds: stream?.completedRounds.length ? stream.completedRounds : undefined,
});
}
}
@@ -1758,6 +1788,7 @@ export function useChat({
citations: stream?.citations,
artifacts: stream?.artifacts,
toolExecutionRounds: stream?.toolExecutionRounds,
+ completedRounds: stream?.completedRounds.length ? stream.completedRounds : undefined,
});
}
}
@@ -1778,6 +1809,7 @@ export function useChat({
citations: stream?.citations,
artifacts: stream?.artifacts,
toolExecutionRounds: stream?.toolExecutionRounds,
+ completedRounds: stream?.completedRounds.length ? stream.completedRounds : undefined,
// Only include debugMessageId for multiple mode (default)
debugMessageId: conversationMode === "multiple" ? debugMessageId : undefined,
});
@@ -1792,6 +1824,9 @@ export function useChat({
citations: stream?.citations,
artifacts: stream?.artifacts,
toolExecutionRounds: stream?.toolExecutionRounds,
+ completedRounds: stream?.completedRounds.length
+ ? stream.completedRounds
+ : undefined,
debugMessageId: conversationMode === "multiple" ? debugMessageId : undefined,
});
}
diff --git a/ui/src/pages/chat/utils/toolCallParser.ts b/ui/src/pages/chat/utils/toolCallParser.ts
index 990498b..76e3a78 100644
--- a/ui/src/pages/chat/utils/toolCallParser.ts
+++ b/ui/src/pages/chat/utils/toolCallParser.ts
@@ -28,10 +28,27 @@
* ```
*/
-import type { ToolCall, ToolCallType, ToolCallStatus } from "@/components/ToolCallIndicator";
-
-// Re-export for convenience
-export type { ToolCall, ToolCallType, ToolCallStatus };
+/** Types of tool calls that can be displayed */
+export type ToolCallType =
+ | "file_search"
+ | "web_search"
+ | "code_interpreter"
+ | "js_code_interpreter"
+ | "sql_query"
+ | "chart_render"
+ | "function";
+
+/** Status of a tool call execution */
+export type ToolCallStatus = "pending" | "executing" | "completed" | "failed";
+
+/** Represents a single tool call being executed */
+export interface ToolCall {
+ id: string;
+ type: ToolCallType;
+ name?: string;
+ status: ToolCallStatus;
+ error?: string;
+}
/**
* SSE event types emitted by the backend for function calls
diff --git a/ui/src/stores/chatUIStore.ts b/ui/src/stores/chatUIStore.ts
index c522500..1710719 100644
--- a/ui/src/stores/chatUIStore.ts
+++ b/ui/src/stores/chatUIStore.ts
@@ -205,6 +205,12 @@ interface ChatUIState {
* When null, uses the current streaming model as fallback.
*/
subAgentModel: string | null;
+ /**
+ * Whether compact mode is enabled for model responses.
+ * Hides reasoning sections, tool execution details, and collapses
+ * rounds without content to minimal "Thinking" / "Processing" indicators.
+ */
+ compactMode: boolean;
}
interface ChatUIActions {
@@ -317,6 +323,10 @@ interface ChatUIActions {
clearPendingPrompt: () => void;
/** Set the default model for sub-agent tool */
setSubAgentModel: (model: string | null) => void;
+ /** Set compact mode */
+ setCompactMode: (enabled: boolean) => void;
+ /** Toggle compact mode */
+ toggleCompactMode: () => void;
}
export type ChatUIStore = ChatUIState & ChatUIActions;
@@ -340,6 +350,14 @@ function loadViewMode(): ViewMode {
return "grid";
}
+function loadCompactMode(): boolean {
+ try {
+ return localStorage.getItem("hadrian:compactMode") !== "false";
+ } catch {
+ return true;
+ }
+}
+
const initialState: ChatUIState = {
viewMode: loadViewMode(),
expandedModel: null,
@@ -369,6 +387,7 @@ const initialState: ChatUIState = {
editingMessageId: null,
pendingPrompt: null,
subAgentModel: null,
+ compactMode: loadCompactMode(),
};
export const useChatUIStore = create<ChatUIStore>()((set) => ({
@@ -623,6 +642,26 @@ export const useChatUIStore = create((set) => ({
clearPendingPrompt: () => set({ pendingPrompt: null }),
setSubAgentModel: (model) => set({ subAgentModel: model }),
+
+ setCompactMode: (enabled) => {
+ try {
+ localStorage.setItem("hadrian:compactMode", String(enabled));
+ } catch {
+ // localStorage unavailable
+ }
+ set({ compactMode: enabled });
+ },
+
+ toggleCompactMode: () =>
+ set((state) => {
+ const next = !state.compactMode;
+ try {
+ localStorage.setItem("hadrian:compactMode", String(next));
+ } catch {
+ // localStorage unavailable
+ }
+ return { compactMode: next };
+ }),
}));
/**
@@ -753,6 +792,9 @@ export const usePendingPrompt = () => useChatUIStore((state: ChatUIState) => sta
/** Get the default model for sub-agent tool */
export const useSubAgentModel = () => useChatUIStore((state: ChatUIState) => state.subAgentModel);
+/** Get compact mode state - hides reasoning/tools in model responses */
+export const useCompactMode = () => useChatUIStore((state: ChatUIState) => state.compactMode);
+
/** Get MCP config modal open state */
export const useMCPConfigModalOpen = () =>
useChatUIStore((state: ChatUIState) => state.mcpConfigModalOpen);
diff --git a/ui/src/stores/conversationStore.ts b/ui/src/stores/conversationStore.ts
index 46aac0e..c76cea8 100644
--- a/ui/src/stores/conversationStore.ts
+++ b/ui/src/stores/conversationStore.ts
@@ -5,6 +5,7 @@ import type {
Artifact,
ChatMessage,
Citation,
+ CompletedRound,
Conversation,
HistoryMode,
MessageModeMetadata,
@@ -100,6 +101,7 @@ interface ConversationActions {
citations?: Citation[];
artifacts?: Artifact[];
toolExecutionRounds?: ToolExecutionRound[];
+ completedRounds?: CompletedRound[];
debugMessageId?: string;
}>
) => void;
@@ -202,6 +204,7 @@ export const useConversationStore = create((set) => ({
citations: m.citations,
artifacts: m.artifacts,
toolExecutionRounds: m.toolExecutionRounds,
+ completedRounds: m.completedRounds,
debugMessageId: m.debugMessageId,
})),
],
diff --git a/ui/src/stores/streamingStore.ts b/ui/src/stores/streamingStore.ts
index f635886..56ab933 100644
--- a/ui/src/stores/streamingStore.ts
+++ b/ui/src/stores/streamingStore.ts
@@ -6,6 +6,7 @@ import type {
ResponseFeedbackData,
Citation,
Artifact,
+ CompletedRound,
ToolExecution,
ToolExecutionRound,
} from "@/components/chat-types";
@@ -70,8 +71,10 @@ export interface StreamingResponse {
*/
instanceId?: string;
content: string;
- /** Reasoning content (extended thinking) */
+ /** Reasoning content for the current round (extended thinking) */
reasoningContent: string;
+ /** Completed rounds bundling reasoning, content, and tool execution (multi-round tool execution) */
+ completedRounds: CompletedRound[];
isStreaming: boolean;
error?: string;
usage?: MessageUsage;
@@ -713,8 +716,14 @@ interface StreamingActions {
appendReasoningContent: (instanceId: string, delta: string) => void;
/** Set the full reasoning content for an instance */
setReasoningContent: (instanceId: string, content: string) => void;
+ /** Push a completed round, then reset reasoningContent and content for the next round */
+ pushCompletedRound: (instanceId: string, round: CompletedRound) => void;
+ /** Attach tool execution data to the last completed round */
+ setCompletedRoundToolExecution: (instanceId: string, toolExecution: ToolExecutionRound) => void;
/** Mark an instance's stream as complete */
completeStream: (instanceId: string, usage?: MessageUsage) => void;
+ /** Resume streaming for an instance (e.g., between tool-calling rounds) */
+ resumeStreaming: (instanceId: string) => void;
/** Set an error for an instance's stream */
setError: (instanceId: string, error: string) => void;
/** Clear all streams and reset mode state */
@@ -813,6 +822,7 @@ export const useStreamingStore = create((set) => ({
instanceId,
content: "",
reasoningContent: "",
+ completedRounds: [],
isStreaming: true,
startTime,
});
@@ -878,6 +888,33 @@ export const useStreamingStore = create((set) => ({
return { streams: newStreams };
}),
+ pushCompletedRound: (model, round) =>
+ set((state) => {
+ const existing = state.streams.get(model);
+ if (!existing) return state;
+
+ const newStreams = new Map(state.streams);
+ newStreams.set(model, {
+ ...existing,
+ completedRounds: [...existing.completedRounds, round],
+ reasoningContent: "",
+ content: "",
+ });
+ return { streams: newStreams };
+ }),
+
+ setCompletedRoundToolExecution: (model, toolExecution) =>
+ set((state) => {
+ const existing = state.streams.get(model);
+ if (!existing || existing.completedRounds.length === 0) return state;
+
+ const rounds = [...existing.completedRounds];
+ rounds[rounds.length - 1] = { ...rounds[rounds.length - 1], toolExecution };
+ const newStreams = new Map(state.streams);
+ newStreams.set(model, { ...existing, completedRounds: rounds });
+ return { streams: newStreams };
+ }),
+
completeStream: (model, usage) =>
set((state) => {
const existing = state.streams.get(model);
@@ -899,6 +936,16 @@ export const useStreamingStore = create((set) => ({
};
}),
+ resumeStreaming: (model) =>
+ set((state) => {
+ const existing = state.streams.get(model);
+ if (!existing) return state;
+
+ const newStreams = new Map(state.streams);
+ newStreams.set(model, { ...existing, isStreaming: true });
+ return { streams: newStreams, isStreaming: true };
+ }),
+
setError: (model, error) =>
set((state) => {
const existing = state.streams.get(model);