diff --git a/application/single_app/app.py b/application/single_app/app.py index 2354b1b5..e0c8cfee 100644 --- a/application/single_app/app.py +++ b/application/single_app/app.py @@ -75,6 +75,7 @@ from route_backend_public_prompts import * from route_backend_user_agreement import register_route_backend_user_agreement from route_backend_conversation_export import register_route_backend_conversation_export +from route_backend_thoughts import register_route_backend_thoughts from route_backend_speech import register_route_backend_speech from route_backend_tts import register_route_backend_tts from route_enhanced_citations import register_enhanced_citations_routes @@ -641,6 +642,9 @@ def list_semantic_kernel_plugins(): # ------------------- API User Agreement Routes ---------- register_route_backend_user_agreement(app) +# ------------------- API Thoughts Routes ---------------- +register_route_backend_thoughts(app) + # ------------------- Extenral Health Routes ---------- register_route_external_health(app) diff --git a/application/single_app/config.py b/application/single_app/config.py index da63c230..934e47af 100644 --- a/application/single_app/config.py +++ b/application/single_app/config.py @@ -459,6 +459,18 @@ def get_redis_cache_infrastructure_endpoint(redis_hostname: str) -> str: default_ttl=-1 # TTL disabled by default, enabled per-document for auto-cleanup ) +cosmos_thoughts_container_name = "thoughts" +cosmos_thoughts_container = cosmos_database.create_container_if_not_exists( + id=cosmos_thoughts_container_name, + partition_key=PartitionKey(path="/user_id") +) + +cosmos_archived_thoughts_container_name = "archive_thoughts" +cosmos_archived_thoughts_container = cosmos_database.create_container_if_not_exists( + id=cosmos_archived_thoughts_container_name, + partition_key=PartitionKey(path="/user_id") +) + def ensure_custom_logo_file_exists(app, settings): """ If custom_logo_base64 or custom_logo_dark_base64 is present in settings, ensure the appropriate diff 
--git a/application/single_app/functions_settings.py b/application/single_app/functions_settings.py index 8176939d..4164fab4 100644 --- a/application/single_app/functions_settings.py +++ b/application/single_app/functions_settings.py @@ -205,6 +205,9 @@ def get_settings(use_cosmos=False): 'require_member_of_feedback_admin': False, 'enable_conversation_archiving': False, + # Processing Thoughts + 'enable_thoughts': False, + # Search and Extract 'azure_ai_search_endpoint': '', 'azure_ai_search_key': '', diff --git a/application/single_app/functions_thoughts.py b/application/single_app/functions_thoughts.py new file mode 100644 index 00000000..c6ffe9dd --- /dev/null +++ b/application/single_app/functions_thoughts.py @@ -0,0 +1,256 @@ +# functions_thoughts.py + +import uuid +import time +from datetime import datetime, timezone +from config import cosmos_thoughts_container, cosmos_archived_thoughts_container, cosmos_messages_container +from functions_appinsights import log_event +from functions_settings import get_settings + + +class ThoughtTracker: + """Stateful per-request tracker that writes processing step records to Cosmos DB. + + Each add_thought() call immediately upserts a document so that polling + clients can see partial progress before the final response is sent. + + All Cosmos writes are wrapped in try/except so thought errors never + interrupt the chat processing flow. + """ + + def __init__(self, conversation_id, message_id, thread_id, user_id): + self.conversation_id = conversation_id + self.message_id = message_id + self.thread_id = thread_id + self.user_id = user_id + self.current_index = 0 + settings = get_settings() + self.enabled = settings.get('enable_thoughts', False) + + def add_thought(self, step_type, content, detail=None): + """Write a thought step to Cosmos immediately. + + Args: + step_type: One of search, tabular_analysis, web_search, + agent_tool_call, generation, content_safety. + content: Short human-readable description of the step. 
+ detail: Optional technical detail (function names, params, etc.). + + Returns: + The thought document id, or None if disabled/failed. + """ + if not self.enabled: + return None + + thought_id = str(uuid.uuid4()) + thought_doc = { + 'id': thought_id, + 'conversation_id': self.conversation_id, + 'message_id': self.message_id, + 'thread_id': self.thread_id, + 'user_id': self.user_id, + 'step_index': self.current_index, + 'step_type': step_type, + 'content': content, + 'detail': detail, + 'duration_ms': None, + 'timestamp': datetime.now(timezone.utc).isoformat() + } + self.current_index += 1 + + try: + cosmos_thoughts_container.upsert_item(thought_doc) + except Exception as e: + log_event(f"ThoughtTracker.add_thought failed: {e}", level="WARNING") + return None + + return thought_id + + def complete_thought(self, thought_id, duration_ms): + """Patch an existing thought with its duration after the step finishes.""" + if not self.enabled or not thought_id: + return + + try: + thought_doc = cosmos_thoughts_container.read_item( + item=thought_id, + partition_key=self.user_id + ) + thought_doc['duration_ms'] = duration_ms + cosmos_thoughts_container.upsert_item(thought_doc) + except Exception as e: + log_event(f"ThoughtTracker.complete_thought failed: {e}", level="WARNING") + + def timed_thought(self, step_type, content, detail=None): + """Convenience: add a thought and return a timer helper. + + Usage: + timer = tracker.timed_thought('search', 'Searching documents...') + # ... do work ... 
+ timer.stop() + """ + start = time.time() + thought_id = self.add_thought(step_type, content, detail) + return _ThoughtTimer(self, thought_id, start) + + +class _ThoughtTimer: + """Helper returned by ThoughtTracker.timed_thought() for auto-duration capture.""" + + def __init__(self, tracker, thought_id, start_time): + self._tracker = tracker + self._thought_id = thought_id + self._start = start_time + + def stop(self): + elapsed_ms = int((time.time() - self._start) * 1000) + self._tracker.complete_thought(self._thought_id, elapsed_ms) + return elapsed_ms + + +# --------------------------------------------------------------------------- +# CRUD helpers +# --------------------------------------------------------------------------- + +def get_thoughts_for_message(conversation_id, message_id, user_id): + """Return all thoughts for a specific assistant message, ordered by step_index.""" + try: + query = ( + "SELECT * FROM c " + "WHERE c.conversation_id = @conv_id " + "AND c.message_id = @msg_id " + "ORDER BY c.step_index ASC" + ) + params = [ + {"name": "@conv_id", "value": conversation_id}, + {"name": "@msg_id", "value": message_id}, + ] + results = list(cosmos_thoughts_container.query_items( + query=query, + parameters=params, + partition_key=user_id + )) + return results + except Exception as e: + log_event(f"get_thoughts_for_message failed: {e}", level="WARNING") + return [] + + +def get_pending_thoughts(conversation_id, user_id): + """Return the latest thoughts for a conversation that are still in-progress. + + Used by the polling endpoint. Retrieves thoughts created within the last + 5 minutes for the conversation, grouped by the most recent message_id. 
+ """ + try: + five_minutes_ago = datetime.now(timezone.utc) + from datetime import timedelta + five_minutes_ago = (five_minutes_ago - timedelta(minutes=5)).isoformat() + + query = ( + "SELECT * FROM c " + "WHERE c.conversation_id = @conv_id " + "AND c.timestamp >= @since " + "ORDER BY c.timestamp DESC" + ) + params = [ + {"name": "@conv_id", "value": conversation_id}, + {"name": "@since", "value": five_minutes_ago}, + ] + results = list(cosmos_thoughts_container.query_items( + query=query, + parameters=params, + partition_key=user_id + )) + + if not results: + return [] + + # Group by the most recent message_id + latest_message_id = results[0].get('message_id') + latest_thoughts = [ + t for t in results if t.get('message_id') == latest_message_id + ] + # Return in ascending step_index order + latest_thoughts.sort(key=lambda t: t.get('step_index', 0)) + return latest_thoughts + except Exception as e: + log_event(f"get_pending_thoughts failed: {e}", level="WARNING") + return [] + + +def get_thoughts_for_conversation(conversation_id, user_id): + """Return all thoughts for a conversation.""" + try: + query = ( + "SELECT * FROM c " + "WHERE c.conversation_id = @conv_id " + "ORDER BY c.timestamp ASC" + ) + params = [ + {"name": "@conv_id", "value": conversation_id}, + ] + results = list(cosmos_thoughts_container.query_items( + query=query, + parameters=params, + partition_key=user_id + )) + return results + except Exception as e: + log_event(f"get_thoughts_for_conversation failed: {e}", level="WARNING") + return [] + + +def archive_thoughts_for_conversation(conversation_id, user_id): + """Copy all thoughts for a conversation to the archive container, then delete originals.""" + try: + thoughts = get_thoughts_for_conversation(conversation_id, user_id) + for thought in thoughts: + archived = dict(thought) + archived['archived_at'] = datetime.now(timezone.utc).isoformat() + cosmos_archived_thoughts_container.upsert_item(archived) + + for thought in thoughts: + 
cosmos_thoughts_container.delete_item( + item=thought['id'], + partition_key=user_id + ) + except Exception as e: + log_event(f"archive_thoughts_for_conversation failed: {e}", level="WARNING") + + +def delete_thoughts_for_conversation(conversation_id, user_id): + """Delete all thoughts for a conversation.""" + try: + thoughts = get_thoughts_for_conversation(conversation_id, user_id) + for thought in thoughts: + cosmos_thoughts_container.delete_item( + item=thought['id'], + partition_key=user_id + ) + except Exception as e: + log_event(f"delete_thoughts_for_conversation failed: {e}", level="WARNING") + + +def delete_thoughts_for_message(message_id, user_id): + """Delete all thoughts associated with a specific assistant message.""" + try: + query = ( + "SELECT * FROM c " + "WHERE c.message_id = @msg_id" + ) + params = [ + {"name": "@msg_id", "value": message_id}, + ] + results = list(cosmos_thoughts_container.query_items( + query=query, + parameters=params, + partition_key=user_id + )) + for thought in results: + cosmos_thoughts_container.delete_item( + item=thought['id'], + partition_key=user_id + ) + except Exception as e: + log_event(f"delete_thoughts_for_message failed: {e}", level="WARNING") diff --git a/application/single_app/route_backend_chats.py b/application/single_app/route_backend_chats.py index e452fed4..abd7a9f4 100644 --- a/application/single_app/route_backend_chats.py +++ b/application/single_app/route_backend_chats.py @@ -28,6 +28,7 @@ from functions_activity_logging import log_chat_activity, log_conversation_creation, log_token_usage from flask import current_app from swagger_wrapper import swagger_route, get_auth_security +from functions_thoughts import ThoughtTracker def get_kernel(): @@ -668,6 +669,18 @@ def result_requires_message_reload(result: Any) -> bool: conversation_item['last_updated'] = datetime.utcnow().isoformat() cosmos_conversations_container.upsert_item(conversation_item) # Update timestamp and potentially title + + # Generate 
assistant_message_id early for thought tracking + assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" + + # Initialize thought tracker + thought_tracker = ThoughtTracker( + conversation_id=conversation_id, + message_id=assistant_message_id, + thread_id=current_user_thread_id, + user_id=user_id + ) + # region 3 - Content Safety # --------------------------------------------------------------------- # 3) Check Content Safety (but DO NOT return 403). @@ -679,6 +692,7 @@ def result_requires_message_reload(result: Any) -> bool: blocklist_matches = [] if settings.get('enable_content_safety') and "content_safety_client" in CLIENTS: + thought_tracker.add_thought('content_safety', 'Checking content safety...') try: content_safety_client = CLIENTS["content_safety_client"] request_obj = AnalyzeTextOptions(text=user_message) @@ -836,6 +850,7 @@ def result_requires_message_reload(result: Any) -> bool: # Perform the search + thought_tracker.add_thought('search', f"Searching {document_scope or 'personal'} workspace documents for '{(search_query or user_message)[:50]}'") try: # Prepare search arguments # Set default and maximum values for top_n @@ -899,6 +914,8 @@ def result_requires_message_reload(result: Any) -> bool: }), 500 if search_results: + unique_doc_names = set(doc.get('file_name', 'Unknown') for doc in search_results) + thought_tracker.add_thought('search', f"Found {len(search_results)} results from {len(unique_doc_names)} documents") retrieved_texts = [] combined_documents = [] classifications_found = set(conversation_item.get('classification', [])) # Load existing @@ -1489,6 +1506,7 @@ def result_requires_message_reload(result: Any) -> bool: }), status_code if web_search_enabled: + thought_tracker.add_thought('web_search', f"Searching the web for '{(search_query or user_message)[:50]}'") perform_web_search( settings=settings, conversation_id=conversation_id, @@ -1504,7 +1522,9 @@ def 
result_requires_message_reload(result: Any) -> bool: agent_citations_list=agent_citations_list, web_search_citations_list=web_search_citations_list, ) - + if web_search_citations_list: + thought_tracker.add_thought('web_search', f"Got {len(web_search_citations_list)} web search results") + # region 5 - FINAL conversation history preparation # --------------------------------------------------------------------- # 5) Prepare FINAL conversation history for GPT (including summarization) @@ -2110,6 +2130,23 @@ def orchestrator_error(e): }) if selected_agent: + thought_tracker.add_thought('agent_tool_call', f"Sending to agent '{getattr(selected_agent, 'display_name', getattr(selected_agent, 'name', 'unknown'))}'") + + # Register callback to write plugin thoughts to Cosmos in real-time + callback_key = f"{user_id}:{conversation_id}" + plugin_logger = get_plugin_logger() + + def on_plugin_invocation(inv): + duration_str = f" ({int(inv.duration_ms)}ms)" if inv.duration_ms else "" + tool_name = f"{inv.plugin_name}.{inv.function_name}" + thought_tracker.add_thought( + 'agent_tool_call', + f"Agent called {tool_name}{duration_str}", + detail=f"success={inv.success}" + ) + + plugin_logger.register_callback(callback_key, on_plugin_invocation) + def invoke_selected_agent(): return asyncio.run(run_sk_call( selected_agent.invoke, @@ -2120,16 +2157,18 @@ def agent_success(result): msg = str(result) notice = None agent_used = getattr(selected_agent, 'name', 'All Plugins') - + + # Deregister real-time thought callback + plugin_logger.deregister_callbacks(callback_key) + # Get the actual model deployment used by the agent actual_model_deployment = getattr(selected_agent, 'deployment_name', None) or agent_used debug_print(f"Agent '{agent_used}' using deployment: {actual_model_deployment}") - + # Extract detailed plugin invocations for enhanced agent citations - plugin_logger = get_plugin_logger() - # CRITICAL FIX: Filter by user_id and conversation_id to prevent cross-conversation 
contamination + # (Thoughts already written to Cosmos in real-time by callback) plugin_invocations = plugin_logger.get_invocations_for_conversation(user_id, conversation_id) - + # Convert plugin invocations to citation format with detailed information detailed_citations = [] for inv in plugin_invocations: @@ -2204,6 +2243,7 @@ def make_json_serializable(obj): ) return (msg, actual_model_deployment, "agent", notice) def agent_error(e): + plugin_logger.deregister_callbacks(callback_key) debug_print(f"Error during Semantic Kernel Agent invocation: {str(e)}") log_event( f"Error during Semantic Kernel Agent invocation: {str(e)}", @@ -2244,8 +2284,17 @@ def foundry_agent_success(result): or agent_used ) + # Deregister real-time thought callback + plugin_logger.deregister_callbacks(callback_key) + foundry_citations = getattr(selected_agent, 'last_run_citations', []) or [] if foundry_citations: + # Emit thoughts for Foundry agent citations/tool calls + for citation in foundry_citations: + thought_tracker.add_thought( + 'agent_tool_call', + f"Agent retrieved citation from Azure AI Foundry" + ) for citation in foundry_citations: try: serializable = json.loads(json.dumps(citation, default=str)) @@ -2282,6 +2331,7 @@ def foundry_agent_success(result): return (msg, actual_model_deployment, 'agent', notice) def foundry_agent_error(e): + plugin_logger.deregister_callbacks(callback_key) log_event( f"Error during Azure AI Foundry agent invocation: {str(e)}", extra={ @@ -2360,6 +2410,7 @@ def kernel_error(e): 'on_error': kernel_error }) + thought_tracker.add_thought('generation', 'Generating response...') def invoke_gpt_fallback(): if not conversation_history_for_api: raise Exception('Cannot generate response: No conversation history available.') @@ -2510,8 +2561,8 @@ def gpt_error(e): if hasattr(selected_agent, 'name'): agent_name = selected_agent.name - assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" - + # assistant_message_id 
was generated earlier for thought tracking + # Get user_info and thread_id from the user message for ownership tracking and threading user_info_for_assistant = None user_thread_id = None @@ -2672,7 +2723,8 @@ def gpt_error(e): 'web_search_citations': web_search_citations_list, 'agent_citations': agent_citations_list, 'reload_messages': reload_messages_required, - 'kernel_fallback_notice': kernel_fallback_notice + 'kernel_fallback_notice': kernel_fallback_notice, + 'thoughts_enabled': thought_tracker.enabled }), 200 except Exception as e: @@ -3111,10 +3163,27 @@ def generate(): conversation_item['last_updated'] = datetime.utcnow().isoformat() cosmos_conversations_container.upsert_item(conversation_item) - + + # Generate assistant_message_id early for thought tracking + assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" + + # Initialize thought tracker for streaming path + thought_tracker = ThoughtTracker( + conversation_id=conversation_id, + message_id=assistant_message_id, + thread_id=current_user_thread_id, + user_id=user_id + ) + + def emit_thought(step_type, content, detail=None): + """Add a thought to Cosmos and return an SSE event string.""" + thought_tracker.add_thought(step_type, content, detail) + return f"data: {json.dumps({'type': 'thought', 'step_index': thought_tracker.current_index - 1, 'step_type': step_type, 'content': content})}\n\n" + # Hybrid search (if enabled) combined_documents = [] if hybrid_search_enabled: + yield emit_thought('search', f"Searching {document_scope or 'personal'} workspace documents for '{(search_query or user_message)[:50]}'") try: search_args = { "query": search_query, @@ -3144,8 +3213,10 @@ def generate(): search_results = hybrid_search(**search_args) except Exception as e: debug_print(f"Error during hybrid search: {e}") - + if search_results: + unique_doc_names_stream = set(doc.get('file_name', 'Unknown') for doc in search_results) + yield emit_thought('search', f"Found 
{len(search_results)} results from {len(unique_doc_names_stream)} documents") retrieved_texts = [] for doc in search_results: @@ -3324,6 +3395,7 @@ def generate(): hybrid_citations_list.sort(key=lambda x: x.get('page_number', 0), reverse=True) if web_search_enabled: + yield emit_thought('web_search', f"Searching the web for '{(search_query or user_message)[:50]}'") perform_web_search( settings=settings, conversation_id=conversation_id, @@ -3339,6 +3411,8 @@ def generate(): agent_citations_list=agent_citations_list, web_search_citations_list=web_search_citations_list, ) + if web_search_citations_list: + yield emit_thought('web_search', f"Got {len(web_search_citations_list)} web search results") # Update message chat type message_chat_type = None @@ -3472,7 +3546,7 @@ def generate(): # Stream the response accumulated_content = "" token_usage_data = None # Will be populated from final stream chunk - assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" + # assistant_message_id was generated earlier for thought tracking final_model_used = gpt_model # Default to gpt_model, will be overridden if agent is used # DEBUG: Check agent streaming decision @@ -3482,8 +3556,23 @@ def generate(): try: if use_agent_streaming and selected_agent: # Stream from agent using invoke_stream + yield emit_thought('agent_tool_call', f"Sending to agent '{agent_display_name_used or agent_name_used}'") debug_print(f"--- Streaming from Agent: {agent_name_used} ---") - + + # Register callback to persist plugin thoughts to Cosmos in real-time + callback_key = f"{user_id}:{conversation_id}" + plugin_logger_cb = get_plugin_logger() + + def on_plugin_invocation_streaming(inv): + duration_str = f" ({int(inv.duration_ms)}ms)" if inv.duration_ms else "" + tool_name = f"{inv.plugin_name}.{inv.function_name}" + thought_tracker.add_thought( + 'agent_tool_call', + f"Agent called {tool_name}{duration_str}" + ) + + plugin_logger_cb.register_callback(callback_key, 
on_plugin_invocation_streaming) + # Import required classes from semantic_kernel.contents.chat_message_content import ChatMessageContent @@ -3539,36 +3628,49 @@ async def stream_agent_async(): try: # Run streaming and collect chunks and usage chunks, stream_usage = loop.run_until_complete(stream_agent_async()) - - # Yield chunks to frontend - for chunk_content in chunks: - accumulated_content += chunk_content - yield f"data: {json.dumps({'content': chunk_content})}\n\n" - - # Try to capture token usage from stream metadata - if stream_usage: - # stream_usage is a CompletionUsage object, not a dict - prompt_tokens = getattr(stream_usage, 'prompt_tokens', 0) - completion_tokens = getattr(stream_usage, 'completion_tokens', 0) - total_tokens = getattr(stream_usage, 'total_tokens', None) - - # Calculate total if not provided - if total_tokens is None or total_tokens == 0: - total_tokens = prompt_tokens + completion_tokens - - token_usage_data = { - 'prompt_tokens': prompt_tokens, - 'completion_tokens': completion_tokens, - 'total_tokens': total_tokens, - 'captured_at': datetime.utcnow().isoformat() - } - debug_print(f"[Agent Streaming Tokens] From metadata - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}") except Exception as stream_error: + plugin_logger_cb.deregister_callbacks(callback_key) debug_print(f"❌ Agent streaming error: {stream_error}") import traceback traceback.print_exc() yield f"data: {json.dumps({'error': f'Agent streaming failed: {str(stream_error)}'})}\n\n" return + + # Deregister callback (agent completed successfully) + plugin_logger_cb.deregister_callbacks(callback_key) + + # Emit SSE-only events for streaming UI (Cosmos writes already done by callback) + agent_plugin_invocations = plugin_logger_cb.get_invocations_for_conversation(user_id, conversation_id) + for inv in agent_plugin_invocations: + duration_str = f" ({int(inv.duration_ms)}ms)" if inv.duration_ms else "" + tool_name = 
f"{inv.plugin_name}.{inv.function_name}" + content = f"Agent called {tool_name}{duration_str}" + yield f"data: {json.dumps({'type': 'thought', 'step_index': thought_tracker.current_index, 'step_type': 'agent_tool_call', 'content': content})}\n\n" + thought_tracker.current_index += 1 + + # Yield chunks to frontend + for chunk_content in chunks: + accumulated_content += chunk_content + yield f"data: {json.dumps({'content': chunk_content})}\n\n" + + # Try to capture token usage from stream metadata + if stream_usage: + # stream_usage is a CompletionUsage object, not a dict + prompt_tokens = getattr(stream_usage, 'prompt_tokens', 0) + completion_tokens = getattr(stream_usage, 'completion_tokens', 0) + total_tokens = getattr(stream_usage, 'total_tokens', None) + + # Calculate total if not provided + if total_tokens is None or total_tokens == 0: + total_tokens = prompt_tokens + completion_tokens + + token_usage_data = { + 'prompt_tokens': prompt_tokens, + 'completion_tokens': completion_tokens, + 'total_tokens': total_tokens, + 'captured_at': datetime.utcnow().isoformat() + } + debug_print(f"[Agent Streaming Tokens] From metadata - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}") # Collect token usage from kernel services if not captured from stream if not token_usage_data: @@ -3650,6 +3752,7 @@ def make_json_serializable(obj): else: # Stream from regular GPT model (non-agent) + yield emit_thought('generation', 'Generating response...') debug_print(f"--- Streaming from GPT ({gpt_model}) ---") # Prepare stream parameters @@ -3818,7 +3921,8 @@ def make_json_serializable(obj): 'agent_citations': agent_citations_list, 'agent_display_name': agent_display_name_used if use_agent_streaming else None, 'agent_name': agent_name_used if use_agent_streaming else None, - 'full_content': accumulated_content + 'full_content': accumulated_content, + 'thoughts_enabled': thought_tracker.enabled } yield f"data: {json.dumps(final_data)}\n\n" diff --git 
a/application/single_app/route_backend_conversations.py b/application/single_app/route_backend_conversations.py index f267d729..ac25bd8c 100644 --- a/application/single_app/route_backend_conversations.py +++ b/application/single_app/route_backend_conversations.py @@ -8,6 +8,7 @@ from functions_debug import debug_print from swagger_wrapper import swagger_route, get_auth_security from functions_activity_logging import log_conversation_creation, log_conversation_deletion, log_conversation_archival +from functions_thoughts import archive_thoughts_for_conversation, delete_thoughts_for_conversation def register_route_backend_conversations(app): @@ -430,7 +431,14 @@ def delete_conversation(conversation_id): cosmos_archived_messages_container.upsert_item(archived_doc) cosmos_messages_container.delete_item(doc['id'], partition_key=conversation_id) - + + # Archive/delete thoughts for conversation + user_id_for_thoughts = conversation_item.get('user_id') + if archiving_enabled: + archive_thoughts_for_conversation(conversation_id, user_id_for_thoughts) + else: + delete_thoughts_for_conversation(conversation_id, user_id_for_thoughts) + # Log conversation deletion before actual deletion log_conversation_deletion( user_id=conversation_item.get('user_id'), @@ -530,7 +538,13 @@ def delete_multiple_conversations(): cosmos_archived_messages_container.upsert_item(archived_message) cosmos_messages_container.delete_item(message['id'], partition_key=conversation_id) - + + # Archive/delete thoughts for conversation + if archiving_enabled: + archive_thoughts_for_conversation(conversation_id, user_id) + else: + delete_thoughts_for_conversation(conversation_id, user_id) + # Log conversation deletion before actual deletion log_conversation_deletion( user_id=user_id, diff --git a/application/single_app/route_backend_thoughts.py b/application/single_app/route_backend_thoughts.py new file mode 100644 index 00000000..a7624a3f --- /dev/null +++ b/application/single_app/route_backend_thoughts.py @@ 
-0,0 +1,80 @@ +# route_backend_thoughts.py + +from flask import request, jsonify +from functions_authentication import login_required, user_required, get_current_user_id +from functions_settings import get_settings +from functions_thoughts import get_thoughts_for_message, get_pending_thoughts +from swagger_wrapper import swagger_route, get_auth_security +from functions_appinsights import log_event + + +def register_route_backend_thoughts(app): + + @app.route('/api/conversations/<conversation_id>/messages/<message_id>/thoughts', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_get_message_thoughts(conversation_id, message_id): + """Return persisted thoughts for a specific assistant message.""" + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + settings = get_settings() + if not settings.get('enable_thoughts', False): + return jsonify({'thoughts': [], 'enabled': False}), 200 + + try: + thoughts = get_thoughts_for_message(conversation_id, message_id, user_id) + # Strip internal Cosmos fields before returning + sanitized = [] + for t in thoughts: + sanitized.append({ + 'id': t.get('id'), + 'step_index': t.get('step_index'), + 'step_type': t.get('step_type'), + 'content': t.get('content'), + 'detail': t.get('detail'), + 'duration_ms': t.get('duration_ms'), + 'timestamp': t.get('timestamp') + }) + return jsonify({'thoughts': sanitized, 'enabled': True}), 200 + except Exception as e: + log_event(f"api_get_message_thoughts error: {e}", level="WARNING") + return jsonify({'error': 'Failed to retrieve thoughts'}), 500 + + @app.route('/api/conversations/<conversation_id>/thoughts/pending', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_get_pending_thoughts(conversation_id): + """Return the latest in-progress thoughts for a conversation. 
+ + Used by the non-streaming frontend to poll for thought updates + while waiting for the chat response. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + settings = get_settings() + if not settings.get('enable_thoughts', False): + return jsonify({'thoughts': [], 'enabled': False}), 200 + + try: + thoughts = get_pending_thoughts(conversation_id, user_id) + sanitized = [] + for t in thoughts: + sanitized.append({ + 'id': t.get('id'), + 'step_index': t.get('step_index'), + 'step_type': t.get('step_type'), + 'content': t.get('content'), + 'detail': t.get('detail'), + 'duration_ms': t.get('duration_ms'), + 'timestamp': t.get('timestamp') + }) + return jsonify({'thoughts': sanitized, 'enabled': True}), 200 + except Exception as e: + log_event(f"api_get_pending_thoughts error: {e}", level="WARNING") + return jsonify({'error': 'Failed to retrieve pending thoughts'}), 500 diff --git a/application/single_app/route_frontend_admin_settings.py b/application/single_app/route_frontend_admin_settings.py index 578e1545..8c49522b 100644 --- a/application/single_app/route_frontend_admin_settings.py +++ b/application/single_app/route_frontend_admin_settings.py @@ -809,9 +809,10 @@ def is_valid_url(url): 'require_member_of_safety_violation_admin': require_member_of_safety_violation_admin, # ADDED 'require_member_of_feedback_admin': require_member_of_feedback_admin, # ADDED - # Feedback & Archiving + # Feedback, Archiving & Thoughts 'enable_user_feedback': form_data.get('enable_user_feedback') == 'on', 'enable_conversation_archiving': form_data.get('enable_conversation_archiving') == 'on', + 'enable_thoughts': form_data.get('enable_thoughts') == 'on', # Search (Web Search via Azure AI Foundry agent) 'enable_web_search': enable_web_search, diff --git a/application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py b/application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py index 
f982f0a4..bddf9cda 100644 --- a/application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py +++ b/application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py @@ -11,6 +11,7 @@ import logging import functools import inspect +import threading from typing import Any, Dict, List, Optional, Callable from datetime import datetime from dataclasses import dataclass, asdict @@ -51,24 +52,29 @@ def __init__(self): self.invocations: List[PluginInvocation] = [] self.max_history = 1000 # Keep last 1000 invocations in memory self.logger = get_appinsights_logger() or logging.getLogger(__name__) + self._callbacks: Dict[str, List[Callable[[PluginInvocation], None]]] = {} + self._callback_lock = threading.Lock() def log_invocation(self, invocation: PluginInvocation): """Log a plugin invocation to Application Insights and local history.""" # Add to local history self.invocations.append(invocation) - + # Trim history if needed if len(self.invocations) > self.max_history: self.invocations = self.invocations[-self.max_history:] - + # Enhanced terminal logging self._log_to_terminal(invocation) - + # Log to Application Insights self._log_to_appinsights(invocation) - + # Log to standard logging self._log_to_standard(invocation) + + # Fire registered thought callbacks + self._fire_callbacks(invocation) def _log_to_terminal(self, invocation: PluginInvocation): """Log detailed invocation information to terminal.""" @@ -277,6 +283,34 @@ def clear_history(self): """Clear the invocation history.""" self.invocations.clear() + def register_callback(self, key, callback): + """Register a callback fired on each plugin invocation for the given key. + + Args: + key: A string key, typically f"{user_id}:{conversation_id}". + callback: Called with the PluginInvocation after it is logged. 
+ """ + with self._callback_lock: + if key not in self._callbacks: + self._callbacks[key] = [] + self._callbacks[key].append(callback) + + def deregister_callbacks(self, key): + """Remove all callbacks for the given key.""" + with self._callback_lock: + self._callbacks.pop(key, None) + + def _fire_callbacks(self, invocation): + """Fire matching callbacks for this invocation's user+conversation.""" + key = f"{invocation.user_id}:{invocation.conversation_id}" + with self._callback_lock: + callbacks = list(self._callbacks.get(key, [])) + for cb in callbacks: + try: + cb(invocation) + except Exception as e: + log_event(f"Plugin invocation callback error: {e}", level="WARNING") + # Global instance _plugin_logger = PluginInvocationLogger() diff --git a/application/single_app/static/css/chats.css b/application/single_app/static/css/chats.css index 38e11c3a..6a64bbc1 100644 --- a/application/single_app/static/css/chats.css +++ b/application/single_app/static/css/chats.css @@ -1676,4 +1676,160 @@ mark.search-highlight { 100% { transform: scale(1.05); } +} + +/* ============================================= + Processing Thoughts + ============================================= */ + +/* Loading indicator thought text */ +.thought-live-text { + font-style: italic; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + max-width: 300px; +} + +/* Toggle button in message footer */ +.thoughts-toggle-btn { + font-size: 0.9rem; + color: #6c757d; + padding: 0 0.25rem; + border: none; + background: none; + cursor: pointer; + transition: color 0.15s ease-in-out; +} + +.thoughts-toggle-btn:hover { + color: #ffc107; +} + +/* Collapsible container inside message bubble */ +.thoughts-container { + max-height: 300px; + overflow-y: auto; + font-size: 0.85rem; +} + +/* Timeline wrapper */ +.thoughts-list { + position: relative; + padding-left: 1.25rem; +} + +/* Vertical timeline line */ +.thoughts-list::before { + content: ''; + position: absolute; + left: 0.5rem; + top: 
0.25rem; + bottom: 0.25rem; + width: 2px; + background: linear-gradient(to bottom, #0d6efd, #6ea8fe); + border-radius: 1px; +} + +/* Individual thought step */ +.thought-step { + display: flex; + align-items: flex-start; + padding-left: 0.75rem; + padding-top: 0.25rem; + padding-bottom: 0.25rem; + position: relative; +} + +/* Timeline node dot */ +.thought-step::before { + content: ''; + position: absolute; + left: -1rem; + top: 0.55rem; + width: 8px; + height: 8px; + border-radius: 50%; + background-color: #0d6efd; + border: 2px solid #fff; + box-shadow: 0 0 0 1px #0d6efd; + z-index: 1; +} + +/* Last thought step gets a slightly different dot */ +.thought-step:last-child::before { + background-color: #198754; + box-shadow: 0 0 0 1px #198754; +} + +.thought-step i { + flex-shrink: 0; + margin-top: 2px; +} + +/* Streaming cursor thought badge pulse animation */ +.animate-pulse { + animation: thought-pulse 1.5s ease-in-out infinite; +} + +/* Streaming thought display (before content arrives) */ +.streaming-thought-display { + display: flex; + align-items: center; + padding: 0.5rem 0; +} + +/* Light mode: use darker, more readable colors */ +.streaming-thought-display .badge { + background-color: rgba(13, 110, 253, 0.08) !important; + color: #0a58ca !important; + border-color: rgba(13, 110, 253, 0.25) !important; +} + +/* Dark mode: lighter accent colors */ +[data-bs-theme="dark"] .streaming-thought-display .badge { + background-color: rgba(13, 202, 240, 0.15) !important; + color: #6edff6 !important; + border-color: rgba(13, 202, 240, 0.3) !important; +} + +@keyframes thought-pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.6; + } +} + +/* Dark mode overrides */ +[data-bs-theme="dark"] .thoughts-toggle-btn { + color: #adb5bd; +} + +[data-bs-theme="dark"] .thoughts-toggle-btn:hover { + color: #ffc107; +} + +[data-bs-theme="dark"] .thought-step { + /* Dark mode dot border matches dark background */ +} + +[data-bs-theme="dark"] .thought-step::before { + 
border-color: #212529; + background-color: #6ea8fe; + box-shadow: 0 0 0 1px #6ea8fe; +} + +[data-bs-theme="dark"] .thought-step:last-child::before { + background-color: #75b798; + box-shadow: 0 0 0 1px #75b798; +} + +[data-bs-theme="dark"] .thoughts-list::before { + background: linear-gradient(to bottom, #6ea8fe, #9ec5fe); +} + +[data-bs-theme="dark"] .thoughts-container { + border-top-color: #495057 !important; } \ No newline at end of file diff --git a/application/single_app/static/images/custom_logo.png b/application/single_app/static/images/custom_logo.png new file mode 100644 index 00000000..ecf6e652 Binary files /dev/null and b/application/single_app/static/images/custom_logo.png differ diff --git a/application/single_app/static/images/custom_logo_dark.png b/application/single_app/static/images/custom_logo_dark.png new file mode 100644 index 00000000..4f281945 Binary files /dev/null and b/application/single_app/static/images/custom_logo_dark.png differ diff --git a/application/single_app/static/js/admin/admin_settings.js b/application/single_app/static/js/admin/admin_settings.js index 85719128..87478db0 100644 --- a/application/single_app/static/js/admin/admin_settings.js +++ b/application/single_app/static/js/admin/admin_settings.js @@ -3827,11 +3827,12 @@ function checkOptionalFeaturesEnabled(stepNumber) { return endpoint && key; } - case 11: // User feedback and archiving - // Check if feedback is enabled + case 11: // User feedback, archiving, and thoughts + // Check if feedback, archiving, or thoughts is enabled const feedbackEnabled = document.getElementById('enable_user_feedback')?.checked; const archivingEnabled = document.getElementById('enable_conversation_archiving')?.checked; - return feedbackEnabled || archivingEnabled; + const thoughtsEnabled = document.getElementById('enable_thoughts')?.checked; + return feedbackEnabled || archivingEnabled || thoughtsEnabled; case 12: // Enhanced citations and image generation // Check if enhanced citations or 
image generation is enabled diff --git a/application/single_app/static/js/chat/chat-loading-indicator.js b/application/single_app/static/js/chat/chat-loading-indicator.js index c1ab20c8..e3a77ff5 100644 --- a/application/single_app/static/js/chat/chat-loading-indicator.js +++ b/application/single_app/static/js/chat/chat-loading-indicator.js @@ -39,12 +39,35 @@ export function showLoadingIndicatorInChatbox() {
AI is typing...
- AI is typing... + AI is typing... `; chatbox.appendChild(loadingIndicator); chatbox.scrollTop = chatbox.scrollHeight; } +/** + * Update the loading indicator text with a thought step. + * Called by the thought polling handler to replace "AI is typing..." dynamically. + * @param {string} text - The thought content to display. + * @param {string} iconClass - Bootstrap Icon class (e.g. 'bi-search'). + */ +export function updateLoadingIndicatorText(text, iconClass) { + const textEl = document.getElementById("loading-indicator-text"); + if (!textEl) return; + + if (iconClass) { + textEl.innerHTML = `${text}`; + } else { + textEl.textContent = text; + } + + // Scroll chatbox to keep indicator visible + const chatbox = document.getElementById("chatbox"); + if (chatbox) { + chatbox.scrollTop = chatbox.scrollHeight; + } +} + export function hideLoadingIndicatorInChatbox() { const loadingIndicator = document.getElementById("loading-indicator"); if (loadingIndicator) { diff --git a/application/single_app/static/js/chat/chat-messages.js b/application/single_app/static/js/chat/chat-messages.js index d4c54790..81469dc9 100644 --- a/application/single_app/static/js/chat/chat-messages.js +++ b/application/single_app/static/js/chat/chat-messages.js @@ -20,6 +20,7 @@ import { saveUserSetting } from "./chat-layout.js"; import { isStreamingEnabled, sendMessageWithStreaming } from "./chat-streaming.js"; import { getCurrentReasoningEffort, isReasoningEffortEnabled } from './chat-reasoning.js'; import { areAgentsEnabled } from './chat-agents.js'; +import { createThoughtsToggleHtml, attachThoughtsToggleListener, startThoughtPolling, stopThoughtPolling } from './chat-thoughts.js'; // Conditionally import TTS if enabled let ttsModule = null; @@ -743,11 +744,13 @@ export function appendMessage( const metadataContainerId = `metadata-${messageId || Date.now()}`; const metadataContainerHtml = ``; - + + const thoughtsHtml = createThoughtsToggleHtml(messageId); + const footerContentHtml = ``; 
@@ -760,6 +763,7 @@ export function appendMessage(
${senderLabel}
${mainMessageHtml} ${citationContentContainerHtml} + ${thoughtsHtml.containerHtml} ${metadataContainerHtml} ${footerContentHtml} @@ -816,6 +820,9 @@ export function appendMessage( } }); } + + // Attach thoughts toggle listener + attachThoughtsToggleListener(messageDiv, messageId, currentConversationId); const maskBtn = messageDiv.querySelector(".mask-btn"); if (maskBtn) { @@ -1516,6 +1523,7 @@ export function actuallySendMessage(finalMessageToSend) { } // Regular non-streaming fetch + startThoughtPolling(currentConversationId); fetch("/api/chat", { method: "POST", headers: { @@ -1547,6 +1555,7 @@ export function actuallySendMessage(finalMessageToSend) { }) .then((data) => { // Only successful responses reach here + stopThoughtPolling(); hideLoadingIndicatorInChatbox(); console.log("--- Data received from /api/chat ---"); @@ -1688,6 +1697,7 @@ export function actuallySendMessage(finalMessageToSend) { } }) .catch((error) => { + stopThoughtPolling(); hideLoadingIndicatorInChatbox(); console.error("Error sending message:", error); diff --git a/application/single_app/static/js/chat/chat-streaming.js b/application/single_app/static/js/chat/chat-streaming.js index faf6f59e..d2b5b218 100644 --- a/application/single_app/static/js/chat/chat-streaming.js +++ b/application/single_app/static/js/chat/chat-streaming.js @@ -5,6 +5,7 @@ import { loadUserSettings, saveUserSetting } from './chat-layout.js'; import { showToast } from './chat-toast.js'; import { updateSidebarConversationTitle } from './chat-sidebar-conversations.js'; import { applyScopeLock } from './chat-documents.js'; +import { handleStreamingThought } from './chat-thoughts.js'; let streamingEnabled = false; let currentEventSource = null; @@ -207,8 +208,11 @@ export function sendMessageWithStreaming(messageData, tempUserMessageId, current handleStreamError(tempAiMessageId, data.partial_content || accumulatedContent, data.error); return; } - - if (data.content) { + + if (data.type === 'thought') { + 
handleStreamingThought(data); + // Continue reading — don't fall through to content handling + } else if (data.content) { // Append chunk to accumulated content accumulatedContent += data.content; updateStreamingMessage(tempAiMessageId, accumulatedContent); diff --git a/application/single_app/static/js/chat/chat-thoughts.js b/application/single_app/static/js/chat/chat-thoughts.js new file mode 100644 index 00000000..a780bd3f --- /dev/null +++ b/application/single_app/static/js/chat/chat-thoughts.js @@ -0,0 +1,215 @@ +// chat-thoughts.js + +import { updateLoadingIndicatorText } from './chat-loading-indicator.js'; +import { escapeHtml } from './chat-utils.js'; + +let thoughtPollingInterval = null; +let lastSeenThoughtIndex = -1; + +// --------------------------------------------------------------------------- +// Icon map: step_type → Bootstrap Icon class +// --------------------------------------------------------------------------- +function getThoughtIcon(stepType) { + const iconMap = { + 'search': 'bi-search', + 'tabular_analysis': 'bi-table', + 'web_search': 'bi-globe', + 'agent_tool_call': 'bi-robot', + 'generation': 'bi-lightning', + 'content_safety': 'bi-shield-check' + }; + return iconMap[stepType] || 'bi-stars'; +} + +// --------------------------------------------------------------------------- +// Polling (non-streaming mode) +// --------------------------------------------------------------------------- + +/** + * Start polling for pending thoughts while waiting for a non-streaming response. + * @param {string} conversationId - The current conversation ID. 
+ */ +export function startThoughtPolling(conversationId) { + if (!conversationId) return; + if (!window.appSettings?.enable_thoughts) return; + + stopThoughtPolling(); // clear any previous interval + lastSeenThoughtIndex = -1; + + thoughtPollingInterval = setInterval(() => { + fetch(`/api/conversations/${conversationId}/thoughts/pending`, { + credentials: 'same-origin' + }) + .then(r => r.json()) + .then(data => { + if (data.thoughts && data.thoughts.length > 0) { + const latest = data.thoughts[data.thoughts.length - 1]; + if (latest.step_index > lastSeenThoughtIndex) { + lastSeenThoughtIndex = latest.step_index; + const icon = getThoughtIcon(latest.step_type); + updateLoadingIndicatorText(latest.content, icon); + } + } + }) + .catch(() => { /* ignore polling errors */ }); + }, 2000); +} + +/** + * Stop the thought polling interval. + */ +export function stopThoughtPolling() { + if (thoughtPollingInterval) { + clearInterval(thoughtPollingInterval); + thoughtPollingInterval = null; + } + lastSeenThoughtIndex = -1; +} + +// --------------------------------------------------------------------------- +// Streaming handler +// --------------------------------------------------------------------------- + +/** + * Handle a streaming thought event received via SSE. + * Updates the streaming message placeholder with a styled thought indicator. + * When actual content starts streaming, updateStreamingMessage() will overwrite this. 
+ * @param {object} thoughtData - { step_index, step_type, content } + */ +export function handleStreamingThought(thoughtData) { + // Find the streaming message's content area + const messageElement = document.querySelector('[data-message-id^="temp_ai_"]'); + if (!messageElement) return; + + const contentElement = messageElement.querySelector('.message-text'); + if (!contentElement) return; + + const icon = getThoughtIcon(thoughtData.step_type); + // Replace entire content with styled thought indicator (visually distinct from AI response) + contentElement.innerHTML = `
+ + ${escapeHtml(thoughtData.content)} + +
`; +} + +// --------------------------------------------------------------------------- +// Per-message collapsible: toggle button + container HTML +// --------------------------------------------------------------------------- + +/** + * Create HTML for the thoughts toggle button and hidden container. + * Returns an object with { toggleHtml, containerHtml }. + * @param {string} messageId + */ +export function createThoughtsToggleHtml(messageId) { + if (!window.appSettings?.enable_thoughts) { + return { toggleHtml: '', containerHtml: '' }; + } + + const containerId = `thoughts-${messageId || Date.now()}`; + const toggleHtml = ``; + const containerHtml = `
Loading thoughts...
`; + + return { toggleHtml, containerHtml }; +} + +/** + * Attach event listener for the thoughts toggle button inside a message div. + * @param {HTMLElement} messageDiv + * @param {string} messageId + * @param {string} conversationId + */ +export function attachThoughtsToggleListener(messageDiv, messageId, conversationId) { + const toggleBtn = messageDiv.querySelector('.thoughts-toggle-btn'); + if (!toggleBtn) return; + + toggleBtn.addEventListener('click', () => { + const targetId = toggleBtn.getAttribute('aria-controls'); + const container = messageDiv.querySelector(`#${targetId}`); + if (!container) return; + + // Store scroll position + const scrollContainer = document.getElementById('chat-messages-container'); + const currentScroll = scrollContainer?.scrollTop || window.pageYOffset; + + const isExpanded = !container.classList.contains('d-none'); + if (isExpanded) { + container.classList.add('d-none'); + toggleBtn.setAttribute('aria-expanded', 'false'); + toggleBtn.title = 'Show processing thoughts'; + toggleBtn.innerHTML = ''; + } else { + container.classList.remove('d-none'); + toggleBtn.setAttribute('aria-expanded', 'true'); + toggleBtn.title = 'Hide processing thoughts'; + toggleBtn.innerHTML = ''; + + // Lazy-load thoughts on first expand + if (container.innerHTML.includes('Loading thoughts')) { + loadThoughtsForMessage(conversationId, messageId, container); + } + } + + // Restore scroll position + setTimeout(() => { + if (scrollContainer) { + scrollContainer.scrollTop = currentScroll; + } else { + window.scrollTo(0, currentScroll); + } + }, 10); + }); +} + +// --------------------------------------------------------------------------- +// Fetch + render thoughts for a message +// --------------------------------------------------------------------------- + +/** + * Fetch thoughts for a specific message from the API and render them. 
+ * @param {string} conversationId + * @param {string} messageId + * @param {HTMLElement} container + */ +function loadThoughtsForMessage(conversationId, messageId, container) { + fetch(`/api/conversations/${conversationId}/messages/${messageId}/thoughts`, { + credentials: 'same-origin' + }) + .then(r => r.json()) + .then(data => { + if (!data.enabled) { + container.innerHTML = '
Processing thoughts are disabled.
'; + return; + } + if (!data.thoughts || data.thoughts.length === 0) { + container.innerHTML = '
No processing thoughts recorded for this message.
'; + return; + } + container.innerHTML = renderThoughtsList(data.thoughts); + }) + .catch(err => { + console.error('Error loading thoughts:', err); + container.innerHTML = '
Failed to load processing thoughts.
'; + }); +} + +/** + * Render a list of thought steps as HTML. + * @param {Array} thoughts + * @returns {string} HTML string + */ +function renderThoughtsList(thoughts) { + let html = '
'; + thoughts.forEach(t => { + const icon = getThoughtIcon(t.step_type); + const durationStr = t.duration_ms != null ? `(${t.duration_ms}ms)` : ''; + html += `
+ + ${escapeHtml(t.content || '')} + ${durationStr} +
`; + }); + html += '
'; + return html; +} diff --git a/application/single_app/templates/_sidebar_nav.html b/application/single_app/templates/_sidebar_nav.html index a0bceee8..33a89b04 100644 --- a/application/single_app/templates/_sidebar_nav.html +++ b/application/single_app/templates/_sidebar_nav.html @@ -287,6 +287,11 @@ GPT Configuration +