diff --git a/deploy/docker/Dockerfile.frontend b/deploy/docker/Dockerfile.frontend index 8b74bfe..e17f899 100644 --- a/deploy/docker/Dockerfile.frontend +++ b/deploy/docker/Dockerfile.frontend @@ -33,9 +33,9 @@ COPY deploy/docker/nginx.conf /etc/nginx/conf.d/default.conf # Expose port EXPOSE 80 -# Health check +# Health check (use curl which is available in nginx:alpine) HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD wget --no-verbose --tries=1 --spider http://localhost/health || exit 1 + CMD curl -f http://127.0.0.1/health || exit 1 # Run nginx CMD ["nginx", "-g", "daemon off;"] diff --git a/deploy/docker/nginx.conf b/deploy/docker/nginx.conf index 4caa547..8ddb124 100644 --- a/deploy/docker/nginx.conf +++ b/deploy/docker/nginx.conf @@ -36,7 +36,23 @@ server { proxy_set_header X-Forwarded-Proto $scheme; } - # WebSocket for chat + # Socket.IO for chat (Socket.IO uses /socket.io/ path by default) + location /socket.io/ { + proxy_pass http://chat:8001/socket.io/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Socket.IO specific settings + proxy_buffering off; + proxy_read_timeout 86400; + } + + # WebSocket for chat (alternative endpoint) location /ws/ { proxy_pass http://chat:8001/; proxy_http_version 1.1; diff --git a/frontend/package.json b/frontend/package.json index b1f4d12..e14cff3 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -12,7 +12,8 @@ "@emotion/react": "^11.11.0", "@emotion/styled": "^11.11.0", "socket.io-client": "^4.6.0", - "markdown-it": "^14.0.0", + "react-markdown": "^9.0.1", + "remark-gfm": "^4.0.0", "date-fns": "^3.0.0" }, "scripts": { diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 4f6ad37..7a3d45d 100644 --- a/frontend/src/App.jsx +++ 
b/frontend/src/App.jsx @@ -3,20 +3,25 @@ import { AppBar, Toolbar, Typography, Container, Box, Paper, TextField, Button, List, ListItem, ListItemText, CircularProgress, Chip, Grid, Card, CardContent, - Tabs, Tab, Divider, IconButton + Tabs, Tab, Divider, IconButton, Select, MenuItem, + FormControl, InputLabel } from '@mui/material'; import { Send as SendIcon, Search as SearchIcon, Description as DocIcon, Support as SupportIcon, - CloudUpload as UploadIcon + CloudUpload as UploadIcon, + Language as LanguageIcon } from '@mui/icons-material'; import axios from 'axios'; import io from 'socket.io-client'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; -const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:8000'; -const CHAT_URL = import.meta.env.VITE_CHAT_URL || 'http://localhost:8001'; +// Use relative URLs to work with nginx proxy in production +const API_URL = import.meta.env.VITE_API_URL || (typeof window !== 'undefined' ? window.location.origin + '/api' : 'http://localhost:8000'); +const CHAT_URL = import.meta.env.VITE_CHAT_URL || (typeof window !== 'undefined' ? 
window.location.origin : 'http://localhost:8001'); function App() { const [activeTab, setActiveTab] = useState(0); @@ -56,6 +61,7 @@ function ChatInterface() { const [input, setInput] = useState(''); const [loading, setLoading] = useState(false); const [socket, setSocket] = useState(null); + const [language, setLanguage] = useState('en'); const messagesEndRef = useRef(null); useEffect(() => { @@ -90,8 +96,8 @@ function ChatInterface() { setMessages(prev => [...prev, userMessage]); setLoading(true); - - socket.emit('chat', { message: input, history: messages }); + + socket.emit('chat', { message: input, history: messages, language: language }); setInput(''); }; @@ -99,11 +105,32 @@ function ChatInterface() { - - Technical Support Chat - - AI-powered assistant with access to datacenter documentation - + + + Technical Support Chat + + AI-powered assistant with access to datacenter documentation + + + + Language + + @@ -117,7 +144,38 @@ function ChatInterface() { maxWidth: '70%', bgcolor: msg.role === 'user' ? 'primary.light' : 'grey.100' }}> - {msg.content} + {msg.role === 'user' ? 
( + {msg.content} + ) : ( + + + {msg.content} + + + )} {msg.related_docs && ( {msg.related_docs.map((doc, i) => ( diff --git a/src/datacenter_docs/chat/agent.py b/src/datacenter_docs/chat/agent.py index 90fd7ea..b11275b 100644 --- a/src/datacenter_docs/chat/agent.py +++ b/src/datacenter_docs/chat/agent.py @@ -8,10 +8,10 @@ from datetime import datetime from pathlib import Path from typing import Any, Dict, List, Optional -from langchain.embeddings import HuggingFaceEmbeddings +from langchain_community.embeddings import HuggingFaceEmbeddings +from langchain_community.vectorstores import Chroma from langchain.schema import Document from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.vectorstores import Chroma from ..mcp.client import MCPClient from ..utils.llm_client import LLMClient @@ -27,7 +27,7 @@ class DocumentationAgent: def __init__( self, - mcp_client: MCPClient, + mcp_client: Optional[MCPClient] = None, llm_client: Optional[LLMClient] = None, vector_store_path: str = "./data/chroma_db", ): @@ -35,7 +35,7 @@ class DocumentationAgent: Initialize Documentation Agent. 
Args: - mcp_client: MCP client for infrastructure connectivity + mcp_client: MCP client for infrastructure connectivity (optional for chat-only mode) llm_client: LLM client (uses default if not provided) vector_store_path: Path to vector store directory """ @@ -43,11 +43,17 @@ class DocumentationAgent: self.client = llm_client or LLMClient() self.vector_store_path = Path(vector_store_path) - # Initialize embeddings and vector store - self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") + # Initialize embeddings and vector store (optional - for RAG functionality) + self.embeddings: Optional[HuggingFaceEmbeddings] = None + self.vector_store: Optional[Chroma] = None - self.vector_store: Chroma - self._load_vector_store() + try: + self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") + self._load_vector_store() + logger.info("Vector store initialized successfully") + except Exception as e: + logger.warning(f"Vector store not available (will use LLM without RAG): {e}") + # Agent will work without document search, using only LLM knowledge def _load_vector_store(self) -> None: """Load or create vector store""" @@ -119,18 +125,25 @@ class DocumentationAgent: Returns: List of relevant documentation chunks with metadata """ + # If vector store not available, return empty list + if self.vector_store is None: + logger.warning("⚠️ Vector store not available, skipping document search") + return [] + try: + logger.info(f"🔍 Searching documentation for: '{query[:100]}'...") + # Build filter if sections specified filter_dict = None if sections: filter_dict = {"section": {"$in": sections}} # Perform similarity search - results: list[Any] = [] - if self.vector_store is not None: - results = self.vector_store.similarity_search_with_score( - query=query, k=limit, filter=filter_dict # type: ignore[arg-type] - ) + results: list[Any] = self.vector_store.similarity_search_with_score( + query=query, k=limit, 
filter=filter_dict # type: ignore[arg-type] + ) + + logger.info(f"📚 Found {len(results)} relevant document chunks") # Format results formatted_results = [] @@ -144,11 +157,12 @@ class DocumentationAgent: "last_updated": doc.metadata.get("indexed_at", ""), } ) + logger.info(f" ✓ Section: {doc.metadata.get('section')} (relevance: {(1-score)*100:.1f}%)") return formatted_results except Exception as e: - logger.error(f"Documentation search failed: {e}") + logger.error(f"❌ Documentation search failed: {e}", exc_info=True) return [] async def resolve_ticket( @@ -265,7 +279,7 @@ Respond in JSON format: } async def chat_with_context( - self, user_message: str, conversation_history: List[Dict[str, str]] + self, user_message: str, conversation_history: List[Dict[str, str]], language: str = "en" ) -> Dict[str, Any]: """ Chat with user while autonomously searching documentation @@ -273,6 +287,7 @@ Respond in JSON format: Args: user_message: User's message conversation_history: Previous messages + language: Language code for response (en, it, es, fr, de, pt, zh, ja) Returns: Response with documentation references @@ -284,6 +299,23 @@ Respond in JSON format: # Build context context = self._build_context(relevant_docs) + # Language names mapping + language_names = { + "en": "English", + "it": "Italian", + "es": "Spanish", + "fr": "French", + "de": "German", + "pt": "Portuguese", + "zh": "Chinese", + "ja": "Japanese" + } + + language_instruction = "" + if language != "en": + lang_name = language_names.get(language, language) + language_instruction = f"\n\n**IMPORTANT: You MUST respond in {lang_name}. Translate your entire response to {lang_name}.**" + # Build conversation system_prompt = f"""You are a helpful datacenter technical support assistant. You have access to comprehensive datacenter documentation. 
@@ -296,6 +328,7 @@ When answering questions: **Available Documentation Context:** {context} +{language_instruction} Answer naturally and helpfully.""" diff --git a/src/datacenter_docs/chat/main.py b/src/datacenter_docs/chat/main.py index 41cc8b1..81f8c1b 100644 --- a/src/datacenter_docs/chat/main.py +++ b/src/datacenter_docs/chat/main.py @@ -1,23 +1,94 @@ """ -Chat server stub for development. - -TODO: Implement full chat server with WebSocket support. -Currently this is a minimal stub to allow the development environment to start. +Chat server with Socket.IO support for real-time communication. """ +import asyncio import logging +from pathlib import Path +from typing import Any, Dict +import socketio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware +from .agent import DocumentationAgent + # Configure logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) +# Global agent instance +agent = None + + +async def index_documentation_if_needed() -> None: + """Index documentation on first startup if needed.""" + vector_store_path = Path("/app/data/chroma_db") + index_marker = vector_store_path / ".indexed" + docs_path = Path("/app/output") + + if not index_marker.exists() and docs_path.exists(): + logger.info("=" * 60) + logger.info("First Time Setup - Indexing Documentation") + logger.info("=" * 60) + logger.info("This may take a few minutes...") + + try: + # Create temporary agent for indexing + temp_agent = DocumentationAgent( + mcp_client=None, + llm_client=None, + vector_store_path=str(vector_store_path) + ) + + # Index documentation + await temp_agent.index_documentation(docs_path) + + # Create marker + vector_store_path.mkdir(parents=True, exist_ok=True) + index_marker.touch() + + logger.info("✓ Documentation indexed successfully!") + logger.info("=" * 60) + + except Exception as e: + logger.error(f"Failed to index documentation: {e}", exc_info=True) + logger.warning("Chat will work with limited 
functionality") + else: + if index_marker.exists(): + logger.info("✓ Vector store already initialized") + else: + logger.warning(f"Documentation path not found: {docs_path}") + + +# Initialize Documentation Agent (without MCP for chat-only mode) +async def initialize_agent() -> None: + """Initialize the documentation agent.""" + global agent + + try: + # Index documentation if needed + await index_documentation_if_needed() + + # Create agent with vector store access + agent = DocumentationAgent(mcp_client=None) # type: ignore + logger.info("Documentation Agent initialized successfully") + except Exception as e: + logger.warning(f"Failed to initialize Documentation Agent: {e}") + agent = None + +# Create Socket.IO server +sio = socketio.AsyncServer( + async_mode="asgi", + cors_allowed_origins="*", + logger=True, + engineio_logger=True, +) + # Create FastAPI app app = FastAPI( title="Datacenter Documentation Chat Server", - description="WebSocket-based chat interface for documentation queries (STUB - NOT IMPLEMENTED)", + description="WebSocket-based chat interface for documentation queries", version="1.0.0", ) @@ -31,31 +102,134 @@ app.add_middleware( ) +# Startup event +@app.on_event("startup") +async def startup_event() -> None: + """Initialize agent on startup.""" + await initialize_agent() + + +# Mount Socket.IO to FastAPI +socket_app = socketio.ASGIApp(sio, app) + + @app.get("/health") async def health_check() -> dict[str, str | bool]: """Health check endpoint.""" - return {"status": "ok", "service": "chat", "implemented": False} + return {"status": "ok", "service": "chat", "implemented": True} @app.get("/") async def root() -> dict[str, str]: """Root endpoint.""" return { - "message": "Chat server stub - not yet implemented", - "status": "stub", - "todo": "Implement WebSocket chat functionality", + "message": "Chat server with Socket.IO support", + "status": "active", + "socket_io": "enabled", } -# TODO: Implement WebSocket endpoint for chat -# 
# Socket.IO event handlers
@sio.event
async def connect(sid: str, environ: Dict[str, Any]) -> None:
    """Handle a new Socket.IO client connection.

    Acknowledges the connection by emitting a ``connection_response``
    event back to the connecting client only (room=sid).
    """
    logger.info(f"Client connected: {sid}")
    await sio.emit("connection_response", {"status": "connected", "sid": sid}, room=sid)


@sio.event
async def disconnect(sid: str) -> None:
    """Handle client disconnection (log only; no per-client state to clean up)."""
    logger.info(f"Client disconnected: {sid}")


def _as_payload(data: Any) -> Dict[str, Any]:
    """Coerce an incoming Socket.IO payload to a dict.

    Clients can emit bare strings or lists on any event; treating those as
    an empty payload keeps the handlers from raising AttributeError
    (``data.get`` on a non-dict) in the middle of event dispatch.
    """
    return data if isinstance(data, dict) else {}


def _bot_response(message: str, timestamp: Any, related_docs: Any = None) -> Dict[str, Any]:
    """Build the response envelope the frontend expects on the 'message' event."""
    return {
        "message": message,
        "type": "bot",
        "timestamp": timestamp,
        "related_docs": related_docs if related_docs is not None else [],
    }


@sio.event
async def message(sid: str, data: Dict[str, Any]) -> None:
    """Handle incoming chat messages on the generic 'message' event.

    Currently an echo endpoint (TODO: integrate with DocumentationAgent).
    """
    logger.info(f"Message from {sid}: {data}")
    payload = _as_payload(data)

    response = {
        "message": f"Received: {payload.get('message', '')}",
        "timestamp": payload.get("timestamp"),
        "type": "echo",
    }

    await sio.emit("message", response, room=sid)


@sio.event
async def chat_message(sid: str, data: Dict[str, Any]) -> None:
    """Handle chat messages on the alternative 'chat_message' event.

    Echo-only for now; replies on 'chat_response'.
    """
    logger.info(f"Chat message from {sid}: {data}")
    payload = _as_payload(data)
    user_message = payload.get("message", "")

    response = {
        "message": f"Chat server received: {user_message}",
        "timestamp": payload.get("timestamp"),
        "type": "response",
        "status": "ok",
    }

    await sio.emit("chat_response", response, room=sid)


@sio.event
async def chat(sid: str, data: Dict[str, Any]) -> None:
    """Handle the 'chat' event emitted by the frontend.

    Routes the message through the DocumentationAgent when it initialized
    successfully, falling back to a plain echo otherwise. The reply is
    emitted on the 'message' event, which is what the frontend listens to.
    """
    logger.info(f"Chat event from {sid}: {data}")
    payload = _as_payload(data)

    user_message = payload.get("message", "")
    conversation_history = payload.get("history", [])
    language = payload.get("language", "en")
    timestamp = payload.get("timestamp")

    if agent is not None:
        try:
            # Normalize frontend history entries to the {role, content} shape
            # the agent expects; anything not 'assistant' is treated as 'user'.
            agent_history = [
                {
                    "role": "assistant" if msg.get("role") == "assistant" else "user",
                    "content": msg.get("content", ""),
                }
                for msg in conversation_history
            ]

            ai_response = await agent.chat_with_context(
                user_message=user_message,
                conversation_history=agent_history,
                language=language,
            )

            response = _bot_response(
                ai_response.get("message", "I apologize, I couldn't generate a response."),
                timestamp,
                ai_response.get("related_docs", []),
            )

        except Exception as e:
            # Log full details server-side, but do not leak internal error
            # text (paths, stack fragments) to the end user.
            logger.error(f"Error in agent response: {e}", exc_info=True)
            response = _bot_response(
                "I apologize, but I encountered an internal error. Please try again.",
                timestamp,
            )
    else:
        # Fallback if agent not initialized
        response = _bot_response(f"Echo (Agent not initialized): {user_message}", timestamp)

    # Frontend listens to 'message' event
    await sio.emit("message", response, room=sid)


if __name__ == "__main__":
    import uvicorn

    logger.info("Starting chat server with Socket.IO on port 8001...")
    # Use socket_app instead of app to enable Socket.IO
    uvicorn.run(socket_app, host="0.0.0.0", port=8001)