feat: add multilingual chat support with markdown rendering

- Fix Socket.IO proxy configuration in nginx for chat connectivity
- Add Socket.IO path routing (/socket.io/) with WebSocket upgrade support
- Fix frontend healthcheck to use curl instead of wget
- Add react-markdown and remark-gfm for proper markdown rendering
- Implement language selector in chat interface (8 languages supported)
- Add language parameter to chat agent and LLM prompts
- Support English, Italian, Spanish, French, German, Portuguese, Chinese, Japanese

This resolves the chat connection issues and enables users to receive
AI responses in their preferred language with properly formatted markdown.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 6f5deb0879 (parent 8c2fa6af47), 2025-10-20 19:14:38 +02:00
6 changed files with 329 additions and 47 deletions

File: frontend Dockerfile

@@ -33,9 +33,9 @@ COPY deploy/docker/nginx.conf /etc/nginx/conf.d/default.conf
# Expose port
EXPOSE 80
-# Health check
+# Health check (use curl which is available in nginx:alpine)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD wget --no-verbose --tries=1 --spider http://localhost/health || exit 1
+    CMD curl -f http://127.0.0.1/health || exit 1
# Run nginx
CMD ["nginx", "-g", "daemon off;"]

File: deploy/docker/nginx.conf

@@ -36,7 +36,23 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
}
-# WebSocket for chat
+# Socket.IO for chat (Socket.IO uses /socket.io/ path by default)
+location /socket.io/ {
+    proxy_pass http://chat:8001/socket.io/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "upgrade";
+    proxy_set_header Host $host;
+    proxy_set_header X-Real-IP $remote_addr;
+    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    proxy_set_header X-Forwarded-Proto $scheme;
+    # Socket.IO specific settings
+    proxy_buffering off;
+    proxy_read_timeout 86400;
+}
+
+# WebSocket for chat (alternative endpoint)
location /ws/ {
proxy_pass http://chat:8001/;
proxy_http_version 1.1;

File: frontend package.json

@@ -12,7 +12,8 @@
"@emotion/react": "^11.11.0",
"@emotion/styled": "^11.11.0",
"socket.io-client": "^4.6.0",
"markdown-it": "^14.0.0",
"react-markdown": "^9.0.1",
"remark-gfm": "^4.0.0",
"date-fns": "^3.0.0"
},
"scripts": {

File: frontend App component (React)

@@ -3,20 +3,25 @@ import {
AppBar, Toolbar, Typography, Container, Box, Paper,
TextField, Button, List, ListItem, ListItemText,
CircularProgress, Chip, Grid, Card, CardContent,
-Tabs, Tab, Divider, IconButton
+Tabs, Tab, Divider, IconButton, Select, MenuItem,
+FormControl, InputLabel
} from '@mui/material';
import {
Send as SendIcon,
Search as SearchIcon,
Description as DocIcon,
Support as SupportIcon,
-CloudUpload as UploadIcon
+CloudUpload as UploadIcon,
+Language as LanguageIcon
} from '@mui/icons-material';
import axios from 'axios';
import io from 'socket.io-client';
+import ReactMarkdown from 'react-markdown';
+import remarkGfm from 'remark-gfm';
-const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:8000';
-const CHAT_URL = import.meta.env.VITE_CHAT_URL || 'http://localhost:8001';
+// Use relative URLs to work with nginx proxy in production
+const API_URL = import.meta.env.VITE_API_URL || (typeof window !== 'undefined' ? window.location.origin + '/api' : 'http://localhost:8000');
+const CHAT_URL = import.meta.env.VITE_CHAT_URL || (typeof window !== 'undefined' ? window.location.origin : 'http://localhost:8001');
function App() {
const [activeTab, setActiveTab] = useState(0);
@@ -56,6 +61,7 @@ function ChatInterface() {
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const [socket, setSocket] = useState(null);
+const [language, setLanguage] = useState('en');
const messagesEndRef = useRef(null);
useEffect(() => {
@@ -91,7 +97,7 @@ function ChatInterface() {
setMessages(prev => [...prev, userMessage]);
setLoading(true);
-socket.emit('chat', { message: input, history: messages });
+socket.emit('chat', { message: input, history: messages, language: language });
setInput('');
};
@@ -99,12 +105,33 @@ function ChatInterface() {
<Grid container spacing={3}>
<Grid item xs={12} md={8}>
<Paper sx={{ height: '70vh', display: 'flex', flexDirection: 'column' }}>
<Box sx={{ p: 2, bgcolor: 'primary.main', color: 'white' }}>
<Box sx={{ p: 2, bgcolor: 'primary.main', color: 'white', display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
<Box>
<Typography variant="h6">Technical Support Chat</Typography>
<Typography variant="caption">
AI-powered assistant with access to datacenter documentation
</Typography>
</Box>
<FormControl size="small" sx={{ minWidth: 120, bgcolor: 'white', borderRadius: 1 }}>
<InputLabel id="language-select-label">Language</InputLabel>
<Select
labelId="language-select-label"
value={language}
label="Language"
onChange={(e) => setLanguage(e.target.value)}
startAdornment={<LanguageIcon sx={{ mr: 0.5, color: 'action.active' }} />}
>
<MenuItem value="en">🇬🇧 English</MenuItem>
<MenuItem value="it">🇮🇹 Italiano</MenuItem>
<MenuItem value="es">🇪🇸 Español</MenuItem>
<MenuItem value="fr">🇫🇷 Français</MenuItem>
<MenuItem value="de">🇩🇪 Deutsch</MenuItem>
<MenuItem value="pt">🇵🇹 Português</MenuItem>
<MenuItem value="zh">🇨🇳 中文</MenuItem>
<MenuItem value="ja">🇯🇵 日本語</MenuItem>
</Select>
</FormControl>
</Box>
<Box sx={{ flexGrow: 1, overflow: 'auto', p: 2 }}>
<List>
@@ -117,7 +144,38 @@ function ChatInterface() {
maxWidth: '70%',
bgcolor: msg.role === 'user' ? 'primary.light' : 'grey.100'
}}>
+{msg.role === 'user' ? (
<Typography variant="body1">{msg.content}</Typography>
+) : (
+  <Box sx={{
+    '& h1, & h2, & h3': { mt: 2, mb: 1 },
+    '& h1': { fontSize: '1.5rem', fontWeight: 600 },
+    '& h2': { fontSize: '1.3rem', fontWeight: 600 },
+    '& h3': { fontSize: '1.1rem', fontWeight: 600 },
+    '& p': { mb: 1 },
+    '& ul, & ol': { pl: 2, mb: 1 },
+    '& li': { mb: 0.5 },
+    '& code': {
+      bgcolor: 'rgba(0,0,0,0.05)',
+      p: 0.5,
+      borderRadius: 1,
+      fontFamily: 'monospace',
+      fontSize: '0.9em'
+    },
+    '& pre': {
+      bgcolor: 'rgba(0,0,0,0.05)',
+      p: 2,
+      borderRadius: 1,
+      overflow: 'auto',
+      '& code': { bgcolor: 'transparent', p: 0 }
+    },
+    '& hr': { my: 2, border: 'none', borderTop: '1px solid rgba(0,0,0,0.12)' }
+  }}>
+    <ReactMarkdown remarkPlugins={[remarkGfm]}>
+      {msg.content}
+    </ReactMarkdown>
+  </Box>
+)}
{msg.related_docs && (
<Box sx={{ mt: 1 }}>
{msg.related_docs.map((doc, i) => (

File: documentation agent (Python)

@@ -8,10 +8,10 @@ from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
-from langchain.embeddings import HuggingFaceEmbeddings
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import Chroma
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
from ..mcp.client import MCPClient
from ..utils.llm_client import LLMClient
@@ -27,7 +27,7 @@ class DocumentationAgent:
def __init__(
self,
-mcp_client: MCPClient,
+mcp_client: Optional[MCPClient] = None,
llm_client: Optional[LLMClient] = None,
vector_store_path: str = "./data/chroma_db",
):
@@ -35,7 +35,7 @@ class DocumentationAgent:
Initialize Documentation Agent.
Args:
-mcp_client: MCP client for infrastructure connectivity
+mcp_client: MCP client for infrastructure connectivity (optional for chat-only mode)
llm_client: LLM client (uses default if not provided)
vector_store_path: Path to vector store directory
"""
@@ -43,11 +43,17 @@ class DocumentationAgent:
self.client = llm_client or LLMClient()
self.vector_store_path = Path(vector_store_path)
-# Initialize embeddings and vector store
-self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-self.vector_store: Chroma
+# Initialize embeddings and vector store (optional - for RAG functionality)
+self.embeddings: Optional[HuggingFaceEmbeddings] = None
+self.vector_store: Optional[Chroma] = None
+try:
+    self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+    self._load_vector_store()
+    logger.info("Vector store initialized successfully")
+except Exception as e:
+    logger.warning(f"Vector store not available (will use LLM without RAG): {e}")
+    # Agent will work without document search, using only LLM knowledge
def _load_vector_store(self) -> None:
"""Load or create vector store"""
@@ -119,19 +125,26 @@ class DocumentationAgent:
Returns:
List of relevant documentation chunks with metadata
"""
+# If vector store not available, return empty list
+if self.vector_store is None:
+    logger.warning("⚠️ Vector store not available, skipping document search")
+    return []
try:
logger.info(f"🔍 Searching documentation for: '{query[:100]}'...")
# Build filter if sections specified
filter_dict = None
if sections:
filter_dict = {"section": {"$in": sections}}
# Perform similarity search
-results: list[Any] = []
-if self.vector_store is not None:
-    results = self.vector_store.similarity_search_with_score(
+results: list[Any] = self.vector_store.similarity_search_with_score(
query=query, k=limit, filter=filter_dict # type: ignore[arg-type]
)
logger.info(f"📚 Found {len(results)} relevant document chunks")
# Format results
formatted_results = []
for doc, score in results:
@@ -144,11 +157,12 @@ class DocumentationAgent:
"last_updated": doc.metadata.get("indexed_at", ""),
}
)
logger.info(f" ✓ Section: {doc.metadata.get('section')} (relevance: {(1-score)*100:.1f}%)")
return formatted_results
except Exception as e:
logger.error(f"Documentation search failed: {e}")
logger.error(f"Documentation search failed: {e}", exc_info=True)
return []
async def resolve_ticket(
@@ -265,7 +279,7 @@ Respond in JSON format:
}
async def chat_with_context(
-self, user_message: str, conversation_history: List[Dict[str, str]]
+self, user_message: str, conversation_history: List[Dict[str, str]], language: str = "en"
) -> Dict[str, Any]:
"""
Chat with user while autonomously searching documentation
@@ -273,6 +287,7 @@ Respond in JSON format:
Args:
user_message: User's message
conversation_history: Previous messages
+language: Language code for response (en, it, es, fr, de, pt, zh, ja)
Returns:
Response with documentation references
@@ -284,6 +299,23 @@ Respond in JSON format:
# Build context
context = self._build_context(relevant_docs)
+# Language names mapping
+language_names = {
+    "en": "English",
+    "it": "Italian",
+    "es": "Spanish",
+    "fr": "French",
+    "de": "German",
+    "pt": "Portuguese",
+    "zh": "Chinese",
+    "ja": "Japanese"
+}
+
+language_instruction = ""
+if language != "en":
+    lang_name = language_names.get(language, language)
+    language_instruction = f"\n\n**IMPORTANT: You MUST respond in {lang_name}. Translate your entire response to {lang_name}.**"
# Build conversation
system_prompt = f"""You are a helpful datacenter technical support assistant. You have access to comprehensive datacenter documentation.
@@ -296,6 +328,7 @@ When answering questions:
**Available Documentation Context:**
{context}
+{language_instruction}
Answer naturally and helpfully."""
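
For reference, a hedged usage sketch of the extended `chat_with_context` signature; the import path and the example strings are assumptions (in this stack the call is made by the chat server in the next file):

# Hypothetical driver for the new language parameter.
import asyncio
from agent import DocumentationAgent  # module path assumed

async def demo() -> None:
    doc_agent = DocumentationAgent(mcp_client=None)  # chat-only mode, no MCP
    reply = await doc_agent.chat_with_context(
        user_message="Qual e' la procedura di failover del cluster?",
        conversation_history=[],
        language="it",  # the prompt instruction forces an Italian response
    )
    print(reply["message"])
    print(reply.get("related_docs", []))

asyncio.run(demo())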

File: chat server (Python)

@@ -1,23 +1,94 @@
"""
-Chat server stub for development.
-TODO: Implement full chat server with WebSocket support.
-Currently this is a minimal stub to allow the development environment to start.
+Chat server with Socket.IO support for real-time communication.
"""
+import asyncio
+import logging
+from pathlib import Path
+from typing import Any, Dict
+import socketio
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
+from .agent import DocumentationAgent
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Global agent instance
+agent = None
+async def index_documentation_if_needed() -> None:
+    """Index documentation on first startup if needed."""
+    vector_store_path = Path("/app/data/chroma_db")
+    index_marker = vector_store_path / ".indexed"
+    docs_path = Path("/app/output")
+
+    if not index_marker.exists() and docs_path.exists():
+        logger.info("=" * 60)
+        logger.info("First Time Setup - Indexing Documentation")
+        logger.info("=" * 60)
+        logger.info("This may take a few minutes...")
+        try:
+            # Create temporary agent for indexing
+            temp_agent = DocumentationAgent(
+                mcp_client=None,
+                llm_client=None,
+                vector_store_path=str(vector_store_path)
+            )
+            # Index documentation
+            await temp_agent.index_documentation(docs_path)
+            # Create marker
+            vector_store_path.mkdir(parents=True, exist_ok=True)
+            index_marker.touch()
+            logger.info("✓ Documentation indexed successfully!")
+            logger.info("=" * 60)
+        except Exception as e:
+            logger.error(f"Failed to index documentation: {e}", exc_info=True)
+            logger.warning("Chat will work with limited functionality")
+    else:
+        if index_marker.exists():
+            logger.info("✓ Vector store already initialized")
+        else:
+            logger.warning(f"Documentation path not found: {docs_path}")
+
+# Initialize Documentation Agent (without MCP for chat-only mode)
+async def initialize_agent() -> None:
+    """Initialize the documentation agent."""
+    global agent
+    try:
+        # Index documentation if needed
+        await index_documentation_if_needed()
+        # Create agent with vector store access
+        agent = DocumentationAgent(mcp_client=None)  # type: ignore
+        logger.info("Documentation Agent initialized successfully")
+    except Exception as e:
+        logger.warning(f"Failed to initialize Documentation Agent: {e}")
+        agent = None
+
+# Create Socket.IO server
+sio = socketio.AsyncServer(
+    async_mode="asgi",
+    cors_allowed_origins="*",
+    logger=True,
+    engineio_logger=True,
+)
# Create FastAPI app
app = FastAPI(
title="Datacenter Documentation Chat Server",
description="WebSocket-based chat interface for documentation queries (STUB - NOT IMPLEMENTED)",
description="WebSocket-based chat interface for documentation queries",
version="1.0.0",
)
@@ -31,31 +102,134 @@ app.add_middleware(
)
+# Startup event
+@app.on_event("startup")
+async def startup_event() -> None:
+    """Initialize agent on startup."""
+    await initialize_agent()
+
+# Mount Socket.IO to FastAPI
+socket_app = socketio.ASGIApp(sio, app)
@app.get("/health")
async def health_check() -> dict[str, str | bool]:
"""Health check endpoint."""
return {"status": "ok", "service": "chat", "implemented": False}
return {"status": "ok", "service": "chat", "implemented": True}
@app.get("/")
async def root() -> dict[str, str]:
"""Root endpoint."""
return {
"message": "Chat server stub - not yet implemented",
"status": "stub",
"todo": "Implement WebSocket chat functionality",
"message": "Chat server with Socket.IO support",
"status": "active",
"socket_io": "enabled",
}
-# TODO: Implement WebSocket endpoint for chat
-# @app.websocket("/ws")
-# async def websocket_endpoint(websocket: WebSocket):
-#     await websocket.accept()
-#     # Implement chat logic here
+# Socket.IO event handlers
+@sio.event
+async def connect(sid: str, environ: Dict[str, Any]) -> None:
+    """Handle client connection."""
+    logger.info(f"Client connected: {sid}")
+    await sio.emit("connection_response", {"status": "connected", "sid": sid}, room=sid)
+
+@sio.event
+async def disconnect(sid: str) -> None:
+    """Handle client disconnection."""
+    logger.info(f"Client disconnected: {sid}")
+
+@sio.event
+async def message(sid: str, data: Dict[str, Any]) -> None:
+    """Handle incoming chat messages."""
+    logger.info(f"Message from {sid}: {data}")
+    # Echo the message back for now (TODO: integrate with DocumentationAgent)
+    response = {
+        "message": f"Received: {data.get('message', '')}",
+        "timestamp": data.get("timestamp"),
+        "type": "echo",
+    }
+    await sio.emit("message", response, room=sid)
+@sio.event
+async def chat_message(sid: str, data: Dict[str, Any]) -> None:
+    """Handle chat messages (alternative event name)."""
+    logger.info(f"Chat message from {sid}: {data}")
+    # TODO: Integrate with DocumentationAgent for intelligent responses
+    user_message = data.get("message", "")
+    response = {
+        "message": f"Chat server received: {user_message}",
+        "timestamp": data.get("timestamp"),
+        "type": "response",
+        "status": "ok",
+    }
+    await sio.emit("chat_response", response, room=sid)
+@sio.event
+async def chat(sid: str, data: Dict[str, Any]) -> None:
+    """Handle chat event from frontend."""
+    logger.info(f"Chat event from {sid}: {data}")
+    user_message = data.get("message", "")
+    conversation_history = data.get("history", [])
+    language = data.get("language", "en")
+
+    # Use Documentation Agent for intelligent responses
+    if agent is not None:
+        try:
+            # Convert frontend history format to agent format
+            agent_history = []
+            for msg in conversation_history:
+                role = "assistant" if msg.get("role") == "assistant" else "user"
+                agent_history.append({"role": role, "content": msg.get("content", "")})
+            # Get AI response
+            ai_response = await agent.chat_with_context(
+                user_message=user_message, conversation_history=agent_history, language=language
+            )
+            response = {
+                "message": ai_response.get("message", "I apologize, I couldn't generate a response."),
+                "type": "bot",
+                "timestamp": data.get("timestamp"),
+                "related_docs": ai_response.get("related_docs", []),
+            }
+        except Exception as e:
+            logger.error(f"Error in agent response: {e}", exc_info=True)
+            response = {
+                "message": f"I apologize, but I encountered an error: {str(e)}. Please try again.",
+                "type": "bot",
+                "timestamp": data.get("timestamp"),
+                "related_docs": [],
+            }
+    else:
+        # Fallback if agent not initialized
+        response = {
+            "message": f"Echo (Agent not initialized): {user_message}",
+            "type": "bot",
+            "timestamp": data.get("timestamp"),
+            "related_docs": [],
+        }
+
+    # Frontend listens to 'message' event
+    await sio.emit("message", response, room=sid)
if __name__ == "__main__":
import uvicorn
logger.info("Starting chat server stub on port 8001...")
uvicorn.run(app, host="0.0.0.0", port=8001)
logger.info("Starting chat server with Socket.IO on port 8001...")
# Use socket_app instead of app to enable Socket.IO
uvicorn.run(socket_app, host="0.0.0.0", port=8001)
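
One possible follow-up for the echo TODO left in the plain `message` handler, sketched under the assumption that the global `agent` is initialized; this is not part of the commit:

# Sketch: route plain 'message' events through the agent like the 'chat'
# handler does, keeping the echo as a fallback when the agent is missing.
@sio.event
async def message(sid: str, data: Dict[str, Any]) -> None:
    text = data.get("message", "")
    if agent is None:
        await sio.emit("message", {"message": f"Received: {text}", "type": "echo"}, room=sid)
        return
    result = await agent.chat_with_context(
        user_message=text,
        conversation_history=data.get("history", []),
        language=data.get("language", "en"),
    )
    await sio.emit(
        "message",
        {"message": result.get("message", ""), "type": "bot",
         "related_docs": result.get("related_docs", [])},
        room=sid,
    )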