feat: add Docker Compose configuration for MongoDB, Redis, FastAPI, Chat, Worker, Flower, and Frontend services; include health checks and volume management
Some checks failed
CI/CD Pipeline / Lint Code (push) Failing after 7m34s
CI/CD Pipeline / Run Tests (push) Has been skipped
CI/CD Pipeline / Security Scanning (push) Has been skipped
CI/CD Pipeline / Build and Push Docker Images (api) (push) Has been skipped
CI/CD Pipeline / Build and Push Docker Images (chat) (push) Has been skipped
CI/CD Pipeline / Build and Push Docker Images (frontend) (push) Has been skipped
CI/CD Pipeline / Build and Push Docker Images (worker) (push) Has been skipped
CI/CD Pipeline / Deploy to Staging (push) Has been skipped
CI/CD Pipeline / Deploy to Production (push) Has been skipped
CI/CD Pipeline / Generate Documentation (push) Failing after 7m59s

Commit 73c352128b (parent e9c2b18bf0), 2025-10-21 11:45:08 +02:00
3 changed files with 95 additions and 23 deletions


@@ -2,8 +2,9 @@ services:
   # MongoDB Database
   mongodb:
     image: docker.io/library/mongo:7-jammy
-    container_name: datacenter-docs-mongodb-dev
+    container_name: datacenter-docs-mongodb
     hostname: mongodb
+    restart: always
     ports:
       - "${MONGODB_PORT}:27017"
     env_file:
@@ -26,8 +27,9 @@ services:
   # Redis Cache & Message Broker
   redis:
     image: docker.io/library/redis:7-alpine
-    container_name: datacenter-docs-redis-dev
+    container_name: datacenter-docs-redis
     hostname: redis
+    restart: always
     ports:
       - "${REDIS_PORT}:6379"
     env_file:
@@ -48,8 +50,9 @@ services:
     build:
       context: ../..
       dockerfile: deploy/docker/Dockerfile.api
-    container_name: datacenter-docs-api-dev
+    container_name: datacenter-docs-api
     hostname: api
+    restart: always
     ports:
       - "${API_PORT}:8000"
     env_file:
@@ -67,15 +70,15 @@ services:
condition: service_healthy condition: service_healthy
networks: networks:
- datacenter-network - datacenter-network
restart: unless-stopped
# Chat Service # Chat Service
chat: chat:
build: build:
context: ../.. context: ../..
dockerfile: deploy/docker/Dockerfile.chat dockerfile: deploy/docker/Dockerfile.chat
container_name: datacenter-docs-chat-dev container_name: datacenter-docs-chat
hostname: chat hostname: chat
restart: always
ports: ports:
- "${CHAT_PORT}:8001" - "${CHAT_PORT}:8001"
env_file: env_file:
@@ -94,15 +97,15 @@ services:
         condition: service_healthy
     networks:
       - datacenter-network
-    restart: unless-stopped

   # Celery Worker
   worker:
     build:
       context: ../..
       dockerfile: deploy/docker/Dockerfile.worker
-    container_name: datacenter-docs-worker-dev
+    container_name: datacenter-docs-worker
     hostname: worker
+    restart: always
     env_file:
       - ../../.env
     volumes:
@@ -118,13 +121,19 @@ services:
         condition: service_healthy
     networks:
       - datacenter-network
-    restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "celery -A datacenter_docs.workers.celery_app inspect ping -d celery@worker || exit 1"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 40s

   # Flower - Celery Monitoring
   flower:
     image: docker.io/mher/flower:2.0
-    container_name: datacenter-docs-flower-dev
+    container_name: datacenter-docs-flower
     hostname: flower
+    restart: always
     ports:
       - "${FLOWER_PORT}:5555"
     env_file:
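
The new worker healthcheck shells out to celery inspect ping, which sends a control-plane ping over the broker and waits for the named node to answer; with the values above, Docker gives the worker 40s to start and marks it unhealthy after three failed 30-second probes. The same liveness check can be run from Python when debugging locally. A minimal sketch, assuming the module datacenter_docs.workers.celery_app exposes a Celery instance named app (the attribute name is an assumption; the compose healthcheck only references the module path):

# Liveness probe equivalent to the compose healthcheck, using Celery's
# control API instead of the CLI.
from datacenter_docs.workers.celery_app import app  # "app" attribute name is assumed

def worker_is_alive(node: str = "celery@worker", timeout: float = 10.0) -> bool:
    """Return True if the named worker answers a broker-level ping."""
    replies = app.control.ping(destination=[node], timeout=timeout)
    # ping() returns e.g. [{"celery@worker": {"ok": "pong"}}] when the worker
    # is reachable, or an empty list when it is down or unreachable.
    return any(node in reply for reply in replies)

if __name__ == "__main__":
    raise SystemExit(0 if worker_is_alive() else 1)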
@@ -136,15 +145,15 @@ services:
         condition: service_healthy
     networks:
       - datacenter-network
-    restart: unless-stopped

   # Frontend
   frontend:
     build:
       context: ../..
       dockerfile: deploy/docker/Dockerfile.frontend
-    container_name: datacenter-docs-frontend-dev
+    container_name: datacenter-docs-frontend
     hostname: frontend
+    restart: always
     ports:
       - "${FRONTEND_PORT}:80"
     env_file:
@@ -154,27 +163,26 @@ services:
       - chat
     networks:
       - datacenter-network
-    restart: unless-stopped

 volumes:
   mongodb-data:
-    name: datacenter-docs-mongodb-data-dev
+    name: datacenter-docs-mongodb-data
   mongodb-config:
-    name: datacenter-docs-mongodb-config-dev
+    name: datacenter-docs-mongodb-config
   redis-data:
-    name: datacenter-docs-redis-data-dev
+    name: datacenter-docs-redis-data
   api-logs:
-    name: datacenter-docs-api-logs-dev
+    name: datacenter-docs-api-logs
   chat-logs:
-    name: datacenter-docs-chat-logs-dev
+    name: datacenter-docs-chat-logs
   chat-data:
-    name: datacenter-docs-chat-data-dev
+    name: datacenter-docs-chat-data
   worker-logs:
-    name: datacenter-docs-worker-logs-dev
+    name: datacenter-docs-worker-logs
   worker-output:
-    name: datacenter-docs-worker-output-dev
+    name: datacenter-docs-worker-output

 networks:
   datacenter-network:
-    name: datacenter-docs-network-dev
+    name: datacenter-docs-network
     driver: bridge

scripts/test_chat_rag.py (new file, 59 lines)

@@ -0,0 +1,59 @@
#!/usr/bin/env python3
"""
Test script for RAG system in chat service
"""
import asyncio
import sys
from pathlib import Path

# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from datacenter_docs.chat.agent import DocumentationAgent
from datacenter_docs.utils.llm_client import get_llm_client


async def test_rag_search():
    """Test RAG search and retrieval"""
    print("🧪 Testing RAG system...\n")

    # Initialize agent
    print("1️⃣ Initializing DocumentationAgent...")
    agent = DocumentationAgent(vector_store_path="./data/chroma_db")
    print("✅ Agent initialized\n")

    # Test queries (Italian, matching the indexed documentation)
    test_queries = [
        "Come è configurato smarthome-services?",   # How is smarthome-services configured?
        "Quali VM sono in esecuzione?",             # Which VMs are running?
        "Come faccio il backup dei container?",     # How do I back up the containers?
        "Configurazione di storage su Proxmox",     # Storage configuration on Proxmox
    ]

    for i, query in enumerate(test_queries, 1):
        print(f"\n{'='*60}")
        print(f"Query {i}: {query}")
        print('='*60)

        # Search documentation
        results = await agent.search_documentation(query, limit=3)

        if results:
            print(f"\n📚 Found {len(results)} results:\n")
            for j, result in enumerate(results, 1):
                print(f"  {j}. Section: {result['section']}")
                print(f"     Relevance: {result['relevance_score']:.3f} ({result['relevance_score']*100:.1f}%)")
                print(f"     Source: {Path(result['source']).name}")
                print(f"     Content preview: {result['content'][:100]}...")
                print()
        else:
            print("❌ No results found")

    print("\n" + "="*60)
    print("✅ RAG test completed successfully!")
    print("="*60)


if __name__ == "__main__":
    asyncio.run(test_rag_search())
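
Each result the script prints is a plain dict assembled by DocumentationAgent.search_documentation (see the diff further down). A small sketch of the expected shape, useful when turning these prints into assertions; the TypedDict is illustrative and not part of the codebase:

from typing import TypedDict

class SearchResult(TypedDict):
    """Shape of one search_documentation entry (illustrative only)."""
    content: str            # chunk text retrieved from the vector store
    section: str            # metadata "section", defaults to "unknown"
    source: str             # path of the indexed source document
    relevance_score: float  # similarity in [0, 1], higher is better
    last_updated: str       # metadata "indexed_at", may be empty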


@@ -177,16 +177,21 @@ class DocumentationAgent:
             # Format results
             formatted_results = []
             for doc, score in results:
+                # ChromaDB returns distance scores (lower is better)
+                # Normalize to similarity score (0-1, higher is better)
+                # Using 1/(1+distance) ensures values are always between 0 and 1
+                relevance_score = 1.0 / (1.0 + score)
                 formatted_results.append(
                     {
                         "content": doc.page_content,
                         "section": doc.metadata.get("section", "unknown"),
                         "source": doc.metadata.get("source", ""),
-                        "relevance_score": float(1 - score),  # Convert distance to similarity
+                        "relevance_score": float(relevance_score),
                         "last_updated": doc.metadata.get("indexed_at", ""),
                     }
                 )
-                logger.info(f"  ✓ Section: {doc.metadata.get('section')} (relevance: {(1-score)*100:.1f}%)")
+                logger.info(f"  ✓ Section: {doc.metadata.get('section')} (relevance: {relevance_score*100:.1f}%, distance: {score:.3f})")
             return formatted_results
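
The switch from 1 - score to 1 / (1 + score) matters because the distances ChromaDB returns are not bounded by 1 (L2 distances in particular can grow arbitrarily large), so the old formula could report negative relevance. A quick comparison of the two mappings on sample distances:

# Compare the old and new distance-to-similarity mappings.
for d in (0.0, 0.3, 0.9, 1.4, 3.0):
    old = 1 - d        # goes negative once the distance exceeds 1
    new = 1 / (1 + d)  # stays in (0, 1] for any non-negative distance
    print(f"distance={d:<4} old={old:+.3f} new={new:.3f}")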