feat: enhance chat service with documentation indexing and improved Docker configuration
Some checks failed: CI/CD Pipeline / Generate Documentation failed after 7m41s and Lint Code failed after 7m44s; Run Tests, Security Scanning, the Docker image builds (api, chat, frontend, worker), and the deploys to staging and production were skipped.

Commit 27dd9e00b6 (parent 6f5deb0879)
Date: 2025-10-20 19:15:32 +02:00
14 changed files with 784 additions and 94 deletions


@@ -3,8 +3,9 @@ Configuration management using Pydantic Settings
 """
 from functools import lru_cache
-from typing import List
+from typing import Any, Dict, List
+from pydantic import model_validator
 from pydantic_settings import BaseSettings
@@ -67,10 +68,25 @@ class Settings(BaseSettings):
     VECTOR_STORE_PATH: str = "./data/chroma_db"
     EMBEDDING_MODEL: str = "sentence-transformers/all-MiniLM-L6-v2"
 
-    # Celery
+    # Celery (uses REDIS_URL as default if not set)
     CELERY_BROKER_URL: str = "redis://localhost:6379/0"
     CELERY_RESULT_BACKEND: str = "redis://localhost:6379/0"
 
+    @model_validator(mode="before")
+    @classmethod
+    def set_celery_defaults(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+        """Use REDIS_URL as default for Celery if not explicitly set"""
+        redis_url = values.get("REDIS_URL", "redis://localhost:6379/0")
+        # Only set if not already provided via environment
+        if "CELERY_BROKER_URL" not in values or not values.get("CELERY_BROKER_URL"):
+            values["CELERY_BROKER_URL"] = redis_url
+        if "CELERY_RESULT_BACKEND" not in values or not values.get("CELERY_RESULT_BACKEND"):
+            values["CELERY_RESULT_BACKEND"] = redis_url
+        return values
+
     class Config:
         env_file = ".env"
         case_sensitive = True
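
The validator makes REDIS_URL the single source of truth for Celery connectivity while still honoring explicit CELERY_* overrides. A minimal, runnable sketch of the resulting behavior; DemoSettings and the URLs below are illustrative, not part of this commit, and the asserts assume no REDIS_URL or CELERY_* variables are set in the surrounding environment:

# Sketch assuming pydantic v2 and pydantic-settings are installed.
from typing import Any, Dict
from pydantic import model_validator
from pydantic_settings import BaseSettings

class DemoSettings(BaseSettings):
    REDIS_URL: str = "redis://localhost:6379/0"
    CELERY_BROKER_URL: str = "redis://localhost:6379/0"
    CELERY_RESULT_BACKEND: str = "redis://localhost:6379/0"

    @model_validator(mode="before")
    @classmethod
    def set_celery_defaults(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        # Fall back to REDIS_URL unless the Celery URLs were given explicitly
        redis_url = values.get("REDIS_URL", "redis://localhost:6379/0")
        if not values.get("CELERY_BROKER_URL"):
            values["CELERY_BROKER_URL"] = redis_url
        if not values.get("CELERY_RESULT_BACKEND"):
            values["CELERY_RESULT_BACKEND"] = redis_url
        return values

# No CELERY_* values given: both fall back to REDIS_URL
s = DemoSettings(REDIS_URL="redis://redis:6379/0")
assert s.CELERY_BROKER_URL == "redis://redis:6379/0"
assert s.CELERY_RESULT_BACKEND == "redis://redis:6379/0"

# An explicit value wins over the fallback
s = DemoSettings(CELERY_BROKER_URL="redis://other:6379/1")
assert s.CELERY_BROKER_URL == "redis://other:6379/1"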


@@ -14,6 +14,7 @@ This client works with:
 import logging
 from typing import Any, AsyncIterator, Dict, List, Optional, Union, cast
+import httpx
 from openai import AsyncOpenAI
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
@@ -79,8 +80,13 @@ class LLMClient:
         self.temperature = temperature if temperature is not None else settings.LLM_TEMPERATURE
         self.max_tokens = max_tokens or settings.LLM_MAX_TOKENS
 
-        # Initialize AsyncOpenAI client
-        self.client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key)
+        # Initialize AsyncOpenAI client with custom HTTP client (disable SSL verification for self-signed certs)
+        http_client = httpx.AsyncClient(verify=False, timeout=30.0)
+        self.client = AsyncOpenAI(
+            base_url=self.base_url,
+            api_key=self.api_key,
+            http_client=http_client
+        )
 
         logger.info(f"Initialized LLM client: base_url={self.base_url}, model={self.model}")