- Added OpenAI-compatible LLM endpoints to API backend
- Introduced web frontend with Jinja2 templates and static assets
- Implemented API proxy routes in web service
- Added sample db.json data for items, users, orders, reviews, categories, llm_requests
- Updated ADC and Helm configs for separate AI and standard rate limiting
- Upgraded FastAPI, Uvicorn, and added httpx, Jinja2, python-multipart dependencies
- Added API configuration modal and client-side JS for web app
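For reference, a minimal sketch of the sample db.json shape implied by the collection names above (the field names are illustrative assumptions, not taken from the actual file):

{
    "items": [{"id": 1, "name": "Sample Item", "price": 9.99}],
    "users": [{"id": 1, "name": "Sample User", "email": "user@example.com"}],
    "orders": [],
    "reviews": [],
    "categories": [],
    "llm_requests": []
}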
134 lines · 4.3 KiB · Python
import os

import httpx
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates

app = FastAPI(title="Web Demo Application")

# Get the directory where this script is located
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# API Configuration - can be set via environment variable
API_BASE_URL = os.getenv("API_BASE_URL", "http://localhost:8001")

# Mount static files
static_dir = os.path.join(BASE_DIR, "static")
if os.path.exists(static_dir):
    app.mount("/static", StaticFiles(directory=static_dir), name="static")

# Setup templates
templates_dir = os.path.join(BASE_DIR, "templates")
templates = Jinja2Templates(directory=templates_dir)

# HTTP client for API calls
async def api_request(method: str, endpoint: str, **kwargs):
    """Make a request to the API backend"""
    url = f"{API_BASE_URL}{endpoint}"
    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            response = await client.request(method, url, **kwargs)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"API request failed: {str(e)}")
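
# Example usage of the helper (illustrative; this is exactly what the proxy
# routes below do):
#   data = await api_request("GET", "/items")
#   item = await api_request("GET", "/items/1")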
# ===== ROUTES - HTML Pages =====
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
    """Serve the home page"""
    return templates.TemplateResponse("index.html", {"request": request})

@app.get("/items", response_class=HTMLResponse)
async def items_page(request: Request):
    """Serve the items page"""
    return templates.TemplateResponse("items.html", {"request": request})

@app.get("/users", response_class=HTMLResponse)
async def users_page(request: Request):
    """Serve the users page"""
    return templates.TemplateResponse("users.html", {"request": request})

@app.get("/llm", response_class=HTMLResponse)
async def llm_page(request: Request):
    """Serve the LLM chat page"""
    return templates.TemplateResponse("llm.html", {"request": request})

# ===== API PROXY ENDPOINTS =====
@app.get("/api/items")
async def proxy_get_items():
    """Proxy GET /items to API backend"""
    return await api_request("GET", "/items")

@app.get("/api/items/{item_id}")
async def proxy_get_item(item_id: int):
    """Proxy GET /items/{id} to API backend"""
    return await api_request("GET", f"/items/{item_id}")

@app.get("/api/users")
async def proxy_get_users():
    """Proxy GET /users to API backend"""
    return await api_request("GET", "/users")

@app.get("/api/users/{user_id}")
async def proxy_get_user(user_id: int):
    """Proxy GET /users/{id} to API backend"""
    return await api_request("GET", f"/users/{user_id}")

@app.post("/api/llm/chat")
async def proxy_llm_chat(request: Request):
    """Proxy POST /llm/chat to API backend"""
    body = await request.json()
    return await api_request("POST", "/llm/chat", json=body)
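
# Example request body for the chat proxy above (illustrative; the commit
# message describes the backend endpoints as OpenAI-compatible, so an
# OpenAI-style chat payload is assumed here):
#   POST /api/llm/chat
#   {"model": "demo-model", "messages": [{"role": "user", "content": "Hi"}]}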
@app.get("/api/llm/models")
|
|
async def proxy_llm_models():
|
|
"""Proxy GET /llm/models to API backend"""
|
|
return await api_request("GET", "/llm/models")
|
|
|
|
@app.get("/api/llm/health")
|
|
async def proxy_llm_health():
|
|
"""Proxy GET /llm/health to API backend"""
|
|
return await api_request("GET", "/llm/health")
|
|
|
|
# ===== WEB HEALTH CHECK =====
|
|
@app.get("/health")
|
|
async def health():
|
|
"""Health check endpoint"""
|
|
# Try to connect to API backend
|
|
api_status = "unknown"
|
|
try:
|
|
async with httpx.AsyncClient(timeout=5.0) as client:
|
|
response = await client.get(f"{API_BASE_URL}/health")
|
|
if response.status_code == 200:
|
|
api_status = "healthy"
|
|
else:
|
|
api_status = "unhealthy"
|
|
except:
|
|
api_status = "unreachable"
|
|
|
|
return {
|
|
"status": "healthy",
|
|
"service": "web",
|
|
"version": "1.0.0",
|
|
"api_backend": API_BASE_URL,
|
|
"api_status": api_status
|
|
}
|
|
|
|
# ===== CONFIG ENDPOINT =====
|
|
@app.get("/api/config")
|
|
async def get_config():
|
|
"""Get current API configuration"""
|
|
return {
|
|
"api_base_url": API_BASE_URL
|
|
}
|
|
|
|
if __name__ == "__main__":
    print("Starting Web service")
    print(f"API Backend: {API_BASE_URL}")
    uvicorn.run(app, host="0.0.0.0", port=8000)
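
# Quick local check (illustrative commands; the module filename "main.py" is
# an assumption). The web service listens on port 8000 and expects the API
# backend at API_BASE_URL (default http://localhost:8001):
#   API_BASE_URL=http://localhost:8001 python main.py
#   curl http://localhost:8000/health
#   curl http://localhost:8000/api/items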