From ed660dce5ac1ee67027c5a53553759bf9c6b0989 Mon Sep 17 00:00:00 2001 From: "d.viti" Date: Tue, 7 Oct 2025 17:29:12 +0200 Subject: [PATCH] Add LLM endpoints, web frontend, and rate limiting config - Added OpenAI-compatible LLM endpoints to API backend - Introduced web frontend with Jinja2 templates and static assets - Implemented API proxy routes in web service - Added sample db.json data for items, users, orders, reviews, categories, llm_requests - Updated ADC and Helm configs for separate AI and standard rate limiting - Upgraded FastAPI, Uvicorn, and added httpx, Jinja2, python-multipart dependencies - Added API configuration modal and client-side JS for web app --- adc.yaml | 34 +- api/db.json | 199 +++++++ api/main.py | 90 +++- api/requirements.txt | 5 +- .../templates/configmap-adc.yaml | 27 +- helm/api7ee-demo-k8s/values.yaml | 11 +- web/.env.example | 12 + web/main.py | 236 ++++---- web/requirements.txt | 9 +- web/static/css/style.css | 503 ++++++++++++++++++ web/static/js/app.js | 123 +++++ web/templates/base.html | 95 ++++ web/templates/index.html | 86 +++ web/templates/items.html | 55 ++ web/templates/llm.html | 135 +++++ web/templates/users.html | 69 +++ 16 files changed, 1551 insertions(+), 138 deletions(-) create mode 100644 api/db.json create mode 100644 web/.env.example create mode 100644 web/static/css/style.css create mode 100644 web/static/js/app.js create mode 100644 web/templates/base.html create mode 100644 web/templates/index.html create mode 100644 web/templates/items.html create mode 100644 web/templates/llm.html create mode 100644 web/templates/users.html diff --git a/adc.yaml b/adc.yaml index a861399..97e166c 100644 --- a/adc.yaml +++ b/adc.yaml @@ -17,7 +17,7 @@ services: vars: - - uri - "~~" - - "^(?!/api)" + - "^(?!/api|/docs)" priority: 1 plugins: redirect: @@ -35,11 +35,20 @@ services: port: 80 weight: 100 routes: - - name: nginx-api-route + - name: nginx-api-docs-route uris: - - /api - - /api/* - priority: 10 + - /docs + - /docs/* + priority: 30 + plugins: + redirect: + http_to_https: true + + - name: nginx-api-llm-route + uris: + - /api/llm + - /api/llm/* + priority: 20 plugins: redirect: http_to_https: true @@ -48,3 +57,18 @@ services: time_window: 60 rejected_code: 429 limit_strategy: "total_tokens" + + - name: nginx-api-route + uris: + - /api + - /api/* + priority: 10 + plugins: + redirect: + http_to_https: true + limit-count: + count: 100 + time_window: 60 + rejected_code: 429 + key_type: "var" + key: "remote_addr" diff --git a/api/db.json b/api/db.json new file mode 100644 index 0000000..1a578f6 --- /dev/null +++ b/api/db.json @@ -0,0 +1,199 @@ +{ + "items": [ + { + "id": 1, + "name": "Gaming Laptop RTX 4090", + "description": "High-performance gaming laptop with RTX 4090", + "price": 2999.99, + "in_stock": true, + "category": "electronics", + "tags": ["gaming", "laptop", "nvidia"] + }, + { + "id": 2, + "name": "Mechanical Keyboard RGB", + "description": "Cherry MX switches with RGB backlighting", + "price": 149.99, + "in_stock": true, + "category": "peripherals", + "tags": ["keyboard", "mechanical", "rgb"] + }, + { + "id": 3, + "name": "Wireless Gaming Mouse", + "description": "25K DPI wireless gaming mouse", + "price": 89.99, + "in_stock": false, + "category": "peripherals", + "tags": ["mouse", "wireless", "gaming"] + }, + { + "id": 4, + "name": "4K Gaming Monitor 32\"", + "description": "144Hz refresh rate, HDR support", + "price": 599.99, + "in_stock": true, + "category": "displays", + "tags": ["monitor", "4k", "gaming"] + }, + { + "id": 5, + 
"name": "Gaming Headset 7.1", + "description": "Surround sound gaming headset with noise cancellation", + "price": 129.99, + "in_stock": true, + "category": "audio", + "tags": ["headset", "audio", "gaming"] + } + ], + "users": [ + { + "id": 1, + "username": "john_doe", + "email": "john@example.com", + "active": true, + "role": "user", + "created_at": "2024-01-15T10:30:00Z" + }, + { + "id": 2, + "username": "jane_smith", + "email": "jane@example.com", + "active": true, + "role": "admin", + "created_at": "2024-02-20T14:22:00Z" + }, + { + "id": 3, + "username": "bob_wilson", + "email": "bob@example.com", + "active": false, + "role": "user", + "created_at": "2024-03-10T09:15:00Z" + }, + { + "id": 4, + "username": "alice_johnson", + "email": "alice@example.com", + "active": true, + "role": "moderator", + "created_at": "2024-04-05T16:45:00Z" + } + ], + "orders": [ + { + "id": 1, + "user_id": 1, + "items": [ + {"item_id": 1, "quantity": 1, "price": 2999.99}, + {"item_id": 2, "quantity": 1, "price": 149.99} + ], + "total": 3149.98, + "status": "shipped", + "created_at": "2024-09-15T12:00:00Z" + }, + { + "id": 2, + "user_id": 2, + "items": [ + {"item_id": 4, "quantity": 2, "price": 599.99} + ], + "total": 1199.98, + "status": "delivered", + "created_at": "2024-09-20T15:30:00Z" + }, + { + "id": 3, + "user_id": 4, + "items": [ + {"item_id": 5, "quantity": 1, "price": 129.99}, + {"item_id": 3, "quantity": 1, "price": 89.99} + ], + "total": 219.98, + "status": "pending", + "created_at": "2024-10-01T10:15:00Z" + } + ], + "reviews": [ + { + "id": 1, + "item_id": 1, + "user_id": 1, + "rating": 5, + "comment": "Amazing laptop! Best purchase ever!", + "created_at": "2024-09-20T14:30:00Z" + }, + { + "id": 2, + "item_id": 2, + "user_id": 2, + "rating": 4, + "comment": "Great keyboard, but a bit loud", + "created_at": "2024-09-22T09:45:00Z" + }, + { + "id": 3, + "item_id": 4, + "user_id": 2, + "rating": 5, + "comment": "Crystal clear display, perfect for gaming", + "created_at": "2024-09-25T18:20:00Z" + }, + { + "id": 4, + "item_id": 5, + "user_id": 4, + "rating": 4, + "comment": "Good sound quality, comfortable to wear", + "created_at": "2024-10-02T11:00:00Z" + } + ], + "categories": [ + { + "id": 1, + "name": "electronics", + "description": "Electronic devices and gadgets" + }, + { + "id": 2, + "name": "peripherals", + "description": "Computer peripherals and accessories" + }, + { + "id": 3, + "name": "displays", + "description": "Monitors and display devices" + }, + { + "id": 4, + "name": "audio", + "description": "Audio devices and accessories" + } + ], + "llm_requests": [ + { + "id": 1, + "user_id": 1, + "model": "videogame-expert", + "prompt": "What are the best RPG games of 2024?", + "tokens_used": 250, + "timestamp": "2024-10-05T10:00:00Z" + }, + { + "id": 2, + "user_id": 2, + "model": "videogame-expert", + "prompt": "Recommend me games similar to Dark Souls", + "tokens_used": 180, + "timestamp": "2024-10-05T11:30:00Z" + }, + { + "id": 3, + "user_id": 1, + "model": "videogame-expert", + "prompt": "What's the best strategy for Elden Ring bosses?", + "tokens_used": 320, + "timestamp": "2024-10-05T14:15:00Z" + } + ] +} diff --git a/api/main.py b/api/main.py index 56b6102..a6b41af 100644 --- a/api/main.py +++ b/api/main.py @@ -1,8 +1,15 @@ -from fastapi import FastAPI, HTTPException -from pydantic import BaseModel from typing import List, Optional +from pydantic import BaseModel import uvicorn from datetime import datetime +from fastapi import FastAPI, HTTPException +import os +import httpx + +# OpenAI API 
configuration +OPENAI_API_BASE = os.getenv("OPENAI_API_BASE", "http://localhost/api") +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "your-api-key") +DEFAULT_MODEL = os.getenv("DEFAULT_LLM_MODEL", "your-model-id") app = FastAPI( title="API Demo Application", @@ -119,5 +126,84 @@ async def create_user(user: User): users_db.append(user_dict) return user_dict +# LLM endpoints +class LLMRequest(BaseModel): + prompt: str + max_tokens: Optional[int] = 150 + temperature: Optional[float] = 0.7 + model: Optional[str] = DEFAULT_MODEL + +class LLMResponse(BaseModel): + response: str + tokens_used: int + model: str + timestamp: str + +@app.post("/llm/chat", response_model=LLMResponse, tags=["LLM"]) +async def llm_chat(request: LLMRequest): + """ + LLM Chat endpoint - connects to OpenAI-compatible API (Open WebUI) + This endpoint is rate limited by AI token usage via API7 Gateway + """ + try: + async with httpx.AsyncClient() as client: + response = await client.post( + f"{OPENAI_API_BASE}/chat/completions", + headers={ + "Authorization": f"Bearer {OPENAI_API_KEY}", + "Content-Type": "application/json" + }, + json={ + "model": request.model, + "messages": [ + {"role": "user", "content": request.prompt} + ], + "max_tokens": request.max_tokens, + "temperature": request.temperature + }, + timeout=30.0 + ) + response.raise_for_status() + data = response.json() + + # Extract response and token usage + llm_response = data["choices"][0]["message"]["content"] + tokens_used = data.get("usage", {}).get("total_tokens", 0) + + return LLMResponse( + response=llm_response, + tokens_used=tokens_used, + model=request.model, + timestamp=datetime.now().isoformat() + ) + except httpx.HTTPStatusError as e: + raise HTTPException(status_code=e.response.status_code, detail=f"OpenAI API error: {e.response.text}") + except Exception as e: + raise HTTPException(status_code=500, detail=f"LLM service error: {str(e)}") + +@app.get("/llm/models", tags=["LLM"]) +async def list_llm_models(): + """List available LLM models""" + return { + "models": [ + {"id": "videogame-expert", "name": "Videogame Expert", "max_tokens": 4096, "provider": "Open WebUI"} + ], + "default_model": DEFAULT_MODEL, + "timestamp": datetime.now().isoformat() + } + +@app.get("/llm/health", tags=["LLM"]) +async def llm_health(): + """LLM service health check""" + return { + "status": "healthy", + "service": "llm-api", + "provider": "Open WebUI", + "endpoint": OPENAI_API_BASE, + "default_model": DEFAULT_MODEL, + "rate_limit": "ai-rate-limiting enabled (100 tokens/60s)", + "timestamp": datetime.now().isoformat() + } + if __name__ == "__main__": uvicorn.run(app, host="0.0.0.0", port=8001) diff --git a/api/requirements.txt b/api/requirements.txt index 5313262..1d2f64d 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -1,3 +1,4 @@ -fastapi==0.104.1 -uvicorn[standard]==0.24.0 +fastapi==0.109.0 +uvicorn==0.27.0 pydantic==2.5.0 +httpx==0.26.0 diff --git a/helm/api7ee-demo-k8s/templates/configmap-adc.yaml b/helm/api7ee-demo-k8s/templates/configmap-adc.yaml index a2701aa..3a20988 100644 --- a/helm/api7ee-demo-k8s/templates/configmap-adc.yaml +++ b/helm/api7ee-demo-k8s/templates/configmap-adc.yaml @@ -47,11 +47,11 @@ data: port: 80 weight: 100 routes: - - name: nginx-api-route + - name: nginx-api-llm-route uris: - - /api - - /api/* - priority: 10 + - /api/llm + - /api/llm/* + priority: 20 plugins: {{- if .Values.api7.tls.enabled }} redirect: @@ -65,6 +65,25 @@ data: limit_strategy: {{ .Values.api7.plugins.aiRateLimit.limitStrategy | quote }} {{- end }} + - name: 
nginx-api-route + uris: + - /api + - /api/* + priority: 10 + plugins: + {{- if .Values.api7.tls.enabled }} + redirect: + http_to_https: true + {{- end }} + {{- if .Values.api7.plugins.rateLimit.enabled }} + limit-count: + count: {{ .Values.api7.plugins.rateLimit.count }} + time_window: {{ .Values.api7.plugins.rateLimit.timeWindow }} + rejected_code: {{ .Values.api7.plugins.rateLimit.rejectedCode }} + key_type: {{ .Values.api7.plugins.rateLimit.keyType | quote }} + key: {{ .Values.api7.plugins.rateLimit.key | quote }} + {{- end }} + {{- if .Values.api7.plugins.auth.enabled }} consumers: {{- range .Values.api7.consumers }} diff --git a/helm/api7ee-demo-k8s/values.yaml b/helm/api7ee-demo-k8s/values.yaml index c3e1b93..0683993 100644 --- a/helm/api7ee-demo-k8s/values.yaml +++ b/helm/api7ee-demo-k8s/values.yaml @@ -247,7 +247,16 @@ api7: # API7 Plugins Configuration plugins: - # AI Rate limiting (for /api route) + # Standard Rate limiting (for /api route - per IP) + rateLimit: + enabled: true + count: 100 + timeWindow: 60 + rejectedCode: 429 + keyType: "var" + key: "remote_addr" + + # AI Rate limiting (for /api/llm route) aiRateLimit: enabled: true limit: 100 diff --git a/web/.env.example b/web/.env.example new file mode 100644 index 0000000..56c8585 --- /dev/null +++ b/web/.env.example @@ -0,0 +1,12 @@ +# API Backend Configuration +# Set this to the base URL where the API service is running + +# Local development +API_BASE_URL=http://localhost:8001 + +# Production +# API_BASE_URL=https://commandware.it/api + +# Other examples +# API_BASE_URL=http://api:8001 +# API_BASE_URL=http://192.168.1.100:8001 diff --git a/web/main.py b/web/main.py index 0b5feef..ce48904 100644 --- a/web/main.py +++ b/web/main.py @@ -1,137 +1,133 @@ -from fastapi import FastAPI -from fastapi.responses import HTMLResponse +from fastapi import FastAPI, Request, HTTPException +from fastapi.responses import HTMLResponse, JSONResponse from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates import uvicorn import os -import subprocess +import httpx app = FastAPI(title="Web Demo Application") -# Build MkDocs documentation on startup -def build_docs(): - docs_dir = os.path.join(os.path.dirname(__file__), "docs") - site_dir = os.path.join(os.path.dirname(__file__), "site") +# Get the directory where this script is located +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) - if os.path.exists(docs_dir): +# API Configuration - can be set via environment variable +API_BASE_URL = os.getenv("API_BASE_URL", "http://localhost:8001") + +# Mount static files +static_dir = os.path.join(BASE_DIR, "static") +if os.path.exists(static_dir): + app.mount("/static", StaticFiles(directory=static_dir), name="static") + +# Setup templates +templates_dir = os.path.join(BASE_DIR, "templates") +templates = Jinja2Templates(directory=templates_dir) + +# HTTP client for API calls +async def api_request(method: str, endpoint: str, **kwargs): + """Make a request to the API backend""" + url = f"{API_BASE_URL}{endpoint}" + async with httpx.AsyncClient(timeout=30.0) as client: try: - subprocess.run( - ["mkdocs", "build", "-f", os.path.join(docs_dir, "mkdocs.yml"), "-d", site_dir], - check=True, - capture_output=True - ) - print(f"✓ Documentation built successfully at {site_dir}") - return True - except subprocess.CalledProcessError as e: - print(f"✗ Failed to build documentation: {e.stderr.decode()}") - return False - except FileNotFoundError: - print("✗ MkDocs not installed. 
Install with: pip install mkdocs mkdocs-material") - return False - return False - -# Build docs on startup -@app.on_event("startup") -async def startup_event(): - build_docs() - -# Mount static documentation site at /docs -site_dir = os.path.join(os.path.dirname(__file__), "site") -if os.path.exists(site_dir): - app.mount("/docs", StaticFiles(directory=site_dir, html=True), name="docs") - -# Simple HTML template inline -HTML_TEMPLATE = """ - - - - Web Demo - - - -
-    <div class="container">
-        <div class="header">
-            <h1>Welcome to Web Demo Application</h1>
-        </div>
-
-        <div class="card">
-            <h2>Application Information</h2>
-            <p>Service: Web Frontend</p>
-            <p>Status: ✓ Running</p>
-            <p>Version: 1.0.0</p>
-        </div>
-
-        <div class="card">
-            <h2>Metrics Dashboard</h2>
-            <div class="metrics">
-                <span>Requests: 1,234</span>
-                <span>Uptime: 99.9%</span>
-                <span>Users: 567</span>
-            </div>
-        </div>
-
-        <div class="card">
-            <h2>About</h2>
-            <p>This is a demo FastAPI web application serving HTML content.
-            It demonstrates a simple web interface with metrics and information display.</p>
-        </div>
-
-        <div class="links">
-            <a href="/docs">📚 View Documentation</a>
-            <a href="/health">🏥 Health Check</a>
-        </div>
- - -""" + response = await client.request(method, url, **kwargs) + response.raise_for_status() + return response.json() + except httpx.HTTPStatusError as e: + raise HTTPException(status_code=e.response.status_code, detail=str(e)) + except Exception as e: + raise HTTPException(status_code=500, detail=f"API request failed: {str(e)}") +# ===== ROUTES - HTML Pages ===== @app.get("/", response_class=HTMLResponse) -async def root(): - """Serve the main webpage""" - return HTML_TEMPLATE +async def home(request: Request): + """Serve the home page""" + return templates.TemplateResponse("index.html", {"request": request}) +@app.get("/items", response_class=HTMLResponse) +async def items_page(request: Request): + """Serve the items page""" + return templates.TemplateResponse("items.html", {"request": request}) + +@app.get("/users", response_class=HTMLResponse) +async def users_page(request: Request): + """Serve the users page""" + return templates.TemplateResponse("users.html", {"request": request}) + +@app.get("/llm", response_class=HTMLResponse) +async def llm_page(request: Request): + """Serve the LLM chat page""" + return templates.TemplateResponse("llm.html", {"request": request}) + +# ===== API PROXY ENDPOINTS ===== +@app.get("/api/items") +async def proxy_get_items(): + """Proxy GET /items to API backend""" + return await api_request("GET", "/items") + +@app.get("/api/items/{item_id}") +async def proxy_get_item(item_id: int): + """Proxy GET /items/{id} to API backend""" + return await api_request("GET", f"/items/{item_id}") + +@app.get("/api/users") +async def proxy_get_users(): + """Proxy GET /users to API backend""" + return await api_request("GET", "/users") + +@app.get("/api/users/{user_id}") +async def proxy_get_user(user_id: int): + """Proxy GET /users/{id} to API backend""" + return await api_request("GET", f"/users/{user_id}") + +@app.post("/api/llm/chat") +async def proxy_llm_chat(request: Request): + """Proxy POST /llm/chat to API backend""" + body = await request.json() + return await api_request("POST", "/llm/chat", json=body) + +@app.get("/api/llm/models") +async def proxy_llm_models(): + """Proxy GET /llm/models to API backend""" + return await api_request("GET", "/llm/models") + +@app.get("/api/llm/health") +async def proxy_llm_health(): + """Proxy GET /llm/health to API backend""" + return await api_request("GET", "/llm/health") + +# ===== WEB HEALTH CHECK ===== @app.get("/health") async def health(): """Health check endpoint""" - return {"status": "healthy", "service": "web"} + # Try to connect to API backend + api_status = "unknown" + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(f"{API_BASE_URL}/health") + if response.status_code == 200: + api_status = "healthy" + else: + api_status = "unhealthy" + except: + api_status = "unreachable" + + return { + "status": "healthy", + "service": "web", + "version": "1.0.0", + "api_backend": API_BASE_URL, + "api_status": api_status + } + +# ===== CONFIG ENDPOINT ===== +@app.get("/api/config") +async def get_config(): + """Get current API configuration""" + return { + "api_base_url": API_BASE_URL + } if __name__ == "__main__": + print(f"Starting Web service") + print(f"API Backend: {API_BASE_URL}") uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/web/requirements.txt b/web/requirements.txt index 13545f9..124cb40 100644 --- a/web/requirements.txt +++ b/web/requirements.txt @@ -1,4 +1,5 @@ -fastapi==0.104.1 -uvicorn[standard]==0.24.0 -mkdocs==1.5.3 -mkdocs-material==9.5.3 +fastapi==0.109.0 
+uvicorn==0.27.0 +jinja2==3.1.3 +python-multipart==0.0.6 +httpx==0.26.0 diff --git a/web/static/css/style.css b/web/static/css/style.css new file mode 100644 index 0000000..9647814 --- /dev/null +++ b/web/static/css/style.css @@ -0,0 +1,503 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: + -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", + Arial, sans-serif; + line-height: 1.6; + color: #333; + background-color: #f5f5f5; +} + +.container { + max-width: 1200px; + margin: 0 auto; + padding: 0 20px; +} + +/* Navbar */ +.navbar { + background-color: #2c3e50; + color: white; + padding: 1rem 0; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); +} + +.nav-brand h2 { + display: inline-block; + margin: 0; +} + +.nav-menu { + list-style: none; + display: inline-block; + float: right; +} + +.nav-menu li { + display: inline-block; + margin-left: 30px; +} + +.nav-menu a { + color: white; + text-decoration: none; + transition: color 0.3s; +} + +.nav-menu a:hover { + color: #3498db; +} + +/* Main content */ +main { + min-height: calc(100vh - 200px); + padding: 40px 20px; +} + +/* Hero section */ +.hero { + text-align: center; + padding: 60px 20px; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; + border-radius: 10px; + margin-bottom: 40px; +} + +.hero h1 { + font-size: 2.5rem; + margin-bottom: 1rem; +} + +.subtitle { + font-size: 1.2rem; + opacity: 0.9; +} + +/* Cards grid */ +.cards-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 20px; + margin-bottom: 40px; +} + +.card { + background: white; + padding: 30px; + border-radius: 10px; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); + text-align: center; + transition: + transform 0.3s, + box-shadow 0.3s; +} + +.card:hover { + transform: translateY(-5px); + box-shadow: 0 4px 20px rgba(0, 0, 0, 0.15); +} + +.card-icon { + font-size: 3rem; + margin-bottom: 1rem; +} + +.card h3 { + margin-bottom: 1rem; + color: #2c3e50; +} + +/* Buttons */ +.btn { + display: inline-block; + padding: 10px 20px; + background-color: #3498db; + color: white; + text-decoration: none; + border-radius: 5px; + border: none; + cursor: pointer; + transition: background-color 0.3s; +} + +.btn:hover { + background-color: #2980b9; +} + +.btn-primary { + background-color: #667eea; +} + +.btn-primary:hover { + background-color: #5568d3; +} + +.btn-sm { + padding: 5px 15px; + font-size: 0.9rem; +} + +/* Info section */ +.info-section { + background: white; + padding: 30px; + border-radius: 10px; + margin-bottom: 40px; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); +} + +.features-list { + list-style: none; + padding-left: 0; +} + +.features-list li { + padding: 10px 0; + font-size: 1.1rem; +} + +/* Stats */ +.stats { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 20px; + margin-top: 40px; +} + +.stat-box { + background: white; + padding: 30px; + border-radius: 10px; + text-align: center; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); +} + +.stat-box h3 { + font-size: 2.5rem; + color: #667eea; + margin-bottom: 10px; +} + +/* Items grid */ +.items-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); + gap: 20px; +} + +.item-card { + background: white; + padding: 20px; + border-radius: 10px; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); +} + +.item-card.out-of-stock { + opacity: 0.6; +} + +.item-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 
15px; +} + +.item-description { + color: #666; + margin-bottom: 15px; +} + +.item-footer { + display: flex; + justify-content: space-between; + align-items: center; +} + +.price { + font-size: 1.5rem; + font-weight: bold; + color: #27ae60; +} + +/* Table */ +.table-container { + background: white; + padding: 20px; + border-radius: 10px; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); + overflow-x: auto; +} + +.data-table { + width: 100%; + border-collapse: collapse; +} + +.data-table th, +.data-table td { + padding: 15px; + text-align: left; + border-bottom: 1px solid #ddd; +} + +.data-table th { + background-color: #f8f9fa; + font-weight: 600; +} + +/* Badges */ +.badge { + padding: 5px 10px; + border-radius: 20px; + font-size: 0.85rem; + font-weight: 600; +} + +.badge-success { + background-color: #d4edda; + color: #155724; +} + +.badge-danger { + background-color: #f8d7da; + color: #721c24; +} + +/* Chat */ +.chat-container { + background: white; + padding: 20px; + border-radius: 10px; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); + max-width: 800px; + margin: 0 auto; +} + +.chat-messages { + height: 400px; + overflow-y: auto; + padding: 20px; + background: #f8f9fa; + border-radius: 10px; + margin-bottom: 20px; +} + +.user-message, +.assistant-message, +.system-message { + padding: 10px 15px; + margin-bottom: 10px; + border-radius: 10px; + max-width: 80%; +} + +.user-message { + background-color: #667eea; + color: white; + margin-left: auto; + text-align: right; +} + +.assistant-message { + background-color: white; + border: 1px solid #ddd; +} + +.system-message { + background-color: #e3f2fd; + color: #1976d2; + text-align: center; + max-width: 100%; + font-size: 0.9rem; +} + +.chat-input-container { + display: flex; + gap: 10px; + margin-bottom: 10px; +} + +.chat-input-container textarea { + flex: 1; + padding: 10px; + border: 1px solid #ddd; + border-radius: 5px; + font-family: inherit; + resize: vertical; +} + +.chat-info { + text-align: center; + color: #666; +} + +/* Page header */ +.page-header { + margin-bottom: 30px; +} + +.page-header h1 { + font-size: 2rem; + margin-bottom: 10px; +} + +/* Footer */ +.footer { + background-color: #2c3e50; + color: white; + text-align: center; + padding: 20px 0; + margin-top: 40px; +} + +/* Modal */ +.modal { + display: none; + position: fixed; + z-index: 1000; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: auto; + background-color: rgba(0, 0, 0, 0.5); +} + +.modal-content { + background-color: white; + margin: 5% auto; + padding: 0; + border-radius: 10px; + box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3); + max-width: 600px; + animation: modalFadeIn 0.3s; +} + +@keyframes modalFadeIn { + from { + opacity: 0; + transform: translateY(-50px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.modal-header { + padding: 20px 30px; + border-bottom: 1px solid #ddd; + display: flex; + justify-content: space-between; + align-items: center; +} + +.modal-header h2 { + margin: 0; + color: #2c3e50; +} + +.close { + color: #aaa; + font-size: 28px; + font-weight: bold; + cursor: pointer; + transition: color 0.3s; +} + +.close:hover, +.close:focus { + color: #000; +} + +.modal-body { + padding: 30px; +} + +.form-group { + margin-bottom: 20px; +} + +.form-group label { + display: block; + margin-bottom: 8px; + font-weight: 600; + color: #333; +} + +.form-control { + width: 100%; + padding: 10px 15px; + border: 1px solid #ddd; + border-radius: 5px; + font-size: 1rem; + font-family: inherit; +} + +.form-control:focus { + outline: none; + 
border-color: #667eea; + box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1); +} + +.form-hint { + display: block; + margin-top: 8px; + color: #666; + font-size: 0.9rem; +} + +.form-hint code { + background-color: #f5f5f5; + padding: 2px 6px; + border-radius: 3px; + font-family: "Courier New", monospace; + font-size: 0.85rem; +} + +.config-info { + background-color: #f8f9fa; + padding: 15px; + border-radius: 5px; + border-left: 4px solid #667eea; +} + +.config-info strong { + color: #2c3e50; +} + +.modal-actions { + margin-top: 30px; + display: flex; + gap: 10px; + justify-content: flex-end; +} + +.btn-secondary { + background-color: #6c757d; + color: white; +} + +.btn-secondary:hover { + background-color: #5a6268; +} + +/* Utility */ +.loading { + text-align: center; + padding: 40px; + color: #666; +} + +.error { + color: #f44336; + text-align: center; + padding: 20px; +} diff --git a/web/static/js/app.js b/web/static/js/app.js new file mode 100644 index 0000000..b625660 --- /dev/null +++ b/web/static/js/app.js @@ -0,0 +1,123 @@ +// Global API configuration +const DEFAULT_API_BASE = "/api"; +const API_BASE_KEY = "api_base_url"; + +// Get API base URL from localStorage or use default +function getApiBaseUrl() { + return localStorage.getItem(API_BASE_KEY) || DEFAULT_API_BASE; +} + +// Set API base URL +function setApiBaseUrl(url) { + localStorage.setItem(API_BASE_KEY, url); +} + +// Export for global access +window.API_BASE = getApiBaseUrl(); + +// API Configuration Modal Functions +function openApiConfig(event) { + if (event) event.preventDefault(); + + const modal = document.getElementById("api-config-modal"); + const input = document.getElementById("api-base-url"); + const currentUrl = document.getElementById("current-api-url"); + + input.value = getApiBaseUrl(); + currentUrl.textContent = getApiBaseUrl(); + modal.style.display = "block"; +} + +function closeApiConfig() { + const modal = document.getElementById("api-config-modal"); + modal.style.display = "none"; +} + +function saveApiConfig() { + const input = document.getElementById("api-base-url"); + const url = input.value.trim(); + + if (!url) { + alert("Please enter a valid API base URL"); + return; + } + + // Remove trailing slash if present + const cleanUrl = url.endsWith("/") ? url.slice(0, -1) : url; + + setApiBaseUrl(cleanUrl); + window.API_BASE = cleanUrl; + + showNotification("API configuration saved. Reloading page...", "success"); + + setTimeout(() => { + window.location.reload(); + }, 1000); +} + +function resetApiConfig() { + if (confirm("Reset API configuration to default (/api)?")) { + setApiBaseUrl(DEFAULT_API_BASE); + window.API_BASE = DEFAULT_API_BASE; + + showNotification("API configuration reset. 
Reloading page...", "success"); + + setTimeout(() => { + window.location.reload(); + }, 1000); + } +} + +// Close modal when clicking outside +window.onclick = function (event) { + const modal = document.getElementById("api-config-modal"); + if (event.target === modal) { + closeApiConfig(); + } +}; + +// Utility functions +function showNotification(message, type = "info") { + console.log(`[${type.toUpperCase()}] ${message}`); + // Could be extended with toast notifications +} + +function formatDate(dateString) { + const date = new Date(dateString); + return date.toLocaleDateString() + " " + date.toLocaleTimeString(); +} + +function formatPrice(price) { + return new Intl.NumberFormat("en-US", { + style: "currency", + currency: "USD", + }).format(price); +} + +// API call wrapper +async function apiCall(endpoint, options = {}) { + try { + const response = await fetch(`${API_BASE}${endpoint}`, { + ...options, + headers: { + "Content-Type": "application/json", + ...options.headers, + }, + }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + return await response.json(); + } catch (error) { + showNotification(error.message, "error"); + throw error; + } +} + +// Export for use in templates +window.apiCall = apiCall; +window.showNotification = showNotification; +window.formatDate = formatDate; +window.formatPrice = formatPrice; diff --git a/web/templates/base.html b/web/templates/base.html new file mode 100644 index 0000000..3c040b9 --- /dev/null +++ b/web/templates/base.html @@ -0,0 +1,95 @@ + + + + + + {% block title %}API Demo{% endblock %} + + + + + + + + +
{% block content %}{% endblock %}
+ + + + + {% block scripts %}{% endblock %} + + diff --git a/web/templates/index.html b/web/templates/index.html new file mode 100644 index 0000000..6326e7d --- /dev/null +++ b/web/templates/index.html @@ -0,0 +1,86 @@ +{% extends "base.html" %} + +{% block title %}Home - API Demo{% endblock %} + +{% block content %} +
+<div class="hero">
+    <h1>Welcome to API7EE Demo Platform</h1>
+    <p class="subtitle">Explore our API services with real-time data and AI-powered features</p>
+</div>
+
+<div class="cards-grid">
+    <div class="card">
+        <div class="card-icon">📦</div>
+        <h3>Items Management</h3>
+        <p>Browse and manage products in our catalog</p>
+        <a href="/items" class="btn">View Items</a>
+    </div>
+
+    <div class="card">
+        <div class="card-icon">👥</div>
+        <h3>Users</h3>
+        <p>Manage user accounts and profiles</p>
+        <a href="/users" class="btn">View Users</a>
+    </div>
+
+    <div class="card">
+        <div class="card-icon">🤖</div>
+        <h3>AI Chat (LLM)</h3>
+        <p>Chat with our videogame expert AI assistant</p>
+        <a href="/llm" class="btn">Start Chat</a>
+    </div>
+
+    <div class="card">
+        <div class="card-icon">📚</div>
+        <h3>API Documentation</h3>
+        <p>Explore our OpenAPI/Swagger documentation</p>
+        <a href="/docs" class="btn">Open Docs</a>
+    </div>
+</div>
+
+<div class="info-section">
+    <h2>Features</h2>
+    <ul class="features-list"></ul>
+</div>
+
+<div class="stats">
+    <div class="stat-box">
+        <h3>-</h3>
+        <p>Total Items</p>
+    </div>
+    <div class="stat-box">
+        <h3>-</h3>
+        <p>Active Users</p>
+    </div>
+    <div class="stat-box">
+        <h3>AI Ready</h3>
+        <p>LLM Service</p>
+    </div>
+</div>
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/web/templates/items.html b/web/templates/items.html new file mode 100644 index 0000000..6b7c22b --- /dev/null +++ b/web/templates/items.html @@ -0,0 +1,55 @@ +{% extends "base.html" %} + +{% block title %}Items - API Demo{% endblock %} + +{% block content %} + + +
+<div class="items-grid">
+    <div class="loading">Loading items...</div>
+</div>
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/web/templates/llm.html b/web/templates/llm.html new file mode 100644 index 0000000..3f15018 --- /dev/null +++ b/web/templates/llm.html @@ -0,0 +1,135 @@ +{% extends "base.html" %} {% block title %}LLM Chat - API Demo{% endblock %} {% +block content %} + + +
+<div class="chat-container">
+    <div class="chat-messages">
+        <div class="system-message">
+            Welcome! Ask me anything about videogames. I'm powered by the
+            videogame-expert model.
+        </div>
+    </div>
+
+    <div class="chat-input-container">
+        <textarea rows="3"></textarea>
+        <button class="btn btn-primary">Send</button>
+    </div>
+
+    <div class="chat-info">
+        <span>
+            Model: videogame-expert | Status:
+            Ready | Rate Limit:
+            100 tokens/60s
+        </span>
+    </div>
+</div>
+{% endblock %} {% block scripts %} + + +{% endblock %} diff --git a/web/templates/users.html b/web/templates/users.html new file mode 100644 index 0000000..3e300b4 --- /dev/null +++ b/web/templates/users.html @@ -0,0 +1,69 @@ +{% extends "base.html" %} + +{% block title %}Users - API Demo{% endblock %} + +{% block content %} + + +
+    <table class="data-table">
+        <thead>
+            <tr>
+                <th>ID</th>
+                <th>Username</th>
+                <th>Email</th>
+                <th>Status</th>
+                <th>Actions</th>
+            </tr>
+        </thead>
+        <tbody>
+            <tr>
+                <td colspan="5" class="loading">Loading users...</td>
+            </tr>
+        </tbody>
+    </table>
+{% endblock %} + +{% block scripts %} + +{% endblock %}
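
Reviewer note (not part of the patch): a minimal end-to-end sketch of the new LLM route and its token-based limit. Assumptions: the gateway configured in adc.yaml is reachable at https://localhost with a self-signed certificate, and the public path /api/llm/chat is rewritten to the backend handler /llm/chat (the rewrite itself is not shown in this diff, so treat the exact path as an assumption).

    # Hypothetical smoke test for the new LLM route; host, TLS and path mapping are assumptions.
    import httpx

    GATEWAY = "https://localhost"  # assumed local gateway address

    def ask(prompt: str) -> None:
        payload = {"prompt": prompt, "max_tokens": 100, "temperature": 0.7}
        with httpx.Client(verify=False, timeout=30.0) as client:
            resp = client.post(f"{GATEWAY}/api/llm/chat", json=payload)
            if resp.status_code == 429:
                # the AI limiter (limit_strategy: total_tokens) returns 429 once the
                # 100-token budget for the 60 s window is spent
                print("Rate limited by the AI token limiter; retry after the window resets")
                return
            resp.raise_for_status()
            data = resp.json()
            print(f"{data['model']} used {data['tokens_used']} tokens: {data['response']}")

    if __name__ == "__main__":
        ask("Recommend me games similar to Dark Souls")

The standard /api route behaves differently: it is limited per client IP (limit-count, 100 requests per 60 s keyed on remote_addr), so ordinary REST traffic is not charged against the AI token budget.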
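
Also not part of the patch: a quick local check of the web-to-API wiring added here. It assumes both services are started with their in-file defaults (API on :8001, web on :8000, API_BASE_URL left at http://localhost:8001) and uses only the /health and /api/config endpoints introduced in web/main.py.

    # Hypothetical local wiring check; ports and defaults are taken from the code above.
    import httpx

    with httpx.Client(timeout=5.0) as client:
        health = client.get("http://localhost:8000/health").json()
        config = client.get("http://localhost:8000/api/config").json()
        # /health now reports whether the API backend is reachable
        print("web:", health["status"], "| api_status:", health["api_status"])
        # /api/config echoes the API_BASE_URL the proxy routes will use
        print("proxying to:", config["api_base_url"])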