import os
from datetime import datetime
from typing import List, Optional

import httpx
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

# OpenAI API configuration
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE", "http://localhost/api")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "your-api-key")
DEFAULT_MODEL = os.getenv("DEFAULT_LLM_MODEL", "your-model-id")

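# Example (hypothetical values) for pointing the app at a local Open WebUI
# instance:
#   export OPENAI_API_BASE="http://localhost:3000/api"
#   export OPENAI_API_KEY="sk-..."
#   export DEFAULT_LLM_MODEL="videogame-expert"
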
app = FastAPI(
    title="API Demo Application",
    description="Demo API with Swagger documentation",
    version="1.0.0",
)

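# FastAPI serves interactive documentation automatically: Swagger UI at /docs
# and ReDoc at /redoc.
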
# Models
class Item(BaseModel):
    id: Optional[int] = None
    name: str
    description: Optional[str] = None
    price: float
    in_stock: bool = True


class User(BaseModel):
    id: Optional[int] = None
    username: str
    email: str
    active: bool = True

# In-memory storage
items_db = [
    {"id": 1, "name": "Laptop", "description": "High-performance laptop", "price": 999.99, "in_stock": True},
    {"id": 2, "name": "Mouse", "description": "Wireless mouse", "price": 29.99, "in_stock": True},
    {"id": 3, "name": "Keyboard", "description": "Mechanical keyboard", "price": 79.99, "in_stock": False},
]

users_db = [
    {"id": 1, "username": "john_doe", "email": "john@example.com", "active": True},
    {"id": 2, "username": "jane_smith", "email": "jane@example.com", "active": True},
]

# Root endpoint
@app.get("/")
async def root():
    """Root endpoint with API information"""
    return {
        "message": "Welcome to API Demo",
        "version": "1.0.0",
        "docs": "/docs",
        "timestamp": datetime.now().isoformat(),
    }

# Health check
@app.get("/health")
async def health():
    """Health check endpoint"""
    return {"status": "healthy", "service": "api", "timestamp": datetime.now().isoformat()}

# Readiness check
@app.get("/ready")
async def ready():
    """Readiness check endpoint"""
    return {"status": "ready", "service": "api", "timestamp": datetime.now().isoformat()}

# Items endpoints
@app.get("/items", response_model=List[Item], tags=["Items"])
async def get_items():
    """Get all items"""
    return items_db

@app.get("/items/{item_id}", response_model=Item, tags=["Items"])
|
|
async def get_item(item_id: int):
|
|
"""Get a specific item by ID"""
|
|
item = next((item for item in items_db if item["id"] == item_id), None)
|
|
if item is None:
|
|
raise HTTPException(status_code=404, detail="Item not found")
|
|
return item
|
|
|
|
@app.post("/items", response_model=Item, tags=["Items"])
|
|
async def create_item(item: Item):
|
|
"""Create a new item"""
|
|
new_id = max([i["id"] for i in items_db]) + 1 if items_db else 1
|
|
item_dict = item.dict()
|
|
item_dict["id"] = new_id
|
|
items_db.append(item_dict)
|
|
return item_dict
|
|
|
|
@app.put("/items/{item_id}", response_model=Item, tags=["Items"])
|
|
async def update_item(item_id: int, item: Item):
|
|
"""Update an existing item"""
|
|
for idx, existing_item in enumerate(items_db):
|
|
if existing_item["id"] == item_id:
|
|
item_dict = item.dict()
|
|
item_dict["id"] = item_id
|
|
items_db[idx] = item_dict
|
|
return item_dict
|
|
raise HTTPException(status_code=404, detail="Item not found")
|
|
|
|
@app.delete("/items/{item_id}", tags=["Items"])
|
|
async def delete_item(item_id: int):
|
|
"""Delete an item"""
|
|
for idx, item in enumerate(items_db):
|
|
if item["id"] == item_id:
|
|
items_db.pop(idx)
|
|
return {"message": "Item deleted successfully"}
|
|
raise HTTPException(status_code=404, detail="Item not found")
|
|
|
|
# Users endpoints
@app.get("/users", response_model=List[User], tags=["Users"])
async def get_users():
    """Get all users"""
    return users_db

@app.get("/users/{user_id}", response_model=User, tags=["Users"])
|
|
async def get_user(user_id: int):
|
|
"""Get a specific user by ID"""
|
|
user = next((user for user in users_db if user["id"] == user_id), None)
|
|
if user is None:
|
|
raise HTTPException(status_code=404, detail="User not found")
|
|
return user
|
|
|
|
@app.post("/users", response_model=User, tags=["Users"])
|
|
async def create_user(user: User):
|
|
"""Create a new user"""
|
|
new_id = max([u["id"] for u in users_db]) + 1 if users_db else 1
|
|
user_dict = user.dict()
|
|
user_dict["id"] = new_id
|
|
users_db.append(user_dict)
|
|
return user_dict
|
|
|
|
# LLM endpoints
class LLMRequest(BaseModel):
    prompt: str
    max_tokens: Optional[int] = 150
    temperature: Optional[float] = 0.7
    model: Optional[str] = DEFAULT_MODEL


class LLMResponse(BaseModel):
    response: str
    tokens_used: int
    model: str
    timestamp: str

@app.post("/llm/chat", response_model=LLMResponse, tags=["LLM"])
|
|
async def llm_chat(request: LLMRequest):
|
|
"""
|
|
LLM Chat endpoint - connects to OpenAI-compatible API (Open WebUI)
|
|
This endpoint is rate limited by AI token usage via API7 Gateway
|
|
"""
|
|
try:
|
|
async with httpx.AsyncClient() as client:
|
|
response = await client.post(
|
|
f"{OPENAI_API_BASE}/chat/completions",
|
|
headers={
|
|
"Authorization": f"Bearer {OPENAI_API_KEY}",
|
|
"Content-Type": "application/json"
|
|
},
|
|
json={
|
|
"model": request.model,
|
|
"messages": [
|
|
{"role": "user", "content": request.prompt}
|
|
],
|
|
"max_tokens": request.max_tokens,
|
|
"temperature": request.temperature
|
|
},
|
|
timeout=30.0
|
|
)
|
|
response.raise_for_status()
|
|
data = response.json()
|
|
|
|
# Extract response and token usage
|
|
llm_response = data["choices"][0]["message"]["content"]
|
|
tokens_used = data.get("usage", {}).get("total_tokens", 0)
|
|
|
|
return LLMResponse(
|
|
response=llm_response,
|
|
tokens_used=tokens_used,
|
|
model=request.model,
|
|
timestamp=datetime.now().isoformat()
|
|
)
|
|
except httpx.HTTPStatusError as e:
|
|
raise HTTPException(status_code=e.response.status_code, detail=f"OpenAI API error: {e.response.text}")
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=f"LLM service error: {str(e)}")
|
|
|
|
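# Example request against a locally running instance (hypothetical port):
#   curl -X POST http://localhost:8080/llm/chat \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello!", "max_tokens": 50}'
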
@app.get("/llm/models", tags=["LLM"])
|
|
async def list_llm_models():
|
|
"""List available LLM models"""
|
|
return {
|
|
"models": [
|
|
{"id": "videogame-expert", "name": "Videogame Expert", "max_tokens": 4096, "provider": "Open WebUI"}
|
|
],
|
|
"default_model": DEFAULT_MODEL,
|
|
"timestamp": datetime.now().isoformat()
|
|
}
|
|
|
|
@app.get("/llm/health", tags=["LLM"])
|
|
async def llm_health():
|
|
"""LLM service health check"""
|
|
return {
|
|
"status": "healthy",
|
|
"service": "llm-api",
|
|
"provider": "Open WebUI",
|
|
"endpoint": OPENAI_API_BASE,
|
|
"default_model": DEFAULT_MODEL,
|
|
"rate_limit": "ai-rate-limiting enabled (100 tokens/60s)",
|
|
"timestamp": datetime.now().isoformat()
|
|
}
|
|
|
|
if __name__ == "__main__":
    port = int(os.getenv("PORT", "8080"))
    uvicorn.run(app, host="0.0.0.0", port=port)
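
# To run locally (assumes this file is named main.py - adjust as needed):
#   python main.py
# or, with auto-reload during development:
#   uvicorn main:app --reload --port 8080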