diff --git a/SCHEME.md b/SCHEME.md
index c8a4fdb..0cdb226 100644
--- a/SCHEME.md
+++ b/SCHEME.md
@@ -52,68 +52,73 @@ Schema semplificato per presentazioni executive e management.
graph TB
%% Styling
classDef infrastructure fill:#e1f5ff,stroke:#01579b,stroke-width:3px,color:#333
- classDef kafka fill:#fff3e0,stroke:#e65100,stroke-width:3px,color:#333
classDef cache fill:#f3e5f5,stroke:#4a148c,stroke-width:3px,color:#333
+ classDef change fill:#fff3e0,stroke:#e65100,stroke-width:3px,color:#333
classDef llm fill:#e8f5e9,stroke:#1b5e20,stroke-width:3px,color:#333
classDef git fill:#fce4ec,stroke:#880e4f,stroke-width:3px,color:#333
classDef human fill:#fff9c4,stroke:#f57f17,stroke-width:3px,color:#333
-
+
%% ========================================
%% FLUSSO 1: RACCOLTA DATI (Background)
%% ========================================
-
+
INFRA[("🏢 SISTEMI
INFRASTRUTTURALI
VMware | K8s | Linux | Cisco")]:::infrastructure
-
+
CONN["🔌 CONNETTORI
Polling Automatico"]:::infrastructure
-
- KAFKA[("📨 APACHE KAFKA
Message Broker
+ Persistenza")]:::kafka
-
- CONSUMER["⚙️ KAFKA CONSUMER
Processor Service"]:::kafka
-
- REDIS[("💾 REDIS CACHE
(Opzionale)
Performance Layer")]:::cache
-
+
+ REDIS[("💾 REDIS CACHE
Configurazione
Infrastruttura")]:::cache
+
INFRA -->|"API Polling
Continuo"| CONN
- CONN -->|"Publish
Eventi"| KAFKA
- KAFKA -->|"Consume
Stream"| CONSUMER
- CONSUMER -.->|"Update
Opzionale"| REDIS
-
+ CONN -->|"Update
Configurazione"| REDIS
+
%% ========================================
- %% FLUSSO 2: GENERAZIONE DOCUMENTAZIONE
+ %% CHANGE DETECTION
%% ========================================
-
- USER["👤 UTENTE
Richiesta Doc"]:::human
-
- LLM["🤖 LLM ENGINE
Claude / GPT"]:::llm
-
+
+ CHANGE["🔍 CHANGE DETECTOR
Rileva Modifiche
Configurazione"]:::change
+
+ REDIS -->|"Monitor
Changes"| CHANGE
+
+ %% ========================================
+ %% FLUSSO 2: GENERAZIONE DOCUMENTAZIONE (Triggered)
+ %% ========================================
+
+ TRIGGER["⚡ TRIGGER
Solo se modifiche"]:::change
+
+ USER["👤 UTENTE
Richiesta Manuale"]:::human
+
+ LLM["🤖 LLM ENGINE
Qwen (Locale)"]:::llm
+
MCP["🔧 MCP SERVER
Model Context Protocol"]:::llm
-
+
DOC["📄 DOCUMENTO
Markdown Generato"]:::llm
-
- USER -->|"1. Prompt"| LLM
- LLM -->|"2. Tool Call"| MCP
- MCP -->|"3a. Query"| KAFKA
- MCP -.->|"3b. Query
Fast"| REDIS
- KAFKA -->|"4a. Dati"| MCP
- REDIS -.->|"4b. Dati"| MCP
- MCP -->|"5. Context"| LLM
- LLM -->|"6. Genera"| DOC
-
+
+ CHANGE -->|"Modifiche
Rilevate"| TRIGGER
+ USER -.->|"Opzionale"| TRIGGER
+
+ TRIGGER -->|"Avvia
Generazione"| LLM
+ LLM -->|"Tool Call"| MCP
+ MCP -->|"Query"| REDIS
+ REDIS -->|"Dati Config"| MCP
+ MCP -->|"Context"| LLM
+ LLM -->|"Genera"| DOC
+
%% ========================================
%% FLUSSO 3: VALIDAZIONE E PUBBLICAZIONE
%% ========================================
-
+
GIT["📦 GITLAB
Repository"]:::git
-
+
PR["🔀 PULL REQUEST
Review Automatica"]:::git
-
+
TECH["👨💼 TEAM TECNICO
Validazione Umana"]:::human
-
+
PIPELINE["⚡ CI/CD PIPELINE
GitLab Runner"]:::git
-
+
MKDOCS["📚 MKDOCS
Static Site Generator"]:::git
-
+
WEB["🌐 DOCUMENTAZIONE
GitLab Pages
(Pubblicata)"]:::git
-
+
DOC -->|"Push +
Branch"| GIT
GIT -->|"Crea"| PR
PR -->|"Notifica"| TECH
@@ -121,20 +126,21 @@ graph TB
GIT -->|"Trigger"| PIPELINE
PIPELINE -->|"Build"| MKDOCS
MKDOCS -->|"Deploy"| WEB
-
+
%% ========================================
- %% ANNOTAZIONI SICUREZZA
+ %% ANNOTAZIONI
%% ========================================
-
+
SECURITY["🔒 SICUREZZA
LLM isolato dai sistemi live"]:::human
- PERF["⚡ PERFORMANCE
Cache Redis opzionale"]:::cache
-
+ EFFICIENCY["⚡ EFFICIENZA
Doc generata solo
su modifiche"]:::change
+
LLM -.->|"NESSUN
ACCESSO"| INFRA
-
+
SECURITY -.-> LLM
- PERF -.-> REDIS
+ EFFICIENCY -.-> CHANGE
```
+
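+A minimal sketch of the change-detection gate in the flow above, assuming redis-py and illustrative key names (`config:*`, `lasthash:*`, `doc:generation:queue`) that are not part of the diagram: the collectors keep the configuration snapshot in Redis, and generation is started only when the snapshot's hash differs from the last one seen.
+
+```python
+import hashlib
+import json
+
+import redis
+
+r = redis.Redis(host="localhost", port=6379, decode_responses=True)
+
+
+def snapshot_hash(key: str) -> str:
+    """Hash the configuration snapshot stored by the collectors under `key`."""
+    snapshot = r.hgetall(key)  # e.g. key = "config:vmware:vcenter01" (illustrative)
+    return hashlib.sha256(json.dumps(snapshot, sort_keys=True).encode()).hexdigest()
+
+
+def detect_change_and_trigger(key: str) -> bool:
+    """Start documentation generation only if the configuration actually changed."""
+    current = snapshot_hash(key)
+    previous = r.get(f"lasthash:{key}")
+    if current == previous:
+        return False  # no change: no document is generated
+    r.set(f"lasthash:{key}", current)
+    r.rpush("doc:generation:queue", key)  # consumed by the LLM generation flow
+    return True
+```
+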
---
## 🔧 Schema Tecnico
@@ -148,207 +154,216 @@ graph TB
%% Styling tecnico
classDef infra fill:#e1f5ff,stroke:#01579b,stroke-width:2px,color:#333,font-size:11px
classDef connector fill:#e3f2fd,stroke:#1565c0,stroke-width:2px,color:#333,font-size:11px
- classDef kafka fill:#fff3e0,stroke:#e65100,stroke-width:2px,color:#333,font-size:11px
classDef cache fill:#f3e5f5,stroke:#4a148c,stroke-width:2px,color:#333,font-size:11px
+ classDef change fill:#fff3e0,stroke:#e65100,stroke-width:2px,color:#333,font-size:11px
classDef llm fill:#e8f5e9,stroke:#1b5e20,stroke-width:2px,color:#333,font-size:11px
classDef git fill:#fce4ec,stroke:#880e4f,stroke-width:2px,color:#333,font-size:11px
classDef monitor fill:#fff8e1,stroke:#f57f17,stroke-width:2px,color:#333,font-size:11px
-
+
%% =====================================
%% LAYER 1: SISTEMI SORGENTE
%% =====================================
-
+
subgraph SOURCES["🏢 INFRASTRUCTURE SOURCES"]
VCENTER["VMware vCenter
API: vSphere REST 7.0+
Port: 443/HTTPS
Auth: API Token"]:::infra
K8S_API["Kubernetes API
API: v1.28+
Port: 6443/HTTPS
Auth: ServiceAccount + RBAC"]:::infra
LINUX["Linux Servers
Protocol: SSH/Ansible
Port: 22
Auth: SSH Keys"]:::infra
CISCO["Cisco Devices
Protocol: NETCONF/RESTCONF
Port: 830/443
Auth: AAA"]:::infra
end
-
+
%% =====================================
%% LAYER 2: CONNETTORI
%% =====================================
-
+
subgraph CONNECTORS["🔌 DATA COLLECTORS (Python/Go)"]
- CONN_VM["VMware Collector
Lang: Python 3.11
Lib: pyvmomi
Schedule: */15 * * * *
Output: JSON"]:::connector
-
+ CONN_VM["VMware Collector
Lang: Python 3.11
Lib: pyvmomi
Schedule: */15 * * * *
Output: JSON → Redis"]:::connector
+
CONN_K8S["K8s Collector
Lang: Python 3.11
Lib: kubernetes-client
Schedule: */5 * * * *
Resources: pods,svc,ing,deploy"]:::connector
-
+
CONN_LNX["Linux Collector
Lang: Python 3.11
Lib: paramiko/ansible
Schedule: */30 * * * *
Data: sysinfo,packages,services"]:::connector
-
+
CONN_CSC["Cisco Collector
Lang: Python 3.11
Lib: ncclient
Schedule: */30 * * * *
Data: interfaces,routing,vlans"]:::connector
end
-
+
VCENTER -->|"GET /api/vcenter/vm"| CONN_VM
K8S_API -->|"kubectl proxy
API calls"| CONN_K8S
LINUX -->|"SSH batch
commands"| CONN_LNX
CISCO -->|"NETCONF
get-config"| CONN_CSC
-
+
%% =====================================
- %% LAYER 3: MESSAGE BROKER
+ %% LAYER 3: REDIS STORAGE
%% =====================================
-
- subgraph MESSAGING["📨 KAFKA CLUSTER (3 brokers)"]
- KAFKA_TOPICS["Kafka Topics:
• vmware.inventory (P:6, R:3)
• k8s.resources (P:12, R:3)
• linux.systems (P:3, R:3)
• cisco.network (P:3, R:3)
Retention: 7 days
Format: JSON + Schema Registry"]:::kafka
-
- SCHEMA["Schema Registry
Avro Schemas
Versioning enabled
Port: 8081"]:::kafka
- end
-
- CONN_VM -->|"Producer
Batch 100 msg"| KAFKA_TOPICS
- CONN_K8S -->|"Producer
Batch 100 msg"| KAFKA_TOPICS
- CONN_LNX -->|"Producer
Batch 50 msg"| KAFKA_TOPICS
- CONN_CSC -->|"Producer
Batch 50 msg"| KAFKA_TOPICS
-
- KAFKA_TOPICS <--> SCHEMA
-
- %% =====================================
- %% LAYER 4: PROCESSING & CACHE
- %% =====================================
-
- subgraph PROCESSING["⚙️ STREAM PROCESSING"]
- CONSUMER_GRP["Kafka Consumer Group
Group ID: doc-consumers
Lang: Python 3.11
Lib: kafka-python
Workers: 6
Commit: auto (5s)"]:::kafka
-
- PROCESSOR["Data Processor
• Validation
• Transformation
• Enrichment
• Deduplication"]:::kafka
- end
-
- KAFKA_TOPICS -->|"Subscribe
offset management"| CONSUMER_GRP
- CONSUMER_GRP --> PROCESSOR
-
- subgraph STORAGE["💾 CACHE LAYER (Optional)"]
+
+ subgraph STORAGE["💾 REDIS CLUSTER"]
REDIS_CLUSTER["Redis Cluster
Mode: Cluster (6 nodes)
Port: 6379
Persistence: RDB + AOF
Memory: 64GB
Eviction: allkeys-lru"]:::cache
-
- REDIS_KEYS["Key Structure:
• vmware:vcenter-id:vms
• k8s:cluster:namespace:resource
• linux:hostname:info
• cisco:device-id:config
TTL: 1-24h based on type"]:::cache
+
+ REDIS_KEYS["Key Structure:
• vmware:vcenter-id:vms:hash
• k8s:cluster:namespace:resource:hash
• linux:hostname:info:hash
• cisco:device-id:config:hash
• changelog:timestamp:diff
TTL: 30d for data, 90d for changelog"]:::cache
end
-
- PROCESSOR -.->|"SET/HSET
Pipeline batch"| REDIS_CLUSTER
+
+ CONN_VM -->|"HSET/HMSET
+ Hash Storage"| REDIS_CLUSTER
+ CONN_K8S -->|"HSET/HMSET
+ Hash Storage"| REDIS_CLUSTER
+ CONN_LNX -->|"HSET/HMSET
+ Hash Storage"| REDIS_CLUSTER
+ CONN_CSC -->|"HSET/HMSET
+ Hash Storage"| REDIS_CLUSTER
+
REDIS_CLUSTER --> REDIS_KEYS
-
+
%% =====================================
- %% LAYER 5: LLM & MCP
+ %% LAYER 4: CHANGE DETECTION
%% =====================================
-
- subgraph LLM_LAYER["🤖 AI GENERATION LAYER"]
- LLM_ENGINE["LLM Engine
Model: Claude Sonnet 4 / GPT-4
API: Anthropic/OpenAI
Temp: 0.3
Max Tokens: 4096
Timeout: 120s"]:::llm
-
- MCP_SERVER["MCP Server
Lang: TypeScript/Node.js
Port: 3000
Protocol: JSON-RPC 2.0
Auth: JWT tokens"]:::llm
-
- MCP_TOOLS["MCP Tools:
• getVMwareInventory(vcenter)
• getK8sResources(cluster,ns,type)
• getLinuxSystemInfo(hostname)
• getCiscoConfig(device,section)
• queryTimeRange(start,end)
Return: JSON + Metadata"]:::llm
+
+ subgraph CHANGE_DETECTION["🔍 CHANGE DETECTION SYSTEM"]
+ DETECTOR["Change Detector Service
Lang: Python 3.11
Lib: redis-py
Algorithm: Hash comparison
Check interval: */5 * * * *"]:::change
+
+ DIFF_ENGINE["Diff Engine
• Deep object comparison
• JSON diff generation
• Change classification
• Severity assessment"]:::change
+
+ CHANGE_LOG["Change Log Store
Key: changelog:*
Data: diff JSON + metadata
Indexed by: timestamp, resource"]:::change
+
+ NOTIFIER["Change Notifier
• Webhook triggers
• Slack notifications
• Event emission
Target: LLM trigger"]:::change
end
-
+
+ REDIS_CLUSTER -->|"Monitor
key changes"| DETECTOR
+ DETECTOR --> DIFF_ENGINE
+ DIFF_ENGINE -->|"Store diff"| CHANGE_LOG
+ CHANGE_LOG --> REDIS_CLUSTER
+ DIFF_ENGINE -->|"Notify if
significant"| NOTIFIER
+
+ %% =====================================
+ %% LAYER 5: LLM TRIGGER & GENERATION
+ %% =====================================
+
+ subgraph TRIGGER_SYSTEM["⚡ TRIGGER SYSTEM"]
+ TRIGGER_SVC["Trigger Service
Lang: Python 3.11
Listen: Webhook + Redis Pub/Sub
Debounce: 5 min
Batch: multiple changes"]:::change
+
+ QUEUE["Generation Queue
Type: Redis List
Priority: High/Medium/Low
Processing: FIFO"]:::change
+ end
+
+ NOTIFIER -->|"Trigger event"| TRIGGER_SVC
+ TRIGGER_SVC -->|"Enqueue
generation task"| QUEUE
+
+ subgraph LLM_LAYER["🤖 AI GENERATION LAYER"]
+ LLM_ENGINE["LLM Engine
Model: Qwen (Locale)
API: Ollama/vLLM/LM Studio
Port: 11434
Temp: 0.3
Max Tokens: 4096
Timeout: 120s"]:::llm
+
+ MCP_SERVER["MCP Server
Lang: TypeScript/Node.js
Port: 3000
Protocol: JSON-RPC 2.0
Auth: JWT tokens"]:::llm
+
+ MCP_TOOLS["MCP Tools:
• getVMwareInventory(vcenter)
• getK8sResources(cluster,ns,type)
• getLinuxSystemInfo(hostname)
• getCiscoConfig(device,section)
• getChangelog(start,end,resource)
Return: JSON + Metadata"]:::llm
+ end
+
+ QUEUE -->|"Dequeue
task"| LLM_ENGINE
+
LLM_ENGINE <-->|"Tool calls
JSON-RPC"| MCP_SERVER
MCP_SERVER --> MCP_TOOLS
-
- MCP_TOOLS -->|"1. Query Kafka Consumer API
GET /api/v1/data"| CONSUMER_GRP
- MCP_TOOLS -.->|"2. Fallback Redis
MGET/HGETALL"| REDIS_CLUSTER
-
- CONSUMER_GRP -->|"JSON Response
+ Timestamps"| MCP_TOOLS
- REDIS_CLUSTER -.->|"Cached JSON
Fast response"| MCP_TOOLS
-
+
+ MCP_TOOLS -->|"HGETALL/MGET
Read data"| REDIS_CLUSTER
+ REDIS_CLUSTER -->|"Config data
+ Changelog"| MCP_TOOLS
MCP_TOOLS -->|"Structured Data
+ Context"| LLM_ENGINE
-
+
subgraph OUTPUT["📝 DOCUMENT GENERATION"]
TEMPLATE["Template Engine
Format: Jinja2
Templates: markdown/*.j2
Variables: from LLM"]:::llm
-
- MARKDOWN["Markdown Output
Format: CommonMark
Metadata: YAML frontmatter
Assets: diagrams in mermaid"]:::llm
-
- VALIDATOR["Doc Validator
• Markdown linting
• Link checking
• Schema validation"]:::llm
+
+ MARKDOWN["Markdown Output
Format: CommonMark
Metadata: YAML frontmatter
Change summary included
Assets: diagrams in mermaid"]:::llm
+
+ VALIDATOR["Doc Validator
• Markdown linting
• Link checking
• Schema validation
• Change verification"]:::llm
end
-
+
LLM_ENGINE --> TEMPLATE
TEMPLATE --> MARKDOWN
MARKDOWN --> VALIDATOR
-
+
%% =====================================
%% LAYER 6: GITOPS
%% =====================================
-
+
subgraph GITOPS["🔄 GITOPS WORKFLOW"]
GIT_REPO["GitLab Repository
URL: gitlab.com/docs/infra
Branch strategy: main + feature/*
Protected: main (require approval)"]:::git
-
+
GIT_API["GitLab API
API: v4
Auth: Project Access Token
Permissions: api, write_repo"]:::git
-
- PR_AUTO["Automated PR Creator
Lang: Python 3.11
Lib: python-gitlab
Template: .gitlab/merge_request.md"]:::git
+
+ PR_AUTO["Automated PR Creator
Lang: Python 3.11
Lib: python-gitlab
Template: .gitlab/merge_request.md
Include: change summary"]:::git
end
-
+
VALIDATOR -->|"git add/commit/push"| GIT_REPO
GIT_REPO <--> GIT_API
GIT_API --> PR_AUTO
-
- REVIEWER["👨💼 Technical Reviewer
Role: Maintainer/Owner
Review: diff + validation
Approve: required (min 1)"]:::monitor
-
+
+ REVIEWER["👨💼 Technical Reviewer
Role: Maintainer/Owner
Review: diff + validation
Check: change correlation
Approve: required (min 1)"]:::monitor
+
PR_AUTO -->|"Notification
Email + Slack"| REVIEWER
REVIEWER -->|"Merge to main"| GIT_REPO
-
+
%% =====================================
%% LAYER 7: CI/CD & PUBLISH
%% =====================================
-
+
subgraph CICD["⚡ CI/CD PIPELINE"]
GITLAB_CI["GitLab CI/CD
Runner: docker
Image: python:3.11-alpine
Stages: build, test, deploy"]:::git
-
+
PIPELINE_JOBS["Pipeline Jobs:
1. lint (markdownlint-cli)
2. build (mkdocs build)
3. test (link-checker)
4. deploy (rsync/s3)"]:::git
-
+
MKDOCS_CFG["MkDocs Config
Theme: material
Plugins: search, tags, mermaid
Extensions: admonition, codehilite"]:::git
end
-
+
GIT_REPO -->|"on: push to main
Webhook trigger"| GITLAB_CI
GITLAB_CI --> PIPELINE_JOBS
PIPELINE_JOBS --> MKDOCS_CFG
-
+
subgraph PUBLISH["🌐 PUBLICATION"]
STATIC_SITE["Static Site
Generator: MkDocs
Output: HTML/CSS/JS
Assets: optimized images"]:::git
-
+
CDN["GitLab Pages / S3 + CloudFront
URL: docs.company.com
SSL: Let's Encrypt
Cache: 1h"]:::git
-
+
SEARCH["Search Index
Engine: Algolia/Meilisearch
Update: on publish
API: REST"]:::git
end
-
+
MKDOCS_CFG -->|"mkdocs build
--strict"| STATIC_SITE
STATIC_SITE --> CDN
STATIC_SITE --> SEARCH
-
+
%% =====================================
%% LAYER 8: MONITORING & OBSERVABILITY
%% =====================================
-
+
subgraph OBSERVABILITY["📊 MONITORING & LOGGING"]
- PROMETHEUS["Prometheus
Metrics: collector lag, cache hit/miss
Scrape: 30s
Retention: 15d"]:::monitor
-
- GRAFANA["Grafana Dashboards
• Kafka metrics
• Redis performance
• LLM response times
• Pipeline success rate"]:::monitor
-
+ PROMETHEUS["Prometheus
Metrics: collector updates, changes detected
Scrape: 30s
Retention: 15d"]:::monitor
+
+ GRAFANA["Grafana Dashboards
• Collector status
• Redis performance
• Change detection rate
• LLM response times
• Pipeline success rate"]:::monitor
+
ELK["ELK Stack
Logs: all components
Index: daily rotation
Retention: 30d"]:::monitor
-
- ALERTS["Alerting
• Connector failures
• Kafka lag > 10k
• Redis OOM
• Pipeline failures
Channel: Slack + PagerDuty"]:::monitor
+
+ ALERTS["Alerting
• Collector failures
• Redis issues
• Change detection errors
• Pipeline failures
Channel: Slack + PagerDuty"]:::monitor
end
-
+
CONN_VM -.->|"metrics"| PROMETHEUS
CONN_K8S -.->|"metrics"| PROMETHEUS
- KAFKA_TOPICS -.->|"metrics"| PROMETHEUS
REDIS_CLUSTER -.->|"metrics"| PROMETHEUS
+ DETECTOR -.->|"metrics"| PROMETHEUS
MCP_SERVER -.->|"metrics"| PROMETHEUS
GITLAB_CI -.->|"metrics"| PROMETHEUS
-
+
PROMETHEUS --> GRAFANA
-
+
CONN_VM -.->|"logs"| ELK
- CONSUMER_GRP -.->|"logs"| ELK
+ DETECTOR -.->|"logs"| ELK
MCP_SERVER -.->|"logs"| ELK
GITLAB_CI -.->|"logs"| ELK
-
+
GRAFANA --> ALERTS
-
+
%% =====================================
- %% SECURITY ANNOTATIONS
+ %% SECURITY & EFFICIENCY ANNOTATIONS
%% =====================================
-
+
SEC1["🔒 SECURITY:
• All APIs use TLS 1.3
• Secrets in Vault/K8s Secrets
• Network: private VPC
• LLM has NO direct access"]:::monitor
-
+
SEC2["🔐 AUTHENTICATION:
• API Tokens rotated 90d
• RBAC enforced
• Audit logs enabled
• MFA required for Git"]:::monitor
-
+
+ EFF1["⚡ EFFICIENCY:
• Doc generation only on changes
• Debounce prevents spam
• Hash-based change detection
• Batch processing"]:::change
+
SEC1 -.-> MCP_SERVER
SEC2 -.-> GIT_REPO
+ EFF1 -.-> DETECTOR
```
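+
+Layers 3 and 4 above could interact roughly as sketched below, assuming redis-py; the resource key follows the documented `<resource>:hash` pattern and the changelog entry the `changelog:*` pattern, while field names, TTL handling and the naive field-level diff are illustrative assumptions rather than the actual implementation.
+
+```python
+import hashlib
+import json
+import time
+
+import redis
+
+r = redis.Redis(host="localhost", port=6379, decode_responses=True)
+DATA_TTL = 30 * 24 * 3600       # "TTL: 30d for data"
+CHANGELOG_TTL = 90 * 24 * 3600  # "TTL: 90d for changelog"
+
+
+def store_snapshot(resource_key: str, snapshot: dict) -> None:
+    """Collector side: persist the snapshot as a Redis hash plus a content hash."""
+    r.hset(resource_key, mapping={k: json.dumps(v) for k, v in snapshot.items()})
+    digest = hashlib.sha256(json.dumps(snapshot, sort_keys=True).encode()).hexdigest()
+    r.set(f"{resource_key}:hash", digest, ex=DATA_TTL)  # e.g. "vmware:vcenter01:vms:hash"
+    r.expire(resource_key, DATA_TTL)
+
+
+def record_change(resource_key: str, old: dict, new: dict) -> None:
+    """Detector side: store a field-level diff JSON under a changelog:* key."""
+    diff = {k: {"old": old.get(k), "new": new.get(k)}
+            for k in set(old) | set(new) if old.get(k) != new.get(k)}
+    entry_key = f"changelog:{int(time.time())}:{resource_key}"
+    r.set(entry_key, json.dumps({"resource": resource_key, "diff": diff}), ex=CHANGELOG_TTL)
+```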
+
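+A debounced trigger along the lines of the TRIGGER SYSTEM node (Pub/Sub listener, 5-minute debounce, batched changes pushed onto a Redis-list FIFO queue) might look like this; the channel and queue names are assumed, not taken from the diagram.
+
+```python
+import time
+
+import redis
+
+r = redis.Redis(host="localhost", port=6379, decode_responses=True)
+
+DEBOUNCE_SECONDS = 300              # "Debounce: 5 min"
+QUEUE_KEY = "doc:generation:queue"  # assumed name of the Redis-list generation queue
+CHANNEL = "config-changes"          # assumed Pub/Sub channel used by the change notifier
+
+
+def run_trigger_service() -> None:
+    """Collect change events and enqueue one batched generation task per debounce window."""
+    pubsub = r.pubsub()
+    pubsub.subscribe(CHANNEL)
+    pending: set[str] = set()
+    deadline: float | None = None
+
+    while True:
+        message = pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
+        if message is not None:
+            pending.add(message["data"])  # resource id published by the change notifier
+            deadline = deadline or time.monotonic() + DEBOUNCE_SECONDS
+        if pending and deadline is not None and time.monotonic() >= deadline:
+            for resource in sorted(pending):
+                r.rpush(QUEUE_KEY, resource)  # FIFO: the generation worker pops from here
+            pending.clear()
+            deadline = None
+
+
+def next_generation_task() -> str:
+    """Generation worker side: block until the next task is available."""
+    _, resource = r.blpop(QUEUE_KEY)
+    return resource
+```
+
+Batching the changes collected during the debounce window keeps the pipeline to one regenerated document per burst of modifications rather than one per change, which is the efficiency property called out in the diagram.
+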
---
## 💬 Sistema RAG Conversazionale