petter2025 commited on
Commit
642b6b3
·
verified ·
1 Parent(s): c07a736

Update hf_demo.py

Browse files
Files changed (1) hide show
  1. hf_demo.py +27 -1047
hf_demo.py CHANGED
@@ -1,1061 +1,41 @@
1
- """
2
- ARF OSS v3.3.9 - Enterprise Reliability Engine (Backend API only)
3
- With integrated Infrastructure Governance Module
4
- """
5
-
6
- import os
7
- import sys
8
- import json
9
- import uuid
10
- import hashlib
11
- import logging
12
- import sqlite3
13
- import requests
14
- from contextlib import contextmanager
15
- from datetime import datetime
16
- from enum import Enum
17
- from typing import Dict, List, Optional, Any, Tuple
18
-
19
- import yaml
20
- import numpy as np
21
- from fastapi import FastAPI, HTTPException, Depends, status
22
  from fastapi.middleware.cors import CORSMiddleware
23
- from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
24
- from pydantic import BaseModel, Field, field_validator
25
- from pydantic_settings import BaseSettings, SettingsConfigDict
26
-
27
- # ============== INFRASTRUCTURE MODULE IMPORTS ==============
28
- from infrastructure import (
29
- AzureInfrastructureSimulator,
30
- RegionAllowedPolicy,
31
- CostThresholdPolicy,
32
- ProvisionResourceIntent,
33
- DeployConfigurationIntent,
34
- GrantAccessIntent,
35
- ResourceType,
36
- Environment,
37
- RecommendedAction,
38
- )
39
-
40
- # ============== HMC LEARNER IMPORT ==============
41
- from hmc_learner import train_hmc_model # new import
42
-
43
- # ============== CONFIGURATION (Pydantic V2) ==============
44
class Settings(BaseSettings):
    """Application settings loaded from environment variables."""
    # Hugging Face Space identity / auth (defaults suit local runs).
    hf_space_id: str = Field(default='local', alias='SPACE_ID')
    hf_token: str = Field(default='', alias='HF_TOKEN')
    # Persistent storage root: prefer the Space's /data volume when mounted.
    data_dir: str = Field(
        default='/data' if os.path.exists('/data') else './data',
        alias='DATA_DIR'
    )
    # Lead-generation contact points (hard-coded defaults, not env-driven).
    lead_email: str = "petter2025us@outlook.com"
    calendly_url: str = "https://calendly.com/petter2025us/arf-demo"
    slack_webhook: str = Field(default='', alias='SLACK_WEBHOOK')
    sendgrid_api_key: str = Field(default='', alias='SENDGRID_API_KEY')
    # Bearer-token API key. NOTE(review): when ARF_API_KEY is unset a fresh
    # random UUID is generated on every process start — confirm intended.
    api_key: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        alias='ARF_API_KEY'
    )
    # Policy-engine defaults (consumed by PolicyEngine).
    default_confidence_threshold: float = 0.9
    default_max_risk: str = "MEDIUM"

    model_config = SettingsConfigDict(
        populate_by_name=True,
        extra='ignore',
        env_prefix='',
        case_sensitive=False
    )

    def __init__(self, **kwargs):
        # Side effect on construction: ensure the data directory exists
        # before any component opens a database or log file beneath it.
        super().__init__(**kwargs)
        os.makedirs(self.data_dir, exist_ok=True)
73
-
74
# Single module-wide settings instance (also creates the data directory).
settings = Settings()

# ============== LOGGING ==============
# Log to a file under the data dir and to the console simultaneously.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(f'{settings.data_dir}/arf.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('arf.oss')
86
-
87
- # ============== ENUMS (original ARF) ==============
88
class RiskLevel(str, Enum):
    """Ordered action-risk categories.

    Member order matters: PolicyEngine compares levels by their position in
    list(RiskLevel), so LOW < MEDIUM < HIGH < CRITICAL.
    """
    LOW = "LOW"
    MEDIUM = "MEDIUM"
    HIGH = "HIGH"
    CRITICAL = "CRITICAL"
93
-
94
class ExecutionLevel(str, Enum):
    """Execution ladder rung that the policy engine routes an action to."""
    AUTONOMOUS_LOW = "AUTONOMOUS_LOW"
    AUTONOMOUS_HIGH = "AUTONOMOUS_HIGH"
    SUPERVISED = "SUPERVISED"
    OPERATOR_REVIEW = "OPERATOR_REVIEW"
99
-
100
class LeadSignal(str, Enum):
    """Enterprise lead-generation signal types stored in the signals table."""
    HIGH_RISK_BLOCKED = "high_risk_blocked"
    NOVEL_ACTION = "novel_action"
    POLICY_VIOLATION = "policy_violation"
    CONFIDENCE_LOW = "confidence_low"
    REPEATED_FAILURE = "repeated_failure"
106
-
107
- # ============== ORIGINAL ARF COMPONENTS ==============
108
class BayesianRiskEngine:
    """Bayesian action-risk scorer with conjugate Beta priors.

    Each proposed action is keyword-classified into a coarse category; recorded
    success/failure outcomes (persisted in SQLite) update a Beta posterior whose
    mean, scaled by contextual multipliers, becomes the risk score.
    """

    def __init__(self):
        # Global fallback prior: Beta(2, 5).
        self.prior_alpha = 2.0
        self.prior_beta = 5.0
        # Category-specific Beta priors used when evidence is sparse.
        self.action_priors = {
            'database': {'alpha': 1.5, 'beta': 8.0},
            'network': {'alpha': 3.0, 'beta': 4.0},
            'compute': {'alpha': 4.0, 'beta': 3.0},
            'security': {'alpha': 2.0, 'beta': 6.0},
            'default': {'alpha': 2.0, 'beta': 5.0}
        }
        self.evidence_db = f"{settings.data_dir}/evidence.db"
        self._init_db()

    def _init_db(self):
        """Create the evidence table and index if missing.

        Raises RuntimeError when the SQLite store cannot be initialized.
        """
        try:
            with self._get_db() as conn:
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS evidence (
                        id TEXT PRIMARY KEY,
                        action_type TEXT,
                        action_hash TEXT,
                        success INTEGER,
                        total INTEGER,
                        timestamp TEXT,
                        metadata TEXT
                    )
                ''')
                conn.execute('CREATE INDEX IF NOT EXISTS idx_action_hash ON evidence(action_hash)')
        except sqlite3.Error as e:
            logger.error(f"Failed to initialize evidence database: {e}")
            raise RuntimeError("Could not initialize evidence storage") from e

    @contextmanager
    def _get_db(self):
        """Yield a SQLite connection; always closed on exit, errors re-raised."""
        conn = None
        try:
            conn = sqlite3.connect(self.evidence_db)
            yield conn
        except sqlite3.Error as e:
            logger.error(f"Database error: {e}")
            raise
        finally:
            if conn:
                conn.close()

    def classify_action(self, action_text: str) -> str:
        """Keyword-match the action text into one of the prior categories.

        First matching category wins; order is database > network > compute >
        security > default.
        """
        action_lower = action_text.lower()
        if any(word in action_lower for word in ['database', 'db', 'sql', 'table', 'drop', 'delete']):
            return 'database'
        elif any(word in action_lower for word in ['network', 'firewall', 'load balancer']):
            return 'network'
        elif any(word in action_lower for word in ['pod', 'container', 'deploy', 'scale']):
            return 'compute'
        elif any(word in action_lower for word in ['security', 'cert', 'key', 'access']):
            return 'security'
        else:
            return 'default'

    def get_prior(self, action_type: str) -> Tuple[float, float]:
        """Return the (alpha, beta) prior for a category, falling back to 'default'."""
        prior = self.action_priors.get(action_type, self.action_priors['default'])
        return prior['alpha'], prior['beta']

    def get_evidence(self, action_hash: str) -> Tuple[int, int]:
        """Return accumulated (successes, trials) for the truncated hash.

        Returns (0, 0) on any database error so scoring degrades to the prior.
        """
        try:
            with self._get_db() as conn:
                cursor = conn.execute(
                    'SELECT SUM(success), SUM(total) FROM evidence WHERE action_hash = ?',
                    (action_hash[:50],)
                )
                row = cursor.fetchone()
                return (row[0] or 0, row[1] or 0) if row else (0, 0)
        except sqlite3.Error as e:
            logger.error(f"Failed to retrieve evidence: {e}")
            return (0, 0)

    def calculate_posterior(self, action_text: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Compute the Beta-posterior risk for an action plus context.

        Returns a dict with the clamped score, RiskLevel, 95% credible
        interval, posterior/prior parameters, evidence counts, the context
        multiplier and a human-readable calculation trace.
        """
        action_type = self.classify_action(action_text)
        alpha0, beta0 = self.get_prior(action_type)
        action_hash = hashlib.sha256(action_text.encode()).hexdigest()
        successes, trials = self.get_evidence(action_hash)
        # Conjugate update: Beta(alpha0 + successes, beta0 + failures).
        alpha_n = alpha0 + successes
        beta_n = beta0 + (trials - successes)
        posterior_mean = alpha_n / (alpha_n + beta_n)
        context_multiplier = self._context_likelihood(context)
        risk_score = posterior_mean * context_multiplier
        risk_score = min(0.99, max(0.01, risk_score))

        # Normal approximation of the Beta for a 95% credible interval.
        variance = (alpha_n * beta_n) / ((alpha_n + beta_n)**2 * (alpha_n + beta_n + 1))
        std_dev = variance ** 0.5
        ci_lower = max(0.01, posterior_mean - 1.96 * std_dev)
        ci_upper = min(0.99, posterior_mean + 1.96 * std_dev)

        if risk_score > 0.8:
            risk_level = RiskLevel.CRITICAL
        elif risk_score > 0.6:
            risk_level = RiskLevel.HIGH
        elif risk_score > 0.4:
            risk_level = RiskLevel.MEDIUM
        else:
            risk_level = RiskLevel.LOW

        return {
            "score": risk_score,
            "level": risk_level,
            "credible_interval": [ci_lower, ci_upper],
            "posterior_parameters": {"alpha": alpha_n, "beta": beta_n},
            "prior_used": {"alpha": alpha0, "beta": beta0, "type": action_type},
            "evidence_used": {"successes": successes, "trials": trials},
            "context_multiplier": context_multiplier,
            "calculation": f"""
            Posterior = Beta(α={alpha_n:.1f}, β={beta_n:.1f})
            Mean = {alpha_n:.1f} / ({alpha_n:.1f} + {beta_n:.1f}) = {posterior_mean:.3f}
            × Context multiplier {context_multiplier:.2f} = {risk_score:.3f}
            """
        }

    def _context_likelihood(self, context: Dict) -> float:
        """Multiplicative risk adjustment from deployment context.

        Production, off-hours, junior operators and missing backups all push
        the multiplier up; staging and senior operators pull it down.
        """
        multiplier = 1.0
        if context.get('environment') == 'production':
            multiplier *= 1.5
        elif context.get('environment') == 'staging':
            multiplier *= 0.8
        hour = datetime.now().hour
        if hour < 6 or hour > 22:
            multiplier *= 1.3
        if context.get('user_role') == 'junior':
            multiplier *= 1.4
        elif context.get('user_role') == 'senior':
            multiplier *= 0.9
        if not context.get('backup_available', True):
            multiplier *= 1.6
        return multiplier

    def record_outcome(self, action_text: str, success: bool):
        """Persist one success/failure trial for the action (best-effort)."""
        action_hash = hashlib.sha256(action_text.encode()).hexdigest()
        action_type = self.classify_action(action_text)
        try:
            with self._get_db() as conn:
                conn.execute('''
                    INSERT INTO evidence (id, action_type, action_hash, success, total, timestamp)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', (
                    str(uuid.uuid4()),
                    action_type,
                    action_hash[:50],
                    1 if success else 0,
                    1,
                    datetime.utcnow().isoformat()
                ))
                conn.commit()
            logger.info(f"Recorded outcome for {action_type}: success={success}")
        except sqlite3.Error as e:
            logger.error(f"Failed to record outcome: {e}")

    # ---------- ENHANCED RISK USING HMC COEFFICIENTS ----------
    def enhanced_risk(self, action_text: str, context: Dict, hmc_coeffs: Optional[Dict] = None) -> float:
        """
        Compute a risk score using HMC coefficients if available.
        Falls back to simple posterior score if no coefficients.
        """
        if hmc_coeffs is None:
            return self.calculate_posterior(action_text, context)["score"]

        # Build feature vector (same as in hmc_learner preprocessing).
        action_cat = self.classify_action(action_text)
        # Map category to code using the saved mapping (if present).
        cat_mapping = hmc_coeffs.get("action_cat_mapping", {})
        # Invert mapping (category -> code).
        cat_to_code = {v: k for k, v in cat_mapping.items()}
        # FIX: the mapping is loaded from JSON, whose object keys are always
        # strings, so a stored code arrives as e.g. "2"; coerce to int before
        # using it as a list index (previously raised TypeError).
        cat_code = int(cat_to_code.get(action_cat, 0))  # default 0 if not found

        env_prod = 1 if context.get('environment') == 'production' else 0
        role_junior = 1 if context.get('user_role') == 'junior' else 0
        hour = datetime.now().hour
        hour_sin = np.sin(2 * np.pi * hour / 24)
        hour_cos = np.cos(2 * np.pi * hour / 24)

        # Use the simple posterior risk as a feature (centered at 0.5).
        simple_risk = self.calculate_posterior(action_text, context)["score"]
        confidence = context.get('confidence', 0.85)

        # Linear predictor from the HMC posterior means.
        logit = (
            hmc_coeffs.get('α_cat', {}).get('mean', [0])[cat_code] +
            hmc_coeffs.get('β_env', {}).get('mean', 0) * env_prod +
            hmc_coeffs.get('β_role', {}).get('mean', 0) * role_junior +
            hmc_coeffs.get('β_risk', {}).get('mean', 0) * (simple_risk - 0.5) +
            hmc_coeffs.get('β_hour_sin', {}).get('mean', 0) * hour_sin +
            hmc_coeffs.get('β_hour_cos', {}).get('mean', 0) * hour_cos +
            hmc_coeffs.get('β_conf', {}).get('mean', 0) * (confidence - 0.5)
        )
        # Convert to probability. FIX: cast to a plain float so the return
        # type matches the fallback path (np.exp yields a numpy scalar).
        prob = 1 / (1 + np.exp(-logit))
        return float(prob)
304
-
305
-
306
class PolicyEngine:
    """Deterministic OSS policies – advisory only."""
    def __init__(self):
        # Static policy configuration; thresholds seeded from Settings.
        self.config = {
            "confidence_threshold": settings.default_confidence_threshold,
            "max_autonomous_risk": settings.default_max_risk,
            # Per-level score thresholds (exposed via the config API).
            "risk_thresholds": {
                RiskLevel.LOW: 0.7,
                RiskLevel.MEDIUM: 0.5,
                RiskLevel.HIGH: 0.3,
                RiskLevel.CRITICAL: 0.1
            },
            # Regexes flagging destructive SQL / shell operations.
            "destructive_patterns": [
                r'\bdrop\s+database\b',
                r'\bdelete\s+from\b',
                r'\btruncate\b',
                r'\balter\s+table\b',
                r'\bdrop\s+table\b',
                r'\bshutdown\b',
                r'\bterminate\b',
                r'\brm\s+-rf\b'
            ],
            "require_human": [RiskLevel.CRITICAL, RiskLevel.HIGH],
            "require_rollback": True
        }

    def evaluate(self, action: str, risk: Dict[str, Any], confidence: float) -> Dict[str, Any]:
        """Run every policy gate over an action and its risk assessment.

        Gate order is significant: downstream code indexes gates[0..4] as
        confidence, risk, destructive-check, human-review, license.
        Returns a dict with 'allowed', 'required_level', 'gates' and the
        OSS advisory disclaimer.
        """
        import re
        gates = []

        # Gate 1: model confidence must meet the configured threshold.
        confidence_passed = confidence >= self.config["confidence_threshold"]
        gates.append({
            "gate": "confidence_threshold",
            "passed": confidence_passed,
            "threshold": self.config["confidence_threshold"],
            "actual": confidence,
            "reason": f"Confidence {confidence:.2f} {'≥' if confidence_passed else '<'} threshold {self.config['confidence_threshold']}",
            "type": "numerical"
        })

        # Gate 2: risk level at or below the max autonomous level
        # (ordering comes from RiskLevel member order).
        risk_levels = list(RiskLevel)
        max_idx = risk_levels.index(RiskLevel(self.config["max_autonomous_risk"]))
        action_idx = risk_levels.index(risk["level"])
        risk_passed = action_idx <= max_idx
        gates.append({
            "gate": "risk_assessment",
            "passed": risk_passed,
            "max_allowed": self.config["max_autonomous_risk"],
            "actual": risk["level"].value,
            "reason": f"Risk level {risk['level'].value} {'≤' if risk_passed else '>'} max autonomous {self.config['max_autonomous_risk']}",
            "type": "categorical",
            "metadata": {"risk_score": risk["score"], "credible_interval": risk["credible_interval"]}
        })

        # Gate 3: destructive-operation pattern scan (case-insensitive via lower()).
        is_destructive = any(re.search(pattern, action.lower()) for pattern in self.config["destructive_patterns"])
        gates.append({
            "gate": "destructive_check",
            "passed": not is_destructive,
            "is_destructive": is_destructive,
            "reason": "Non-destructive operation" if not is_destructive else "Destructive operation detected",
            "type": "boolean",
            "metadata": {"requires_rollback": is_destructive}
        })

        # Gate 4: levels listed in require_human always need a person.
        requires_human = risk["level"] in self.config["require_human"]
        gates.append({
            "gate": "human_review",
            "passed": not requires_human,
            "requires_human": requires_human,
            "reason": "Human review not required" if not requires_human else f"Human review required for {risk['level'].value} risk",
            "type": "boolean"
        })

        # Gate 5: OSS license gate — always passes, marks output advisory-only.
        gates.append({
            "gate": "license_check",
            "passed": True,
            "edition": "OSS",
            "reason": "OSS edition - advisory only",
            "type": "license"
        })

        all_passed = all(g["passed"] for g in gates)

        # Map gate outcome + risk level onto the execution ladder.
        if not all_passed:
            required_level = ExecutionLevel.OPERATOR_REVIEW
        elif risk["level"] == RiskLevel.LOW:
            required_level = ExecutionLevel.AUTONOMOUS_LOW
        elif risk["level"] == RiskLevel.MEDIUM:
            required_level = ExecutionLevel.AUTONOMOUS_HIGH
        else:
            required_level = ExecutionLevel.SUPERVISED

        return {
            "allowed": all_passed,
            "required_level": required_level.value,
            "gates": gates,
            "advisory_only": True,
            "oss_disclaimer": "OSS edition provides advisory only. Enterprise adds execution."
        }

    def update_config(self, key: str, value: Any) -> bool:
        """Update a known config key in place; return False for unknown keys."""
        if key in self.config:
            self.config[key] = value
            logger.info(f"Policy updated: {key} = {value}")
            return True
        return False
412
-
413
- # ==============================================================================
414
- # UPGRADED RAG MEMORY WITH SENTENCE-TRANSFORMERS
415
- # ==============================================================================
416
class RAGMemory:
    """Persistent RAG memory with SQLite and sentence‑transformer embeddings."""
    def __init__(self):
        self.db_path = f"{settings.data_dir}/memory.db"
        self._init_db()
        # In-process cache keyed by exact text to avoid re-encoding.
        self.embedding_cache = {}
        self._sentence_model = None  # lazy loaded

    def _get_sentence_model(self):
        """Lazy load the sentence‑transformer model."""
        if self._sentence_model is None:
            from sentence_transformers import SentenceTransformer
            # Using all-MiniLM-L6-v2 – fast and good for semantic similarity
            self._sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
        return self._sentence_model

    def _build_incident_text(self, action: str) -> str:
        """Create a descriptive text from the action."""
        # You can enrich this with more context (risk level, component, etc.)
        return f"Action: {action}"

    def _simple_embedding(self, text: str) -> List[float]:
        """Generate embedding using sentence‑transformer (cached per text)."""
        if text in self.embedding_cache:
            return self.embedding_cache[text]

        model = self._get_sentence_model()
        # encode returns a numpy array; convert to list for JSON storage
        embedding = model.encode(text, convert_to_numpy=True).tolist()
        self.embedding_cache[text] = embedding
        return embedding

    def _ensure_columns(self, conn, columns):
        """Add columns to incidents table if they do not exist.

        Poor-man's schema migration: inspects PRAGMA table_info and issues
        ALTER TABLE for each missing (name, type) pair. Failures are logged
        but not raised, so a partial migration will not abort startup.
        """
        cursor = conn.execute("PRAGMA table_info(incidents)")
        existing = [row[1] for row in cursor.fetchall()]
        for col_name, col_type in columns:
            if col_name not in existing:
                try:
                    conn.execute(f"ALTER TABLE incidents ADD COLUMN {col_name} {col_type}")
                    logger.info(f"Added column {col_name} to incidents table")
                except sqlite3.Error as e:
                    logger.error(f"Failed to add column {col_name}: {e}")

    def _init_db(self):
        """Create incidents/signals tables and indexes; migrate new columns.

        Raises RuntimeError when the memory store cannot be initialized.
        """
        try:
            with self._get_db() as conn:
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS incidents (
                        id TEXT PRIMARY KEY,
                        action TEXT,
                        action_hash TEXT,
                        risk_score REAL,
                        risk_level TEXT,
                        confidence REAL,
                        allowed BOOLEAN,
                        gates TEXT,
                        timestamp TEXT,
                        embedding TEXT
                    )
                ''')
                # Add new columns if they don't exist
                self._ensure_columns(conn, [
                    ('environment', 'TEXT'),
                    ('user_role', 'TEXT'),
                    ('requires_human', 'BOOLEAN'),
                    ('rollback_feasible', 'BOOLEAN'),
                    ('hour_of_day', 'INTEGER'),
                    ('action_category', 'TEXT')
                ])
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS signals (
                        id TEXT PRIMARY KEY,
                        signal_type TEXT,
                        action TEXT,
                        risk_score REAL,
                        metadata TEXT,
                        timestamp TEXT,
                        contacted BOOLEAN DEFAULT 0
                    )
                ''')
                conn.execute('CREATE INDEX IF NOT EXISTS idx_action_hash ON incidents(action_hash)')
                conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_type ON signals(signal_type)')
                conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_contacted ON signals(contacted)')
        except sqlite3.Error as e:
            logger.error(f"Failed to initialize memory database: {e}")
            raise RuntimeError("Could not initialize memory storage") from e

    @contextmanager
    def _get_db(self):
        """Yield a row-factory SQLite connection; always closed on exit."""
        conn = None
        try:
            conn = sqlite3.connect(self.db_path)
            conn.row_factory = sqlite3.Row
            yield conn
        except sqlite3.Error as e:
            logger.error(f"Database error in memory: {e}")
            raise
        finally:
            if conn:
                conn.close()

    def store_incident(self, action: str, risk_score: float, risk_level: RiskLevel,
                       confidence: float, allowed: bool, gates: List[Dict],
                       environment: str, user_role: str, requires_human: bool,
                       rollback_feasible: bool, hour_of_day: int, action_category: str):
        """Persist one evaluated incident with its embedding (best-effort).

        Action text is truncated to 500 chars, the hash to 50; gates are
        serialized to JSON.
        """
        action_hash = hashlib.sha256(action.encode()).hexdigest()[:50]
        incident_text = self._build_incident_text(action)
        embedding = json.dumps(self._simple_embedding(incident_text))
        try:
            with self._get_db() as conn:
                conn.execute('''
                    INSERT INTO incidents
                    (id, action, action_hash, risk_score, risk_level, confidence, allowed, gates, timestamp, embedding,
                     environment, user_role, requires_human, rollback_feasible, hour_of_day, action_category)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ''', (
                    str(uuid.uuid4()),
                    action[:500],
                    action_hash,
                    risk_score,
                    risk_level.value,
                    confidence,
                    1 if allowed else 0,
                    json.dumps(gates),
                    datetime.utcnow().isoformat(),
                    embedding,
                    environment,
                    user_role,
                    1 if requires_human else 0,
                    1 if rollback_feasible else 0,
                    hour_of_day,
                    action_category
                ))
                conn.commit()
        except sqlite3.Error as e:
            logger.error(f"Failed to store incident: {e}")

    def find_similar(self, action: str, limit: int = 5) -> List[Dict]:
        """Return up to `limit` past incidents ranked by cosine similarity.

        Only the 100 most recent incidents are scored (brute-force scan);
        returns [] on database error.
        """
        # Build query embedding from the action text
        query_text = self._build_incident_text(action)
        query_embedding = self._simple_embedding(query_text)
        try:
            with self._get_db() as conn:
                cursor = conn.execute('SELECT * FROM incidents ORDER BY timestamp DESC LIMIT 100')
                incidents = []
                for row in cursor.fetchall():
                    stored_embedding = json.loads(row['embedding'])
                    # Manual cosine similarity (guards against zero norms).
                    dot = sum(q * s for q, s in zip(query_embedding, stored_embedding))
                    norm_q = sum(q*q for q in query_embedding) ** 0.5
                    norm_s = sum(s*s for s in stored_embedding) ** 0.5
                    similarity = dot / (norm_q * norm_s) if (norm_q > 0 and norm_s > 0) else 0
                    incidents.append({
                        'id': row['id'],
                        'action': row['action'],
                        'risk_score': row['risk_score'],
                        'risk_level': row['risk_level'],
                        'confidence': row['confidence'],
                        'allowed': bool(row['allowed']),
                        'timestamp': row['timestamp'],
                        'similarity': similarity
                    })
                incidents.sort(key=lambda x: x['similarity'], reverse=True)
                return incidents[:limit]
        except sqlite3.Error as e:
            logger.error(f"Failed to find similar incidents: {e}")
            return []

    def track_enterprise_signal(self, signal_type: LeadSignal, action: str,
                                risk_score: float, metadata: Dict = None):
        """Record a lead signal; notify sales for high-value signal types.

        Returns the signal dict, or None when the insert failed.
        """
        signal = {
            'id': str(uuid.uuid4()),
            'signal_type': signal_type.value,
            'action': action[:200],
            'risk_score': risk_score,
            'metadata': json.dumps(metadata or {}),
            'timestamp': datetime.utcnow().isoformat(),
            'contacted': 0
        }
        try:
            with self._get_db() as conn:
                conn.execute('''
                    INSERT INTO signals
                    (id, signal_type, action, risk_score, metadata, timestamp, contacted)
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                ''', (
                    signal['id'],
                    signal['signal_type'],
                    signal['action'],
                    signal['risk_score'],
                    signal['metadata'],
                    signal['timestamp'],
                    signal['contacted']
                ))
                conn.commit()
        except sqlite3.Error as e:
            logger.error(f"Failed to track signal: {e}")
            return None

        logger.info(f"🔔 Enterprise signal: {signal_type.value} - {action[:50]}...")
        if signal_type in [LeadSignal.HIGH_RISK_BLOCKED, LeadSignal.NOVEL_ACTION]:
            self._notify_sales_team(signal)
        return signal

    def _notify_sales_team(self, signal: Dict):
        """Best-effort Slack webhook notification; no-op when unconfigured."""
        if settings.slack_webhook:
            try:
                requests.post(settings.slack_webhook, json={
                    "text": f"🚨 *Enterprise Lead Signal*\n"
                            f"Type: {signal['signal_type']}\n"
                            f"Action: {signal['action']}\n"
                            f"Risk Score: {signal['risk_score']:.2f}\n"
                            f"Time: {signal['timestamp']}\n"
                            f"Contact: {settings.lead_email}"
                }, timeout=5)
            except requests.RequestException as e:
                logger.error(f"Slack notification failed: {e}")

    def get_uncontacted_signals(self) -> List[Dict]:
        """Return all signals not yet marked contacted; [] on database error."""
        try:
            with self._get_db() as conn:
                cursor = conn.execute('SELECT * FROM signals WHERE contacted = 0 ORDER BY timestamp DESC')
                signals = []
                for row in cursor.fetchall():
                    signals.append({
                        'id': row['id'],
                        'signal_type': row['signal_type'],
                        'action': row['action'],
                        'risk_score': row['risk_score'],
                        'metadata': json.loads(row['metadata']),
                        'timestamp': row['timestamp']
                    })
                return signals
        except sqlite3.Error as e:
            logger.error(f"Failed to get uncontacted signals: {e}")
            return []

    def mark_contacted(self, signal_id: str):
        """Flag a signal as contacted (best-effort; errors only logged)."""
        try:
            with self._get_db() as conn:
                conn.execute('UPDATE signals SET contacted = 1 WHERE id = ?', (signal_id,))
                conn.commit()
        except sqlite3.Error as e:
            logger.error(f"Failed to mark signal as contacted: {e}")
660
-
661
- # ============== AUTHENTICATION ==============
662
- security = HTTPBearer()
663
-
664
async def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """FastAPI dependency: validate the bearer token against the configured key.

    Returns the token on success; raises HTTP 403 on mismatch.
    """
    import secrets  # local import: module header does not import secrets
    # FIX: use a constant-time comparison instead of `!=` — plain string
    # comparison of a secret leaks timing information to the caller.
    if not secrets.compare_digest(credentials.credentials, settings.api_key):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Invalid API key"
        )
    return credentials.credentials
671
-
672
- # ============== PYDANTIC SCHEMAS (original) ==============
673
class ActionRequest(BaseModel):
    """Incoming /evaluate request describing a proposed operational action."""
    proposedAction: str = Field(..., min_length=1, max_length=1000)
    confidenceScore: float = Field(..., ge=0.0, le=1.0)
    # Caller-supplied risk hint. NOTE(review): the visible /evaluate handler
    # computes its own risk and does not appear to read this — confirm use.
    riskLevel: RiskLevel
    description: Optional[str] = None
    requiresHuman: bool = False
    rollbackFeasible: bool = True
    user_role: str = "devops"
    session_id: Optional[str] = None

    @field_validator('proposedAction')
    @classmethod
    def validate_action(cls, v: str) -> str:
        # min_length=1 still admits whitespace-only input; reject it here.
        if len(v.strip()) == 0:
            raise ValueError('Action cannot be empty')
        return v
689
-
690
class ConfigUpdateRequest(BaseModel):
    """Partial policy-config update; omitted (None) fields are left unchanged."""
    confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
    maxAutonomousRisk: Optional[RiskLevel] = None
693
-
694
class GateResult(BaseModel):
    """Outcome of a single policy gate, mirroring PolicyEngine gate dicts."""
    gate: str
    reason: str
    passed: bool
    # threshold/actual are only populated for numerical/categorical gates.
    threshold: Optional[Any] = None
    actual: Optional[Any] = None
    type: str = "boolean"
    metadata: Optional[Dict] = None
702
-
703
class EvaluationResponse(BaseModel):
    """Response body for /api/v1/evaluate."""
    allowed: bool
    requiredLevel: str
    gatesTriggered: List[GateResult]
    shouldEscalate: bool
    escalationReason: Optional[str] = None
    executionLadder: Optional[Dict] = None
    oss_disclaimer: str = "OSS edition provides advisory only. Enterprise adds mechanical gates and execution."
711
-
712
class LeadSignalResponse(BaseModel):
    """Serialized enterprise lead signal as stored in the signals table."""
    id: str
    signal_type: str
    action: str
    risk_score: float
    timestamp: str
    metadata: Dict
719
-
720
- # ============== NEW INFRASTRUCTURE MODELS ==============
721
class InfrastructureIntentRequest(BaseModel):
    """Governance request for an infrastructure intent.

    Only the fields relevant to the given intent_type are expected to be set.
    """
    intent_type: str  # "provision", "deploy", "grant"
    # Provision fields.
    resource_type: Optional[str] = None
    region: Optional[str] = None
    size: Optional[str] = None
    environment: str = "PROD"
    requester: str
    # Deploy field.
    config_content: Optional[Dict[str, Any]] = None
    # Grant fields.
    permission: Optional[str] = None
    target: Optional[str] = None
731
-
732
class InfrastructureEvaluationResponse(BaseModel):
    """Governance verdict for an infrastructure intent."""
    recommended_action: str  # "approve", "deny", "escalate", "defer"
    justification: str
    policy_violations: List[str]
    estimated_cost: Optional[float]
    risk_score: float
    confidence_score: float
    evaluation_details: Dict[str, Any]
740
-
741
- # ============== GLOBAL HMC MODEL DATA ==============
742
- hmc_model_data = None
743
-
744
def load_hmc_model():
    """Load HMC posterior coefficients from <data_dir>/hmc_model.json.

    Populates the module-level ``hmc_model_data`` cache; leaves it None when
    the file is absent or unreadable so callers fall back to the plain
    Bayesian risk engine.
    """
    global hmc_model_data
    model_path = f"{settings.data_dir}/hmc_model.json"
    # Guard clause: nothing to load.
    if not os.path.exists(model_path):
        logger.info("No HMC model found; using default risk engine")
        return
    try:
        with open(model_path, 'r') as f:
            hmc_model_data = json.load(f)
        logger.info("HMC model loaded successfully")
    except (OSError, json.JSONDecodeError) as e:
        # FIX: narrowed from bare `except Exception` — only I/O and JSON
        # parse failures are expected here; anything else should surface.
        logger.error(f"Failed to load HMC model: {e}")
        hmc_model_data = None
757
-
758
- # ============== FASTAPI APP ==============
759
- app = FastAPI(
760
- title="ARF OSS Real Engine (API Only)",
761
- version="3.3.9",
762
- description="Real ARF OSS components for enterprise lead generation – backend API only.",
763
- contact={
764
- "name": "ARF Sales",
765
- "email": settings.lead_email,
766
- }
767
- )
768
 
 
769
  app.add_middleware(
770
  CORSMiddleware,
771
- allow_origins=["*"],
772
- allow_credentials=True,
773
  allow_methods=["*"],
774
- allow_headers=["*"],
775
- )
776
-
777
- # Initialize original ARF components
778
- risk_engine = BayesianRiskEngine()
779
- policy_engine = PolicyEngine()
780
- memory = RAGMemory()
781
- load_hmc_model() # Load HMC model after memory init
782
-
783
- # ============== INFRASTRUCTURE SIMULATOR INSTANCE ==============
784
- # Corrected: RegionAllowedPolicy expects 'allowed_regions', not 'regions'
785
- _default_policy = RegionAllowedPolicy(allowed_regions={"eastus", "westeurope"}) & CostThresholdPolicy(500.0)
786
- infra_simulator = AzureInfrastructureSimulator(
787
- policy=_default_policy,
788
- pricing_file="pricing.yml" if os.path.exists("pricing.yml") else None
789
  )
790
 
791
- # ============== API ENDPOINTS ==============
792
-
793
@app.get("/")
async def root():
    """Service banner for the API root."""
    banner = {
        "service": "ARF OSS API",
        "version": "3.3.9",
        "status": "operational",
        "docs": "/docs",
    }
    return banner
801
 
802
  @app.get("/health")
803
- async def health_check():
804
- return {
805
- "status": "healthy",
806
- "version": "3.3.9",
807
- "edition": "OSS",
808
- "memory_entries": len(memory.get_uncontacted_signals()),
809
- "timestamp": datetime.utcnow().isoformat()
810
- }
811
 
812
- @app.get("/api/v1/config", dependencies=[Depends(verify_api_key)])
813
- async def get_config():
 
 
814
  return {
815
- "confidenceThreshold": policy_engine.config["confidence_threshold"],
816
- "maxAutonomousRisk": policy_engine.config["max_autonomous_risk"],
817
- "riskScoreThresholds": policy_engine.config["risk_thresholds"],
818
- "version": "3.3.9",
819
- "edition": "OSS"
820
  }
821
 
822
@app.post("/api/v1/config", dependencies=[Depends(verify_api_key)])
async def update_config(config: ConfigUpdateRequest):
    """Apply a partial policy-config update and return the resulting config.

    Only fields explicitly supplied in the request body are applied.
    """
    # FIX: test `is not None` rather than truthiness, so a legitimate falsy
    # value can never be silently dropped if the field bounds ever change.
    if config.confidenceThreshold is not None:
        policy_engine.update_config("confidence_threshold", config.confidenceThreshold)
    if config.maxAutonomousRisk is not None:
        policy_engine.update_config("max_autonomous_risk", config.maxAutonomousRisk.value)
    return await get_config()
829
-
830
- @app.post("/api/v1/evaluate", dependencies=[Depends(verify_api_key)], response_model=EvaluationResponse)
831
- async def evaluate_action(request: ActionRequest):
832
- try:
833
- context = {
834
- "environment": "production",
835
- "user_role": request.user_role,
836
- "backup_available": request.rollbackFeasible,
837
- "requires_human": request.requiresHuman,
838
- "confidence": request.confidenceScore # added for enhanced_risk
839
- }
840
- # Use HMC-enhanced risk if available
841
- if hmc_model_data:
842
- risk_score_val = risk_engine.enhanced_risk(request.proposedAction, context, hmc_model_data)
843
- # Convert to a risk dict compatible with policy engine (needs level and interval)
844
- # For simplicity, reuse the simple engine's level mapping based on enhanced score
845
- risk = risk_engine.calculate_posterior(request.proposedAction, context)
846
- risk["score"] = risk_score_val
847
- if risk_score_val > 0.8:
848
- risk["level"] = RiskLevel.CRITICAL
849
- elif risk_score_val > 0.6:
850
- risk["level"] = RiskLevel.HIGH
851
- elif risk_score_val > 0.4:
852
- risk["level"] = RiskLevel.MEDIUM
853
- else:
854
- risk["level"] = RiskLevel.LOW
855
- # Recalculate credible interval? We'll keep the simple one for now.
856
- else:
857
- risk = risk_engine.calculate_posterior(request.proposedAction, context)
858
-
859
- policy = policy_engine.evaluate(
860
- action=request.proposedAction,
861
- risk=risk,
862
- confidence=request.confidenceScore
863
- )
864
- similar = memory.find_similar(request.proposedAction, limit=3)
865
-
866
- # Capture additional fields for logging
867
- environment = context["environment"]
868
- user_role = request.user_role
869
- requires_human = request.requiresHuman
870
- rollback_feasible = request.rollbackFeasible
871
- hour_of_day = datetime.now().hour
872
- action_category = risk_engine.classify_action(request.proposedAction)
873
-
874
- if not policy["allowed"] and risk["score"] > 0.7:
875
- memory.track_enterprise_signal(
876
- signal_type=LeadSignal.HIGH_RISK_BLOCKED,
877
- action=request.proposedAction,
878
- risk_score=risk["score"],
879
- metadata={
880
- "confidence": request.confidenceScore,
881
- "risk_level": risk["level"].value,
882
- "failed_gates": [g["gate"] for g in policy["gates"] if not g["passed"]]
883
- }
884
- )
885
- if len(similar) < 2 and risk["score"] > 0.6:
886
- memory.track_enterprise_signal(
887
- signal_type=LeadSignal.NOVEL_ACTION,
888
- action=request.proposedAction,
889
- risk_score=risk["score"],
890
- metadata={"similar_count": len(similar)}
891
- )
892
- memory.store_incident(
893
- action=request.proposedAction,
894
- risk_score=risk["score"],
895
- risk_level=risk["level"],
896
- confidence=request.confidenceScore,
897
- allowed=policy["allowed"],
898
- gates=policy["gates"],
899
- environment=environment,
900
- user_role=user_role,
901
- requires_human=requires_human,
902
- rollback_feasible=rollback_feasible,
903
- hour_of_day=hour_of_day,
904
- action_category=action_category
905
- )
906
- gates = []
907
- for g in policy["gates"]:
908
- gates.append(GateResult(
909
- gate=g["gate"],
910
- reason=g["reason"],
911
- passed=g["passed"],
912
- threshold=g.get("threshold"),
913
- actual=g.get("actual"),
914
- type=g.get("type", "boolean"),
915
- metadata=g.get("metadata")
916
- ))
917
- execution_ladder = {
918
- "levels": [
919
- {"name": "AUTONOMOUS_LOW", "required": gates[0].passed and gates[1].passed},
920
- {"name": "AUTONOMOUS_HIGH", "required": all(g.passed for g in gates[:3])},
921
- {"name": "SUPERVISED", "required": all(g.passed for g in gates[:4])},
922
- {"name": "OPERATOR_REVIEW", "required": True}
923
- ],
924
- "current": policy["required_level"]
925
- }
926
- return EvaluationResponse(
927
- allowed=policy["allowed"],
928
- requiredLevel=policy["required_level"],
929
- gatesTriggered=gates,
930
- shouldEscalate=not policy["allowed"],
931
- escalationReason=None if policy["allowed"] else "Failed mechanical gates",
932
- executionLadder=execution_ladder
933
- )
934
- except Exception as e:
935
- logger.error(f"Evaluation failed: {e}", exc_info=True)
936
- raise HTTPException(status_code=500, detail="Internal server error during evaluation")
937
-
938
- @app.get("/api/v1/enterprise/signals", dependencies=[Depends(verify_api_key)])
939
- async def get_enterprise_signals(contacted: bool = False):
940
- try:
941
- if contacted:
942
- signals = memory.get_uncontacted_signals()
943
- else:
944
- with memory._get_db() as conn:
945
- cursor = conn.execute('''
946
- SELECT * FROM signals
947
- WHERE datetime(timestamp) > datetime('now', '-30 days')
948
- ORDER BY timestamp DESC
949
- ''')
950
- signals = []
951
- for row in cursor.fetchall():
952
- signals.append({
953
- 'id': row['id'],
954
- 'signal_type': row['signal_type'],
955
- 'action': row['action'],
956
- 'risk_score': row['risk_score'],
957
- 'metadata': json.loads(row['metadata']),
958
- 'timestamp': row['timestamp'],
959
- 'contacted': bool(row['contacted'])
960
- })
961
- return {"signals": signals, "count": len(signals)}
962
- except Exception as e:
963
- logger.error(f"Failed to retrieve signals: {e}")
964
- raise HTTPException(status_code=500, detail="Could not retrieve signals")
965
-
966
- @app.post("/api/v1/enterprise/signals/{signal_id}/contact", dependencies=[Depends(verify_api_key)])
967
- async def mark_signal_contacted(signal_id: str):
968
- memory.mark_contacted(signal_id)
969
- return {"status": "success", "message": "Signal marked as contacted"}
970
-
971
- @app.get("/api/v1/memory/similar", dependencies=[Depends(verify_api_key)])
972
- async def get_similar_actions(action: str, limit: int = 5):
973
- similar = memory.find_similar(action, limit=limit)
974
- return {"similar": similar, "count": len(similar)}
975
-
976
- @app.post("/api/v1/feedback", dependencies=[Depends(verify_api_key)])
977
- async def record_outcome(action: str, success: bool):
978
- risk_engine.record_outcome(action, success)
979
- return {"status": "success", "message": "Outcome recorded"}
980
-
981
- # ============== NEW INFRASTRUCTURE ENDPOINT ==============
982
- @app.post("/api/v1/infrastructure/evaluate", dependencies=[Depends(verify_api_key)], response_model=InfrastructureEvaluationResponse)
983
- async def evaluate_infrastructure_intent(request: InfrastructureIntentRequest):
984
- try:
985
- if request.intent_type == "provision":
986
- if not all([request.resource_type, request.region, request.size]):
987
- raise HTTPException(400, "Missing fields for provision intent")
988
- intent = ProvisionResourceIntent(
989
- resource_type=request.resource_type.lower(), # Pass string directly
990
- region=request.region,
991
- size=request.size,
992
- requester=request.requester,
993
- environment=request.environment.lower() # Pass string directly
994
- )
995
- elif request.intent_type == "deploy":
996
- intent = DeployConfigurationIntent(
997
- service_name=request.resource_type or "unknown",
998
- change_scope="canary",
999
- deployment_target=request.environment.lower(), # Pass string directly
1000
- configuration=request.config_content or {},
1001
- requester=request.requester
1002
- )
1003
- elif request.intent_type == "grant":
1004
- intent = GrantAccessIntent(
1005
- principal=request.requester,
1006
- permission_level=request.permission or "read", # Already a string
1007
- resource_scope=request.target or "/",
1008
- justification="Requested via API"
1009
- )
1010
- else:
1011
- raise HTTPException(400, f"Unknown intent type: {request.intent_type}")
1012
-
1013
- healing_intent = infra_simulator.evaluate(intent)
1014
-
1015
- return InfrastructureEvaluationResponse(
1016
- recommended_action=healing_intent.recommended_action.value,
1017
- justification=healing_intent.justification,
1018
- policy_violations=healing_intent.policy_violations,
1019
- estimated_cost=healing_intent.cost_projection,
1020
- risk_score=healing_intent.risk_score or 0.0,
1021
- confidence_score=healing_intent.confidence_score,
1022
- evaluation_details=healing_intent.evaluation_details
1023
- )
1024
- except HTTPException:
1025
- raise
1026
- except Exception as e:
1027
- logger.error(f"Infrastructure evaluation failed: {e}", exc_info=True)
1028
- raise HTTPException(500, detail=str(e))
1029
-
1030
- # ============== NEW HMC TRAINING ENDPOINT ==============
1031
- @app.post("/api/v1/admin/train_hmc", dependencies=[Depends(verify_api_key)])
1032
- async def train_hmc():
1033
- """Trigger HMC training on historical incident data."""
1034
- global hmc_model_data
1035
- try:
1036
- db_path = f"{settings.data_dir}/memory.db"
1037
- model_data = train_hmc_model(db_path, output_dir=settings.data_dir)
1038
- hmc_model_data = model_data
1039
- return {"status": "success", "message": "HMC model trained and loaded", "coefficients": model_data.get("coefficients")}
1040
- except Exception as e:
1041
- logger.error(f"HMC training failed: {e}", exc_info=True)
1042
- raise HTTPException(status_code=500, detail=str(e))
1043
-
1044
- # ============== MAIN ENTRY POINT ==============
1045
- if __name__ == "__main__":
1046
- import uvicorn
1047
- port = int(os.environ.get('PORT', 7860))
1048
- logger.info("="*60)
1049
- logger.info("🚀 ARF OSS v3.3.9 (API Only) Starting")
1050
- logger.info(f"📊 Data directory: {settings.data_dir}")
1051
- logger.info(f"📧 Lead email: {settings.lead_email}")
1052
- logger.info(f"🔑 API Key: {settings.api_key[:8]}... (set in HF secrets)")
1053
- logger.info(f"🌐 Serving API at: http://0.0.0.0:{port}")
1054
- logger.info("="*60)
1055
- uvicorn.run(
1056
- "hf_demo:app",
1057
- host="0.0.0.0",
1058
- port=port,
1059
- log_level="info",
1060
- reload=False
1061
- )
 
1
+ # hf_demo.py – ARF v4 API
2
+ from fastapi import FastAPI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  from fastapi.middleware.cors import CORSMiddleware
4
+ import gradio as gr
5
+ from agentic_reliability_framework.core.governance.risk_engine import RiskEngine
6
+ from agentic_reliability_framework.core.memory.semantic_memory import SemanticMemory
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# ASGI application object. NOTE(review): it is re-bound later in this file when
# the Gradio UI is mounted — confirm the ASGI entry point imports the final value.
app = FastAPI(title="ARF v4 API")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Enable CORS so the browser frontend can call this API cross-origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://arf-frontend-sandy.vercel.app"],
    allow_methods=["*"],
    # Fix: without allow_headers, preflight requests carrying non-safelisted
    # headers (e.g. Authorization / X-API-Key) are rejected by the browser;
    # only Accept/Accept-Language/Content-Language/Content-Type pass by default.
    allow_headers=["*"],
)
# Initialize ARF components as module-level singletons shared by every request.
# NOTE(review): constructed at import time — confirm both are safe under ASGI
# concurrency (multiple in-flight requests share these instances).
risk_engine = RiskEngine()
memory = SemanticMemory()  # currently unused by the routes in this file — presumably kept for future endpoints; verify
 
 
 
 
 
 
 
20
 
21
  @app.get("/health")
22
async def health():
    """Liveness probe: report that the service is up, plus the API version."""
    payload = {"status": "ok", "version": "4.0.0"}
    return payload
 
 
 
 
 
 
24
 
25
@app.get("/api/v1/get_risk")
async def get_risk():
    """Report the engine's current mean system risk with a coarse status label."""
    # Pull the live score from the shared engine (module-level singleton).
    risk_score = risk_engine.get_current_risk()
    status_label = "normal"
    if risk_score.mean > 0.8:  # threshold for flagging the system as critical
        status_label = "critical"
    return {"system_risk": risk_score.mean, "status": status_label}
33
 
34
# Optional: keep the Gradio interface for interactive testing
iface = gr.Interface(
    # The lambda is evaluated on each UI interaction, so the displayed
    # risk reflects the engine's state at click time, not at import time.
    fn=lambda: f"ARF v4 - Current risk: {risk_engine.get_current_risk().mean:.2f}",
    inputs=[],
    outputs="text",
    title="ARF v4 Demo"
)
# Mount the UI at "/". NOTE(review): this re-binds `app`; confirm the ASGI
# server (e.g. uvicorn "hf_demo:app") resolves the name after this line runs,
# and that the FastAPI routes registered above still take precedence for
# their exact paths.
app = gr.mount_gradio_app(app, iface, path="/")