petter2025 commited on
Commit
6756da2
ยท
verified ยท
1 Parent(s): afa98c8

Update hf_demo.py

Browse files
Files changed (1) hide show
  1. hf_demo.py +892 -601
hf_demo.py CHANGED
@@ -1,640 +1,690 @@
1
  """
2
- ARF OSS Real Engine - Single File for Hugging Face Spaces
3
- Uses real ARF OSS components, no simulation
4
- Compatible with Replit UI frontend
5
  """
6
 
7
- import gradio as gr
8
  import os
9
  import json
10
  import uuid
 
 
11
  import logging
12
  import asyncio
 
 
13
  from datetime import datetime, timedelta
14
  from typing import Dict, List, Optional, Any, Tuple
15
- from fastapi import FastAPI, HTTPException
 
 
 
 
 
16
  from fastapi.middleware.cors import CORSMiddleware
17
- from pydantic import BaseModel, Field
 
18
  from gradio import mount_gradio_app
19
 
20
- # ============== REAL ARF OSS IMPORTS ==============
21
- # These would be from pip install agentic-reliability-framework
22
- # But for the single file, we'll implement the core logic
23
- # based on the actual ARF OSS architecture
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- # Configure logging
26
- logging.basicConfig(level=logging.INFO)
27
- logger = logging.getLogger(__name__)
 
 
 
28
 
29
- # ============== REAL BAYESIAN RISK ENGINE ==============
30
- class BayesianRiskAssessment:
31
  """
32
- Real Bayesian risk assessment - not simulation
33
- Based on ARF OSS v3.3.9 actual implementation
34
  """
35
 
36
- def __init__(self, prior_alpha: float = 2.0, prior_beta: float = 5.0):
37
- # Beta prior distribution parameters
38
- self.prior_alpha = prior_alpha
39
- self.prior_beta = prior_beta
40
- self.evidence_history = []
 
 
 
 
 
 
 
 
 
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  def calculate_posterior(self,
43
- action_text: str,
44
- context: Dict[str, Any],
45
- evidence_success: Optional[int] = None,
46
- evidence_total: Optional[int] = None) -> Dict[str, Any]:
47
  """
48
- True Bayesian update:
49
- Posterior โˆ Likelihood ร— Prior
50
  """
51
- # Base risk from action analysis
52
- base_risk = self._analyze_action_risk(action_text)
 
53
 
54
- # Context multipliers (Bayesian updating)
55
- context_risk = self._incorporate_context(base_risk, context)
 
56
 
57
- # If we have historical evidence, do full Bayesian update
58
- if evidence_success is not None and evidence_total is not None:
59
- # Posterior parameters
60
- alpha_post = self.prior_alpha + evidence_success
61
- beta_post = self.prior_beta + (evidence_total - evidence_success)
62
-
63
- # Posterior mean
64
- posterior_mean = alpha_post / (alpha_post + beta_post)
65
-
66
- # Combine with context analysis (weighted)
67
- final_risk = 0.7 * posterior_mean + 0.3 * context_risk
68
-
69
- # 95% confidence interval
70
- ci_lower = self._beta_ppf(0.025, alpha_post, beta_post)
71
- ci_upper = self._beta_ppf(0.975, alpha_post, beta_post)
72
-
73
- else:
74
- # Prior-only prediction
75
- prior_mean = self.prior_alpha / (self.prior_alpha + self.prior_beta)
76
- final_risk = 0.5 * prior_mean + 0.5 * context_risk
77
-
78
- # Wider confidence interval for prior-only
79
- ci_lower = max(0.01, final_risk - 0.25)
80
- ci_upper = min(0.99, final_risk + 0.25)
81
-
82
- # Determine risk level
83
- if final_risk > 0.8:
84
- risk_level = "CRITICAL"
85
- color = "#F44336"
86
- elif final_risk > 0.6:
87
- risk_level = "HIGH"
88
- color = "#FF9800"
89
- elif final_risk > 0.4:
90
- risk_level = "MEDIUM"
91
- color = "#FFC107"
92
- else:
93
- risk_level = "LOW"
94
- color = "#4CAF50"
95
 
96
- return {
97
- "score": final_risk,
98
- "level": risk_level,
99
- "color": color,
100
- "confidence_interval": [ci_lower, ci_upper],
101
- "posterior_parameters": {
102
- "alpha": alpha_post if evidence_success else self.prior_alpha,
103
- "beta": beta_post if evidence_success else self.prior_beta
104
- },
105
- "calculation": {
106
- "prior_mean": self.prior_alpha / (self.prior_alpha + self.prior_beta),
107
- "evidence_success": evidence_success,
108
- "evidence_total": evidence_total,
109
- "context_multiplier": context_risk / base_risk if base_risk > 0 else 1.0
110
- }
111
- }
112
-
113
- def _analyze_action_risk(self, action_text: str) -> float:
114
- """Base risk analysis from action text"""
115
- action_lower = action_text.lower()
116
 
117
- # Destructive patterns
118
- destructive_patterns = ['drop', 'delete', 'terminate', 'remove', 'destroy', 'shutdown']
119
- destructive_score = sum(2.0 for p in destructive_patterns if p in action_lower)
120
 
121
- # System-level patterns
122
- system_patterns = ['database', 'cluster', 'production', 'primary', 'master']
123
- system_score = sum(1.0 for p in system_patterns if p in action_lower)
 
 
 
124
 
125
- # Calculate raw risk (0-1 scale)
126
- max_possible = len(destructive_patterns) * 2 + len(system_patterns)
127
- raw_risk = (destructive_score + system_score) / max_possible if max_possible > 0 else 0.3
 
 
 
 
 
 
128
 
129
- return min(0.95, max(0.1, raw_risk))
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
- def _incorporate_context(self, base_risk: float, context: Dict) -> float:
132
- """Context-aware risk adjustment"""
133
  multiplier = 1.0
134
 
135
- # Environment factors
136
  if context.get('environment') == 'production':
137
  multiplier *= 1.5
138
  elif context.get('environment') == 'staging':
139
  multiplier *= 0.8
140
 
141
- # User role factors
142
- user_role = context.get('user_role', '').lower()
143
- if 'junior' in user_role or 'intern' in user_role:
144
  multiplier *= 1.3
145
- elif 'admin' in user_role:
146
- multiplier *= 1.1
147
 
148
- # Time factors
149
- time_str = context.get('time', '')
150
- if '2am' in time_str.lower() or 'night' in time_str.lower():
151
  multiplier *= 1.4
 
 
152
 
153
- # Backup availability
154
  if not context.get('backup_available', True):
155
  multiplier *= 1.6
156
 
157
- # Compliance factors
158
- compliance = context.get('compliance', '').lower()
159
- if 'pci' in compliance or 'hipaa' in compliance or 'gdpr' in compliance:
160
- multiplier *= 1.3
161
-
162
- return min(0.99, base_risk * multiplier)
163
 
164
- def _beta_ppf(self, q: float, alpha: float, beta: float) -> float:
165
- """Percent point function for Beta distribution (approximation)"""
166
- # Simple approximation for demo
167
- mean = alpha / (alpha + beta)
168
- variance = (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
169
- std = variance ** 0.5
170
 
171
- # Approximate quantile
172
- if q < 0.5:
173
- return max(0.01, mean - 2 * std)
174
- else:
175
- return min(0.99, mean + 2 * std)
 
 
 
 
 
 
 
 
 
 
176
 
177
- # ============== REAL POLICY ENGINE ==============
178
  class PolicyEngine:
179
  """
180
- Real OSS policy engine - advisory mode
181
- Based on ARF OSS healing_policies.py
182
  """
183
 
184
- def __init__(self, config_path: Optional[str] = None):
185
  self.config = {
186
- "confidence_threshold": 0.9,
187
- "max_autonomous_risk": "MEDIUM",
188
  "risk_thresholds": {
189
- "LOW": 0.7,
190
- "MEDIUM": 0.5,
191
- "HIGH": 0.3,
192
- "CRITICAL": 0.1
193
  },
194
- "action_blacklist": [
195
- "DROP DATABASE",
196
- "DELETE FROM",
197
- "TRUNCATE",
198
- "ALTER TABLE",
199
- "DROP TABLE",
200
- "shutdown -h now",
201
- "rm -rf /"
 
202
  ],
203
- "require_human_for": ["CRITICAL", "HIGH"],
204
- "require_rollback_for": ["destructive"]
205
  }
206
-
207
- # Load from file if exists
208
- if config_path and os.path.exists(config_path):
209
- with open(config_path) as f:
210
- user_config = json.load(f)
211
- self.config.update(user_config)
212
-
213
- def update_confidence_threshold(self, threshold: float):
214
- """Live policy update"""
215
- self.config["confidence_threshold"] = threshold
216
- logger.info(f"Confidence threshold updated to {threshold}")
217
 
218
- def update_max_risk(self, risk_level: str):
219
- """Live policy update"""
220
- if risk_level in ["LOW", "MEDIUM", "HIGH", "CRITICAL"]:
221
- self.config["max_autonomous_risk"] = risk_level
222
- logger.info(f"Max autonomous risk updated to {risk_level}")
223
-
224
- def evaluate(self,
225
  action: str,
226
- risk_assessment: Dict,
227
- confidence: float,
228
- mode: str = "advisory") -> Dict[str, Any]:
229
  """
230
  Evaluate action against policies
231
- OSS mode = advisory only (no execution)
232
  """
233
- gates_passed = []
234
- failures = []
235
 
236
  # Gate 1: Confidence threshold
237
  confidence_passed = confidence >= self.config["confidence_threshold"]
238
- gates_passed.append({
239
  "gate": "confidence_threshold",
240
  "passed": confidence_passed,
241
  "threshold": self.config["confidence_threshold"],
242
  "actual": confidence,
243
- "reason": f"Confidence {confidence:.2f} meets threshold {self.config['confidence_threshold']}"
244
- if confidence_passed else f"Confidence {confidence:.2f} below threshold {self.config['confidence_threshold']}"
245
  })
246
- if not confidence_passed:
247
- failures.append("confidence_threshold")
248
 
249
  # Gate 2: Risk level
250
- risk_levels = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]
251
- max_idx = risk_levels.index(self.config["max_autonomous_risk"])
252
- action_idx = risk_levels.index(risk_assessment["level"])
253
  risk_passed = action_idx <= max_idx
254
 
255
- gates_passed.append({
256
  "gate": "risk_assessment",
257
  "passed": risk_passed,
258
  "max_allowed": self.config["max_autonomous_risk"],
259
- "actual": risk_assessment["level"],
260
- "reason": f"Risk level {risk_assessment['level']} within autonomous range (โ‰ค {self.config['max_autonomous_risk']})"
261
- if risk_passed else f"Risk level {risk_assessment['level']} exceeds autonomous threshold",
262
  "metadata": {
263
- "maxAutonomousRisk": self.config["max_autonomous_risk"],
264
- "actionRisk": risk_assessment["level"]
265
  }
266
  })
267
- if not risk_passed:
268
- failures.append("risk_assessment")
269
 
270
- # Gate 3: Destructive operation check
271
- is_destructive = any(blacklisted in action.upper() for blacklisted in self.config["action_blacklist"])
 
 
 
 
272
 
273
- gates_passed.append({
274
  "gate": "destructive_check",
275
  "passed": not is_destructive,
276
  "is_destructive": is_destructive,
277
  "reason": "Non-destructive operation" if not is_destructive else "Destructive operation detected",
278
- "metadata": {"requiresRollback": is_destructive}
 
279
  })
280
- if is_destructive:
281
- failures.append("destructive_check")
282
 
283
  # Gate 4: Human review requirement
284
- requires_human = risk_assessment["level"] in self.config.get("require_human_for", [])
285
 
286
- gates_passed.append({
287
  "gate": "human_review",
288
  "passed": not requires_human,
289
  "requires_human": requires_human,
290
- "reason": "Human review not required" if not requires_human else "Human review required by policy",
291
- "metadata": {"policyRequiresHuman": requires_human}
292
  })
293
- if requires_human:
294
- failures.append("human_review")
295
 
296
- # Gate 5: License check (OSS always passes)
297
- gates_passed.append({
298
  "gate": "license_check",
299
  "passed": True,
300
  "edition": "OSS",
301
  "reason": "OSS edition - advisory only",
302
- "metadata": {"licenseSensitive": False}
303
  })
304
 
305
- all_passed = len(failures) == 0
 
 
 
 
 
 
 
 
 
 
 
306
 
307
  return {
308
  "allowed": all_passed,
309
- "gates": gates_passed,
310
- "failures": failures,
311
- "mode": mode,
312
- "advisory_only": mode == "advisory",
313
- "required_level": self._determine_required_level(all_passed, risk_assessment["level"])
314
  }
315
 
316
- def _determine_required_level(self, allowed: bool, risk_level: str) -> str:
317
- """Determine execution level"""
318
- if not allowed:
319
- return "OPERATOR_REVIEW"
320
- if risk_level == "LOW":
321
- return "AUTONOMOUS_LOW"
322
- elif risk_level == "MEDIUM":
323
- return "AUTONOMOUS_HIGH"
324
- else:
325
- return "SUPERVISED"
326
 
327
- # ============== RAG MEMORY (LIGHT PERSISTENCE) ==============
328
  class RAGMemory:
329
  """
330
- Light RAG memory for similar incident recall
331
- Uses simple vector embeddings for similarity
332
  """
333
 
334
- def __init__(self, storage_path: str = "/tmp/arf_memory"):
335
- self.storage_path = storage_path
336
- self.incidents = []
337
- self.enterprise_signals = []
338
- os.makedirs(storage_path, exist_ok=True)
339
-
340
- # Load existing if any
341
- self._load()
342
-
343
- def store(self, incident: Dict[str, Any]):
344
- """Store incident in memory"""
345
- incident["id"] = str(uuid.uuid4())
346
- incident["timestamp"] = datetime.utcnow().isoformat()
347
- self.incidents.append(incident)
348
-
349
- # Keep only last 100 for memory efficiency
350
- if len(self.incidents) > 100:
351
- self.incidents = self.incidents[-100:]
352
-
353
- self._save()
354
 
355
- def find_similar(self, action: str, risk_score: float, limit: int = 5) -> List[Dict]:
356
- """
357
- Find similar incidents using simple text similarity
358
- In production, this would use FAISS/embeddings
359
- """
360
- # Simple keyword matching for demo
361
- action_keywords = set(action.lower().split())
362
-
363
- scored = []
364
- for incident in self.incidents:
365
- incident_keywords = set(incident.get("action", "").lower().split())
366
-
367
- # Jaccard similarity
368
- intersection = len(action_keywords & incident_keywords)
369
- union = len(action_keywords | incident_keywords)
370
- similarity = intersection / union if union > 0 else 0
 
 
371
 
372
- # Risk score proximity
373
- risk_diff = 1 - abs(risk_score - incident.get("risk_score", 0))
 
 
 
 
 
 
 
 
 
 
374
 
375
- # Combined score
376
- combined = (0.6 * similarity + 0.4 * risk_diff)
377
-
378
- scored.append((combined, incident))
379
-
380
- # Sort by similarity and return top k
381
- scored.sort(key=lambda x: x[0], reverse=True)
382
- return [incident for score, incident in scored[:limit] if score > 0.2]
383
-
384
- def track_enterprise_signal(self, signal_type: str, action: str, metadata: Dict = None):
385
- """Track actions that indicate Enterprise need"""
386
- signal = {
387
- "id": str(uuid.uuid4()),
388
- "type": signal_type,
389
- "action": action[:100],
390
- "timestamp": datetime.utcnow().isoformat(),
391
- "metadata": metadata or {},
392
- "source": "huggingface_demo"
393
- }
394
- self.enterprise_signals.append(signal)
395
-
396
- # Log for lead follow-up
397
- logger.info(f"๐Ÿ”” ENTERPRISE SIGNAL: {signal_type} - {action[:50]}...")
398
-
399
- # Write to file for manual review
400
- with open("/tmp/enterprise_signals.log", "a") as f:
401
- f.write(json.dumps(signal) + "\n")
402
 
403
- def get_enterprise_signals(self) -> List[Dict]:
404
- """Get all enterprise signals"""
405
- return self.enterprise_signals
406
-
407
- def _save(self):
408
- """Save to disk"""
409
- try:
410
- with open(f"{self.storage_path}/incidents.json", "w") as f:
411
- json.dump(self.incidents[-50:], f) # Save last 50
412
- except:
413
- pass
414
-
415
- def _load(self):
416
- """Load from disk"""
417
  try:
418
- if os.path.exists(f"{self.storage_path}/incidents.json"):
419
- with open(f"{self.storage_path}/incidents.json") as f:
420
- self.incidents = json.load(f)
421
- except:
422
- self.incidents = []
423
-
424
- # ============== MCP CLIENT (LIGHT) ==============
425
- class MCPClient:
426
- """
427
- Light MCP client for demonstration
428
- In production, this would connect to actual MCP servers
429
- """
430
 
431
- def __init__(self, config: Dict = None):
432
- self.config = config or {}
433
- self.servers = {
434
- "detection": {"status": "simulated", "latency_ms": 45},
435
- "prediction": {"status": "simulated", "latency_ms": 120},
436
- "remediation": {"status": "simulated", "latency_ms": 80}
437
- }
438
-
439
- async def evaluate(self, action: str, context: Dict) -> Dict:
440
- """Simulate MCP evaluation"""
441
- # In production, this would make actual MCP calls
442
- await asyncio.sleep(0.05) # Simulate network latency
443
 
444
- action_lower = action.lower()
445
-
446
- # Detection MCP
447
- if any(x in action_lower for x in ['anomaly', 'error', 'fail']):
448
- detection = {"passed": False, "reason": "Anomaly detected", "confidence": 0.87}
449
- else:
450
- detection = {"passed": True, "reason": "No anomalies", "confidence": 0.95}
451
 
452
- # Prediction MCP
453
- if 'database' in action_lower:
454
- prediction = {"passed": False, "reason": "High failure probability", "probability": 0.76}
455
- else:
456
- prediction = {"passed": True, "reason": "Low risk predicted", "probability": 0.12}
457
-
458
- # Remediation MCP
459
- if any(x in action_lower for x in ['drop', 'delete', 'terminate']):
460
- remediation = {"passed": False, "reason": "Requires rollback plan", "available": False}
461
- else:
462
- remediation = {"passed": True, "reason": "Remediation available", "available": True}
463
 
464
- return {
465
- "gate": "mcp_validation",
466
- "passed": detection["passed"] and prediction["passed"] and remediation["passed"],
467
- "reason": "All MCP checks passed" if all([detection["passed"], prediction["passed"], remediation["passed"]])
468
- else "MCP checks failed",
469
- "metadata": {
470
- "detection": detection,
471
- "prediction": prediction,
472
- "remediation": remediation
473
- }
474
- }
475
-
476
- # ============== ARF ORCHESTRATOR ==============
477
- class ARFOrchestrator:
478
- """
479
- Main orchestrator combining all real ARF components
480
- """
481
 
482
- def __init__(self):
483
- self.risk_engine = BayesianRiskAssessment()
484
- self.policy_engine = PolicyEngine()
485
- self.memory = RAGMemory()
486
- self.mcp_client = MCPClient()
487
-
488
- # Track session
489
- self.session_id = str(uuid.uuid4())
490
- self.start_time = datetime.utcnow()
 
491
 
492
- logger.info(f"ARF Orchestrator initialized (session: {self.session_id})")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
493
 
494
- async def evaluate_action(self, action_data: Dict) -> Dict:
495
- """
496
- Complete evaluation pipeline using real components
497
- """
498
- start = datetime.utcnow()
499
 
500
- # Extract action data
501
- action = action_data.get("proposedAction", "")
502
- confidence = float(action_data.get("confidenceScore", 0.0))
503
- risk_level_input = action_data.get("riskLevel", "MEDIUM")
504
- description = action_data.get("description", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505
 
506
- # Build context
507
- context = {
508
- "environment": "production", # Default for demo
509
- "user_role": action_data.get("user_role", "devops"),
510
- "time": datetime.now().strftime("%H:%M"),
511
- "backup_available": action_data.get("rollbackFeasible", True),
512
- "compliance": "pci-dss" if "financial" in action.lower() else "standard"
 
513
  }
514
 
515
- # 1. Bayesian risk assessment
516
- risk_assessment = self.risk_engine.calculate_posterior(
517
- action_text=action,
518
- context=context,
519
- evidence_success=len(self.memory.incidents) // 2, # Mock evidence
520
- evidence_total=len(self.memory.incidents)
521
- )
522
-
523
- # 2. Policy evaluation
524
- policy_result = self.policy_engine.evaluate(
525
- action=action,
526
- risk_assessment=risk_assessment,
527
- confidence=confidence,
528
- mode="advisory"
529
- )
530
-
531
- # 3. MCP check
532
- mcp_result = await self.mcp_client.evaluate(action, context)
533
-
534
- # 4. Memory recall
535
- similar = self.memory.find_similar(
536
- action=action,
537
- risk_score=risk_assessment["score"],
538
- limit=3
539
- )
540
-
541
- # 5. Combine gates
542
- all_gates = []
543
-
544
- # Add policy gates
545
- for gate in policy_result["gates"]:
546
- all_gates.append(gate)
547
-
548
- # Add MCP gate
549
- all_gates.append(mcp_result)
550
-
551
- # Add novel action gate if few similar incidents
552
- if len(similar) < 2:
553
- all_gates.append({
554
- "gate": "novel_action_review",
555
- "passed": False,
556
- "reason": "Action pattern rarely seen in historical data",
557
- "metadata": {"similar_count": len(similar)}
558
- })
559
-
560
- # 6. Track enterprise signals
561
- if len(similar) < 2 and risk_assessment["score"] > 0.7:
562
- self.memory.track_enterprise_signal(
563
- "novel_high_risk_action",
564
- action,
565
- {"risk_score": risk_assessment["score"], "similar_count": len(similar)}
566
- )
567
- elif not policy_result["allowed"] and risk_assessment["score"] > 0.8:
568
- self.memory.track_enterprise_signal(
569
- "blocked_critical_action",
570
- action,
571
- {"failures": policy_result["failures"]}
572
- )
573
 
574
- # 7. Store in memory
575
- self.memory.store({
576
- "action": action,
577
- "description": description,
578
- "risk_score": risk_assessment["score"],
579
- "risk_level": risk_assessment["level"],
580
- "confidence": confidence,
581
- "allowed": policy_result["allowed"],
582
- "timestamp": datetime.utcnow().isoformat()
583
- })
584
 
585
- # Calculate final decision
586
- all_passed = all(g.get("passed", False) for g in all_gates)
 
587
 
588
- processing_time = (datetime.utcnow() - start).total_seconds() * 1000
 
 
 
589
 
590
- logger.info(f"Evaluation complete: {processing_time:.0f}ms, allowed={all_passed}")
 
 
 
 
 
 
 
 
 
 
 
 
591
 
592
- return {
593
- "allowed": all_passed,
594
- "requiredLevel": policy_result["required_level"],
595
- "gatesTriggered": all_gates,
596
- "shouldEscalate": not all_passed,
597
- "escalationReason": None if all_passed else "Failed mechanical gates",
598
- "executionLadder": {
599
- "levels": [
600
- {"name": "AUTONOMOUS_LOW", "passed": all(g.get("passed") for g in all_gates[:2])},
601
- {"name": "AUTONOMOUS_HIGH", "passed": all(g.get("passed") for g in all_gates[:3])},
602
- {"name": "SUPERVISED", "passed": all(g.get("passed") for g in all_gates[:4])},
603
- {"name": "OPERATOR_REVIEW", "passed": True}
604
- ]
605
- },
606
- "riskAssessment": risk_assessment,
607
- "similarIncidents": similar[:2], # Return top 2 for UI
608
- "processingTimeMs": processing_time
609
- }
610
-
611
- # ============== FASTAPI SETUP ==============
612
- app = FastAPI(title="ARF OSS Real Engine", version="3.3.9")
 
 
 
 
 
 
 
 
 
 
613
 
614
- app.add_middleware(
615
- CORSMiddleware,
616
- allow_origins=["*"],
617
- allow_credentials=True,
618
- allow_methods=["*"],
619
- allow_headers=["*"],
620
- )
621
 
622
- # Initialize ARF once (singleton)
623
- arf = ARFOrchestrator()
 
 
 
624
 
625
  # ============== PYDANTIC MODELS ==============
626
  class ActionRequest(BaseModel):
627
- proposedAction: str
628
  confidenceScore: float = Field(..., ge=0.0, le=1.0)
629
- riskLevel: str = Field(..., regex="^(LOW|MEDIUM|HIGH|CRITICAL)$")
630
  description: Optional[str] = None
631
  requiresHuman: bool = False
632
  rollbackFeasible: bool = True
633
- user_role: Optional[str] = "devops"
 
 
 
 
 
 
 
634
 
635
  class ConfigUpdateRequest(BaseModel):
636
  confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
637
- maxAutonomousRisk: Optional[str] = Field(None, regex="^(LOW|MEDIUM|HIGH|CRITICAL)$")
638
 
639
  class GateResult(BaseModel):
640
  gate: str
@@ -642,6 +692,7 @@ class GateResult(BaseModel):
642
  passed: bool
643
  threshold: Optional[float] = None
644
  actual: Optional[float] = None
 
645
  metadata: Optional[Dict] = None
646
 
647
  class EvaluationResponse(BaseModel):
@@ -651,167 +702,407 @@ class EvaluationResponse(BaseModel):
651
  shouldEscalate: bool
652
  escalationReason: Optional[str] = None
653
  executionLadder: Optional[Dict] = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
654
 
655
  # ============== API ENDPOINTS ==============
656
  @app.get("/api/v1/config")
657
  async def get_config():
 
658
  return {
659
- "confidenceThreshold": arf.policy_engine.config["confidence_threshold"],
660
- "maxAutonomousRisk": arf.policy_engine.config["max_autonomous_risk"],
661
- "riskScoreThresholds": arf.policy_engine.config["risk_thresholds"]
 
 
662
  }
663
 
664
  @app.post("/api/v1/config")
665
  async def update_config(config: ConfigUpdateRequest):
 
666
  if config.confidenceThreshold:
667
- arf.policy_engine.update_confidence_threshold(config.confidenceThreshold)
668
  if config.maxAutonomousRisk:
669
- arf.policy_engine.update_max_risk(config.maxAutonomousRisk)
670
  return await get_config()
671
 
672
  @app.post("/api/v1/evaluate", response_model=EvaluationResponse)
673
  async def evaluate_action(request: ActionRequest):
674
- """Real ARF OSS evaluation"""
675
- result = await arf.evaluate_action(request.dict())
676
-
677
- # Convert gates to proper format
678
- gates = []
679
- for g in result["gatesTriggered"]:
680
- gates.append(GateResult(
681
- gate=g["gate"],
682
- reason=g["reason"],
683
- passed=g["passed"],
684
- threshold=g.get("threshold"),
685
- actual=g.get("actual"),
686
- metadata=g.get("metadata")
687
- ))
688
-
689
- return EvaluationResponse(
690
- allowed=result["allowed"],
691
- requiredLevel=result["requiredLevel"],
692
- gatesTriggered=gates,
693
- shouldEscalate=result["shouldEscalate"],
694
- escalationReason=result["escalationReason"],
695
- executionLadder=result["executionLadder"]
696
- )
697
-
698
- @app.get("/api/v1/enterprise/signals")
699
- async def get_enterprise_signals():
700
- """Lead intelligence endpoint"""
701
- return {
702
- "signals": arf.memory.get_enterprise_signals(),
703
- "session_id": arf.session_id,
704
- "session_duration": (datetime.utcnow() - arf.start_time).total_seconds()
705
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
706
 
707
  @app.get("/health")
708
- async def health():
 
709
  return {
710
  "status": "healthy",
711
- "arf_version": "3.3.9",
712
- "oss_mode": True,
713
- "memory_entries": len(arf.memory.incidents),
714
- "enterprise_signals": len(arf.memory.enterprise_signals)
715
  }
716
 
717
- # ============== GRADIO LEAD GEN PAGE ==============
718
- def create_lead_gen_page():
719
- """Simple lead generation page"""
720
 
721
- with gr.Blocks(title="ARF OSS - Real Bayesian Reliability", theme=gr.themes.Soft()) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
722
 
723
- gr.HTML("""
724
- <div style="background: linear-gradient(135deg, #0D47A1, #1565C0); padding: 60px 30px;
725
- border-radius: 15px; text-align: center; color: white;">
726
- <h1 style="font-size: 3em; margin-bottom: 20px;">๐Ÿค– ARF OSS v3.3.9</h1>
727
- <h2 style="font-size: 1.8em; font-weight: 300; margin-bottom: 30px;">
728
- Real Bayesian Risk Assessment โ€ข Deterministic Policies โ€ข RAG Memory
729
  </h2>
730
- <div style="display: inline-block; background: rgba(255,255,255,0.2); padding: 10px 20px;
731
- border-radius: 50px; margin-bottom: 40px;">
732
- โšก Running REAL ARF OSS components - No Simulation
733
  </div>
734
  </div>
735
  """)
736
 
 
737
  with gr.Row():
738
  with gr.Column():
739
  gr.HTML("""
740
- <div style="padding: 30px; text-align: center;">
741
- <h3 style="color: #0D47A1; font-size: 2em;">๐Ÿš€ From Advisory to Autonomous</h3>
742
- <p style="font-size: 1.2em; color: #666; margin: 20px 0;">
743
- This demo uses real ARF OSS components for Bayesian risk assessment.<br>
744
  Enterprise adds mechanical gates, learning loops, and governed execution.
745
  </p>
746
  </div>
747
  """)
748
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
749
  with gr.Row():
750
- features = [
751
- ("๐Ÿงฎ Bayesian Inference", "Real posterior probability calculations"),
752
- ("๐Ÿ›ก๏ธ Policy Engine", "Deterministic OSS policies"),
753
- ("๐Ÿ’พ RAG Memory", "Similar incident recall"),
754
- ("๐Ÿ”Œ MCP Client", "Model Context Protocol integration")
755
- ]
756
-
757
- for title, desc in features:
758
- with gr.Column():
759
- gr.HTML(f"""
760
- <div style="padding: 20px; background: #f8f9fa; border-radius: 10px; height: 100%;">
761
- <h4 style="color: #0D47A1;">{title}</h4>
762
- <p style="color: #666;">{desc}</p>
763
- </div>
764
- """)
765
 
766
- gr.HTML("""
767
- <div style="margin: 40px 0; padding: 50px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
768
- border-radius: 20px; text-align: center; color: white;">
769
- <h2 style="font-size: 2.5em; margin-bottom: 20px;">๐ŸŽฏ Ready for Autonomous Operations?</h2>
770
- <p style="font-size: 1.3em; margin-bottom: 30px;">
 
771
  See ARF Enterprise with mechanical gates and execution
772
  </p>
773
 
774
- <div style="display: flex; gap: 20px; justify-content: center; flex-wrap: wrap;">
775
- <a href="mailto:petter2025us@outlook.com?subject=ARF%20Enterprise%20Demo"
776
- style="background: white; color: #667eea; padding: 18px 40px; border-radius: 50px;
777
- text-decoration: none; font-weight: bold; font-size: 1.2em;">
778
- ๐Ÿ“ง petter2025us@outlook.com
779
  </a>
780
- <a href="#"
781
- style="background: #FFD700; color: #333; padding: 18px 40px; border-radius: 50px;
782
- text-decoration: none; font-weight: bold; font-size: 1.2em;"
783
- onclick="alert('Calendar booking coming soon. Please email for now!')">
784
- ๐Ÿ“… Schedule Demo
785
  </a>
786
  </div>
787
 
788
- <p style="margin-top: 30px; font-size: 0.95em; opacity: 0.9;">
789
- โšก Technical deep-dive โ€ข Live autonomous execution โ€ข Enterprise pricing
 
790
  </p>
791
  </div>
792
  """)
793
 
794
- gr.HTML("""
795
- <div style="text-align: center; padding: 30px; color: #666;">
796
- <p>๐Ÿ“ง <a href="mailto:petter2025us@outlook.com" style="color: #0D47A1;">petter2025us@outlook.com</a> โ€ข
797
- ๐Ÿ™ <a href="https://github.com/petterjuan/agentic-reliability-framework" style="color: #0D47A1;">GitHub</a></p>
798
- <p style="font-size: 0.9em;">ยฉ 2026 ARF - Real OSS, Enterprise Execution</p>
 
 
 
 
 
 
 
 
 
799
  </div>
800
  """)
 
 
 
 
 
 
 
 
 
 
 
 
 
801
 
802
- return demo
803
 
804
- # ============== MAIN ENTRY POINT ==============
805
- demo = create_lead_gen_page()
806
-
807
- # Mount FastAPI on Gradio
808
- app = mount_gradio_app(app, demo, path="/")
809
-
810
- # For Hugging Face Spaces, this must be the only app file
811
- # The Space will execute this file and look for 'demo' or 'app'
812
 
813
- # This is the critical part for Hugging Face Spaces
814
  if __name__ == "__main__":
815
  import uvicorn
816
  port = int(os.environ.get('PORT', 7860))
817
- uvicorn.run(app, host="0.0.0.0", port=port)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ ARF OSS v3.3.9 - Enterprise Lead Generation Engine
3
+ Single file for Hugging Face Spaces with real ARF OSS components
 
4
  """
5
 
 
6
  import os
7
  import json
8
  import uuid
9
+ import hmac
10
+ import hashlib
11
  import logging
12
  import asyncio
13
+ import sqlite3
14
+ import requests
15
  from datetime import datetime, timedelta
16
  from typing import Dict, List, Optional, Any, Tuple
17
+ from contextlib import contextmanager
18
+ from dataclasses import dataclass, asdict
19
+ from enum import Enum
20
+
21
+ import gradio as gr
22
+ from fastapi import FastAPI, HTTPException, Depends, Header
23
  from fastapi.middleware.cors import CORSMiddleware
24
+ from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
25
+ from pydantic import BaseModel, Field, validator
26
  from gradio import mount_gradio_app
27
 
28
# ============== CONFIGURATION ==============
class Settings:
    """All tunable knobs in one place.

    Values are read once at import time; secrets are supplied through
    Hugging Face Space environment variables.
    """

    # --- ARF policy defaults ---
    DEFAULT_CONFIDENCE_THRESHOLD = 0.9
    DEFAULT_MAX_RISK = "MEDIUM"

    # --- Hugging Face runtime ---
    HF_SPACE_ID = os.environ.get('SPACE_ID', 'local')
    HF_TOKEN = os.environ.get('HF_TOKEN', '')

    # --- Persistence (HF persistent volume when mounted, else local) ---
    DATA_DIR = '/data' if os.path.exists('/data') else './data'
    os.makedirs(DATA_DIR, exist_ok=True)

    # --- Lead generation contacts ---
    LEAD_EMAIL = "petter2025us@outlook.com"
    CALENDLY_URL = "https://calendly.com/petter2025us/arf-demo"

    # --- Outbound lead alerting (configure via HF secrets) ---
    SLACK_WEBHOOK = os.environ.get('SLACK_WEBHOOK', '')
    SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', '')

    # --- Security: random per-process key unless pinned in the env ---
    API_KEY = os.environ.get('ARF_API_KEY', str(uuid.uuid4()))


settings = Settings()
56
+
57
# ============== LOGGING ==============
# Log to a file on persistent storage AND to stdout, so the HF Space
# console and the on-disk history stay in sync.
_log_handlers = [
    logging.FileHandler(f'{settings.DATA_DIR}/arf.log'),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=_log_handlers,
)
logger = logging.getLogger('arf.oss')
67
+
68
# ============== ENUMS & TYPES ==============
class RiskLevel(str, Enum):
    """Categorical risk bucket for a proposed action.

    NOTE: declaration order is significant — PolicyEngine compares levels
    by their index in ``list(RiskLevel)``, so members must stay in
    ascending severity: LOW < MEDIUM < HIGH < CRITICAL.
    """

    LOW = "LOW"
    MEDIUM = "MEDIUM"
    HIGH = "HIGH"
    CRITICAL = "CRITICAL"
74
+
75
class ExecutionLevel(str, Enum):
    """Autonomy tier required to carry out an action."""

    AUTONOMOUS_LOW = "AUTONOMOUS_LOW"    # low risk: fully autonomous
    AUTONOMOUS_HIGH = "AUTONOMOUS_HIGH"  # medium risk: autonomous with audit trail
    SUPERVISED = "SUPERVISED"            # human watches execution
    OPERATOR_REVIEW = "OPERATOR_REVIEW"  # human approves before anything runs
80
 
81
class LeadSignal(str, Enum):
    """Enterprise-interest events harvested from demo traffic."""

    HIGH_RISK_BLOCKED = "high_risk_blocked"  # a policy gate blocked a risky action
    NOVEL_ACTION = "novel_action"            # little precedent in memory
    POLICY_VIOLATION = "policy_violation"
    CONFIDENCE_LOW = "confidence_low"
    REPEATED_FAILURE = "repeated_failure"
87
 
88
# ============== REAL ARF BAYESIAN ENGINE ==============
class BayesianRiskEngine:
    """Beta-Binomial Bayesian risk engine with persistent evidence.

    The Beta(alpha, beta) prior counts pseudo-observations: ``alpha`` for
    safe outcomes and ``beta`` for risky ones, so the posterior mean
    ``alpha_n / (alpha_n + beta_n)`` estimates P(action is safe).  The
    reported risk score is ``1 - P(safe)``, scaled by a context-derived
    likelihood multiplier.  Outcomes recorded via :meth:`record_outcome`
    accumulate in SQLite and sharpen future posteriors.
    """

    def __init__(self):
        # Default Beta prior: 2 pseudo-safe vs 5 pseudo-risky observations.
        self.prior_alpha = 2.0
        self.prior_beta = 5.0

        # Per-category priors (alpha = safe, beta = risky pseudocounts).
        # E.g. database: P(safe) = 1.5 / 9.5 ~= 0.16 -> risky by default,
        # compute: P(safe) = 4 / 7 ~= 0.57 -> comparatively safe.
        self.action_priors = {
            'database': {'alpha': 1.5, 'beta': 8.0},
            'network': {'alpha': 3.0, 'beta': 4.0},
            'compute': {'alpha': 4.0, 'beta': 3.0},
            'security': {'alpha': 2.0, 'beta': 6.0},
            'default': {'alpha': 2.0, 'beta': 5.0}
        }

        # Evidence persists across restarts via SQLite on HF storage.
        self.evidence_db = f"{settings.DATA_DIR}/evidence.db"
        self._init_db()

    def _init_db(self):
        """Create the evidence table and its index if missing (idempotent)."""
        with self._get_db() as conn:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS evidence (
                    id TEXT PRIMARY KEY,
                    action_type TEXT,
                    action_hash TEXT,
                    success INTEGER,
                    total INTEGER,
                    timestamp TEXT,
                    metadata TEXT
                )
            ''')
            conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_action_hash
                ON evidence(action_hash)
            ''')

    @contextmanager
    def _get_db(self):
        """Yield a SQLite connection, always closing it afterwards."""
        conn = sqlite3.connect(self.evidence_db)
        try:
            yield conn
        finally:
            conn.close()

    def classify_action(self, action_text: str) -> str:
        """Keyword-classify an action so the matching prior can be used."""
        action_lower = action_text.lower()

        if any(word in action_lower for word in ['database', 'db', 'sql', 'table', 'drop', 'delete']):
            return 'database'
        elif any(word in action_lower for word in ['network', 'firewall', 'load balancer']):
            return 'network'
        elif any(word in action_lower for word in ['pod', 'container', 'deploy', 'scale']):
            return 'compute'
        elif any(word in action_lower for word in ['security', 'cert', 'key', 'access']):
            return 'security'
        else:
            return 'default'

    def get_prior(self, action_type: str) -> Tuple[float, float]:
        """Return (alpha, beta) prior pseudocounts for an action type."""
        prior = self.action_priors.get(action_type, self.action_priors['default'])
        return prior['alpha'], prior['beta']

    def get_evidence(self, action_hash: str) -> Tuple[int, int]:
        """Return (successes, trials) accumulated for this action hash."""
        with self._get_db() as conn:
            cursor = conn.execute(
                'SELECT SUM(success), SUM(total) FROM evidence WHERE action_hash = ?',
                (action_hash[:50],)  # stored hashes are truncated to 50 chars
            )
            row = cursor.fetchone()
            return (row[0] or 0, row[1] or 0) if row else (0, 0)

    def calculate_posterior(self,
                            action_text: str,
                            context: Dict[str, Any]) -> Dict[str, Any]:
        """Compute the Bayesian posterior risk for an action in context.

        FIX: the previous implementation used the posterior mean of
        P(safe) directly as the risk score, so recorded *successes raised
        risk* and the per-category priors were inverted relative to their
        comments (e.g. database ops came out LOW risk).  Risk is now
        ``1 - P(safe)``, matching the documented prior semantics.
        """
        # 1. Classify the action to select the appropriate prior.
        action_type = self.classify_action(action_text)
        alpha0, beta0 = self.get_prior(action_type)

        # 2. Fetch historical evidence for identical actions.
        action_hash = hashlib.sha256(action_text.encode()).hexdigest()
        successes, trials = self.get_evidence(action_hash)

        # 3. Conjugate update: successes count as safe, failures as risky.
        alpha_n = alpha0 + successes
        beta_n = beta0 + (trials - successes)

        # 4. Posterior mean safety and its complement (baseline risk).
        posterior_mean = alpha_n / (alpha_n + beta_n)
        base_risk = 1.0 - posterior_mean

        # 5. Context acts as a multiplicative likelihood adjustment.
        context_multiplier = self._context_likelihood(context)

        # 6. Final risk score, clamped away from 0 and 1.
        risk_score = base_risk * context_multiplier
        risk_score = min(0.99, max(0.01, risk_score))

        # 7. Approximate 95% credible interval for the risk.  Under the
        # Beta posterior, 1 - theta has the same standard deviation as
        # theta, so the normal approximation is centred on base_risk.
        variance = (alpha_n * beta_n) / ((alpha_n + beta_n) ** 2 * (alpha_n + beta_n + 1))
        std_dev = variance ** 0.5
        ci_lower = max(0.01, base_risk - 1.96 * std_dev)
        ci_upper = min(0.99, base_risk + 1.96 * std_dev)

        # 8. Bucket the continuous score into a categorical level.
        if risk_score > 0.8:
            risk_level = RiskLevel.CRITICAL
        elif risk_score > 0.6:
            risk_level = RiskLevel.HIGH
        elif risk_score > 0.4:
            risk_level = RiskLevel.MEDIUM
        else:
            risk_level = RiskLevel.LOW

        return {
            "score": risk_score,
            "level": risk_level,
            "credible_interval": [ci_lower, ci_upper],
            "posterior_parameters": {"alpha": alpha_n, "beta": beta_n},
            "prior_used": {"alpha": alpha0, "beta": beta0, "type": action_type},
            "evidence_used": {"successes": successes, "trials": trials},
            "context_multiplier": context_multiplier,
            "calculation": f"""
            Posterior = Beta(α={alpha_n:.1f}, β={beta_n:.1f})
            P(safe) = {alpha_n:.1f} / ({alpha_n:.1f} + {beta_n:.1f}) = {posterior_mean:.3f}
            Risk = (1 - {posterior_mean:.3f}) × context multiplier {context_multiplier:.2f} = {risk_score:.3f}
            """
        }

    def _context_likelihood(self, context: Dict) -> float:
        """Multiplicative risk adjustment derived from operational context."""
        multiplier = 1.0

        # Environment: production raises risk, staging lowers it.
        if context.get('environment') == 'production':
            multiplier *= 1.5
        elif context.get('environment') == 'staging':
            multiplier *= 0.8

        # Off-hours changes are riskier (uses local server time).
        hour = datetime.now().hour
        if hour < 6 or hour > 22:
            multiplier *= 1.3

        # Operator seniority.
        if context.get('user_role') == 'junior':
            multiplier *= 1.4
        elif context.get('user_role') == 'senior':
            multiplier *= 0.9

        # Missing backup makes any action considerably riskier.
        if not context.get('backup_available', True):
            multiplier *= 1.6

        return multiplier

    def record_outcome(self, action_text: str, success: bool):
        """Persist an observed outcome so future posteriors reflect it."""
        action_hash = hashlib.sha256(action_text.encode()).hexdigest()
        action_type = self.classify_action(action_text)

        with self._get_db() as conn:
            conn.execute('''
                INSERT INTO evidence (id, action_type, action_hash, success, total, timestamp)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', (
                str(uuid.uuid4()),
                action_type,
                action_hash[:50],
                1 if success else 0,
                1,
                datetime.utcnow().isoformat()
            ))
            conn.commit()

        logger.info(f"Recorded outcome for {action_type}: success={success}")
279
 
280
# ============== POLICY ENGINE ==============
class PolicyEngine:
    """Deterministic OSS policy gates (advisory only).

    Mirrors ARF OSS ``healing_policies.py``: five mechanical gates whose
    results are reported back to the caller — nothing is executed here.
    """

    def __init__(self):
        self.config = {
            "confidence_threshold": settings.DEFAULT_CONFIDENCE_THRESHOLD,
            "max_autonomous_risk": settings.DEFAULT_MAX_RISK,
            "risk_thresholds": {
                RiskLevel.LOW: 0.7,
                RiskLevel.MEDIUM: 0.5,
                RiskLevel.HIGH: 0.3,
                RiskLevel.CRITICAL: 0.1
            },
            "destructive_patterns": [
                r'\bdrop\s+database\b',
                r'\bdelete\s+from\b',
                r'\btruncate\b',
                r'\balter\s+table\b',
                r'\bdrop\s+table\b',
                r'\bshutdown\b',
                r'\bterminate\b',
                r'\brm\s+-rf\b'
            ],
            "require_human": [RiskLevel.CRITICAL, RiskLevel.HIGH],
            "require_rollback": True
        }

    def evaluate(self,
                 action: str,
                 risk: Dict[str, Any],
                 confidence: float) -> Dict[str, Any]:
        """Run every gate against the action and return the advisory verdict."""
        import re

        cfg = self.config
        gates: List[Dict[str, Any]] = []

        # Gate 1: numerical confidence threshold.
        confidence_passed = confidence >= cfg["confidence_threshold"]
        gates.append({
            "gate": "confidence_threshold",
            "passed": confidence_passed,
            "threshold": cfg["confidence_threshold"],
            "actual": confidence,
            "reason": f"Confidence {confidence:.2f} {'≥' if confidence_passed else '<'} threshold {cfg['confidence_threshold']}",
            "type": "numerical"
        })

        # Gate 2: categorical risk ceiling.  Severity ordering follows the
        # RiskLevel declaration order (LOW < MEDIUM < HIGH < CRITICAL).
        ordering = list(RiskLevel)
        max_idx = ordering.index(RiskLevel(cfg["max_autonomous_risk"]))
        action_idx = ordering.index(risk["level"])
        risk_passed = action_idx <= max_idx
        gates.append({
            "gate": "risk_assessment",
            "passed": risk_passed,
            "max_allowed": cfg["max_autonomous_risk"],
            "actual": risk["level"].value,
            "reason": f"Risk level {risk['level'].value} {'≤' if risk_passed else '>'} max autonomous {cfg['max_autonomous_risk']}",
            "type": "categorical",
            "metadata": {
                "risk_score": risk["score"],
                "credible_interval": risk["credible_interval"]
            }
        })

        # Gate 3: regex scan for known-destructive operations.
        lowered = action.lower()
        is_destructive = any(re.search(pattern, lowered)
                             for pattern in cfg["destructive_patterns"])
        gates.append({
            "gate": "destructive_check",
            "passed": not is_destructive,
            "is_destructive": is_destructive,
            "reason": "Non-destructive operation" if not is_destructive else "Destructive operation detected",
            "type": "boolean",
            "metadata": {"requires_rollback": is_destructive}
        })

        # Gate 4: does this risk level demand a human in the loop?
        requires_human = risk["level"] in cfg["require_human"]
        gates.append({
            "gate": "human_review",
            "passed": not requires_human,
            "requires_human": requires_human,
            "reason": "Human review not required" if not requires_human else f"Human review required for {risk['level'].value} risk",
            "type": "boolean"
        })

        # Gate 5: OSS license gate — always passes in the OSS edition.
        gates.append({
            "gate": "license_check",
            "passed": True,
            "edition": "OSS",
            "reason": "OSS edition - advisory only",
            "type": "license"
        })

        # Combine gate outcomes into the overall advisory decision.
        all_passed = all(g["passed"] for g in gates)

        # Map the outcome to the autonomy tier the action would need.
        if not all_passed:
            required_level = ExecutionLevel.OPERATOR_REVIEW
        elif risk["level"] == RiskLevel.LOW:
            required_level = ExecutionLevel.AUTONOMOUS_LOW
        elif risk["level"] == RiskLevel.MEDIUM:
            required_level = ExecutionLevel.AUTONOMOUS_HIGH
        else:
            required_level = ExecutionLevel.SUPERVISED

        return {
            "allowed": all_passed,
            "required_level": required_level.value,
            "gates": gates,
            "advisory_only": True,
            "oss_disclaimer": "OSS edition provides advisory only. Enterprise adds execution."
        }

    def update_config(self, key: str, value: Any):
        """Apply a live policy override; only known config keys are accepted."""
        if key not in self.config:
            return False
        self.config[key] = value
        logger.info(f"Policy updated: {key} = {value}")
        return True
 
 
 
415
 
416
# ============== RAG MEMORY WITH PERSISTENCE ==============
class RAGMemory:
    """Persistent incident and lead-signal memory backed by SQLite.

    Every evaluation is stored with a lightweight trigram embedding so
    similar incidents can be recalled later; enterprise-interest signals
    are tracked in a second table.  Files live under
    ``settings.DATA_DIR`` (HF persistent storage when available), so
    memory survives Space restarts.
    """

    def __init__(self):
        self.db_path = f"{settings.DATA_DIR}/memory.db"
        self._init_db()
        # In-process cache: text -> embedding vector.
        self.embedding_cache = {}

    def _init_db(self):
        """Create incident/signal tables and their indexes (idempotent)."""
        with self._get_db() as conn:
            # One row per evaluated action.
            conn.execute('''
                CREATE TABLE IF NOT EXISTS incidents (
                    id TEXT PRIMARY KEY,
                    action TEXT,
                    action_hash TEXT,
                    risk_score REAL,
                    risk_level TEXT,
                    confidence REAL,
                    allowed BOOLEAN,
                    gates TEXT,
                    timestamp TEXT,
                    embedding TEXT
                )
            ''')

            # Enterprise-lead signals awaiting sales follow-up.
            conn.execute('''
                CREATE TABLE IF NOT EXISTS signals (
                    id TEXT PRIMARY KEY,
                    signal_type TEXT,
                    action TEXT,
                    risk_score REAL,
                    metadata TEXT,
                    timestamp TEXT,
                    contacted BOOLEAN DEFAULT 0
                )
            ''')

            conn.execute('CREATE INDEX IF NOT EXISTS idx_action_hash ON incidents(action_hash)')
            conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_type ON signals(signal_type)')
            conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_contacted ON signals(contacted)')

    @contextmanager
    def _get_db(self):
        """Yield a row-factory connection, always closing it afterwards."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        try:
            yield conn
        finally:
            conn.close()

    def _simple_embedding(self, text: str) -> List[float]:
        """Demo-grade bag-of-trigrams embedding (100 floats in [0, 1)).

        FIX: per-trigram features now come from a deterministic digest.
        The builtin ``hash()`` is salted per process (PYTHONHASHSEED), so
        embeddings persisted to SQLite were incomparable after a restart.
        """
        if text in self.embedding_cache:
            return self.embedding_cache[text]

        # Collect character trigrams per word.
        trigrams = set()
        for word in text.lower().split():
            for i in range(len(word) - 2):
                trigrams.add(word[i:i + 3])

        # Deterministic feature per trigram.  In production, use
        # sentence-transformers instead of this toy embedding.
        vector = [
            int(hashlib.md5(t.encode()).hexdigest()[:8], 16) % 1000 / 1000.0
            for t in sorted(trigrams)[:100]
        ]
        vector += [0.0] * (100 - len(vector))  # pad to fixed length

        self.embedding_cache[text] = vector
        return vector

    def store_incident(self,
                       action: str,
                       risk_score: float,
                       risk_level: RiskLevel,
                       confidence: float,
                       allowed: bool,
                       gates: List[Dict]):
        """Persist one evaluated action together with its gate results."""
        action_hash = hashlib.sha256(action.encode()).hexdigest()[:50]
        embedding = json.dumps(self._simple_embedding(action))

        with self._get_db() as conn:
            conn.execute('''
                INSERT INTO incidents
                (id, action, action_hash, risk_score, risk_level, confidence, allowed, gates, timestamp, embedding)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                str(uuid.uuid4()),
                action[:500],  # cap stored action text
                action_hash,
                risk_score,
                risk_level.value,
                confidence,
                1 if allowed else 0,
                json.dumps(gates),
                datetime.utcnow().isoformat(),
                embedding
            ))
            conn.commit()

    def find_similar(self, action: str, limit: int = 5) -> List[Dict]:
        """Return up to ``limit`` most similar recent incidents (cosine)."""
        query_embedding = self._simple_embedding(action)

        with self._get_db() as conn:
            # Only the 100 most recent incidents are scored.
            cursor = conn.execute('''
                SELECT * FROM incidents
                ORDER BY timestamp DESC
                LIMIT 100
            ''')

            incidents = []
            for row in cursor.fetchall():
                stored_embedding = json.loads(row['embedding'])

                # Cosine similarity between query and stored vectors.
                dot = sum(q * s for q, s in zip(query_embedding, stored_embedding))
                norm_q = sum(q * q for q in query_embedding) ** 0.5
                norm_s = sum(s * s for s in stored_embedding) ** 0.5
                similarity = dot / (norm_q * norm_s) if norm_q > 0 and norm_s > 0 else 0

                incidents.append({
                    'id': row['id'],
                    'action': row['action'],
                    'risk_score': row['risk_score'],
                    'risk_level': row['risk_level'],
                    'confidence': row['confidence'],
                    'allowed': bool(row['allowed']),
                    'timestamp': row['timestamp'],
                    'similarity': similarity
                })

        incidents.sort(key=lambda x: x['similarity'], reverse=True)
        return incidents[:limit]

    def track_enterprise_signal(self,
                                signal_type: LeadSignal,
                                action: str,
                                risk_score: float,
                                metadata: Dict = None):
        """Persist an enterprise-interest signal and alert sales if hot."""
        signal = {
            'id': str(uuid.uuid4()),
            'signal_type': signal_type.value,
            'action': action[:200],
            'risk_score': risk_score,
            'metadata': json.dumps(metadata or {}),
            'timestamp': datetime.utcnow().isoformat(),
            'contacted': 0
        }

        with self._get_db() as conn:
            conn.execute('''
                INSERT INTO signals
                (id, signal_type, action, risk_score, metadata, timestamp, contacted)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            ''', (
                signal['id'],
                signal['signal_type'],
                signal['action'],
                signal['risk_score'],
                signal['metadata'],
                signal['timestamp'],
                signal['contacted']
            ))
            conn.commit()

        logger.info(f"🔔 Enterprise signal: {signal_type.value} - {action[:50]}...")

        # High-value signals trigger an immediate sales notification.
        if signal_type in [LeadSignal.HIGH_RISK_BLOCKED, LeadSignal.NOVEL_ACTION]:
            self._notify_sales_team(signal)

        return signal

    def _notify_sales_team(self, signal: Dict):
        """Best-effort real-time alert to the sales team."""
        if settings.SLACK_WEBHOOK:
            try:
                requests.post(settings.SLACK_WEBHOOK, json={
                    "text": f"🚨 *Enterprise Lead Signal*\n"
                            f"Type: {signal['signal_type']}\n"
                            f"Action: {signal['action']}\n"
                            f"Risk Score: {signal['risk_score']:.2f}\n"
                            f"Time: {signal['timestamp']}\n"
                            f"Contact: {settings.LEAD_EMAIL}"
                })
            except Exception:
                # FIX: was a bare ``except: pass`` which also swallowed
                # SystemExit/KeyboardInterrupt.  Still best-effort, but
                # failures are now logged instead of silently dropped.
                logger.warning("Slack lead notification failed", exc_info=True)

        if settings.SENDGRID_API_KEY:
            # Email delivery is not implemented in the OSS demo.
            pass

    def get_uncontacted_signals(self) -> List[Dict]:
        """Return all signals that have not been followed up yet."""
        with self._get_db() as conn:
            cursor = conn.execute('''
                SELECT * FROM signals
                WHERE contacted = 0
                ORDER BY timestamp DESC
            ''')

            return [
                {
                    'id': row['id'],
                    'signal_type': row['signal_type'],
                    'action': row['action'],
                    'risk_score': row['risk_score'],
                    'metadata': json.loads(row['metadata']),
                    'timestamp': row['timestamp']
                }
                for row in cursor.fetchall()
            ]

    def mark_contacted(self, signal_id: str):
        """Flag a signal as contacted so it leaves the follow-up queue."""
        with self._get_db() as conn:
            conn.execute('UPDATE signals SET contacted = 1 WHERE id = ?', (signal_id,))
            conn.commit()
658
 
659
# ============== AUTHENTICATION ==============
security = HTTPBearer()


def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Bearer-token check for the protected enterprise endpoints.

    FIX: uses ``hmac.compare_digest`` instead of ``!=`` so the comparison
    runs in constant time and does not leak key prefixes via timing.
    Returns the presented token on success; raises 403 otherwise.
    """
    if not hmac.compare_digest(credentials.credentials, settings.API_KEY):
        raise HTTPException(status_code=403, detail="Invalid API key")
    return credentials.credentials
667
 
668
# ============== PYDANTIC MODELS ==============
class ActionRequest(BaseModel):
    """Inbound evaluation request from the UI/API."""

    proposedAction: str = Field(..., min_length=1, max_length=1000)
    confidenceScore: float = Field(..., ge=0.0, le=1.0)
    riskLevel: RiskLevel
    description: Optional[str] = None
    requiresHuman: bool = False
    rollbackFeasible: bool = True
    user_role: str = "devops"
    session_id: Optional[str] = None

    @validator('proposedAction')
    def validate_action(cls, v):
        # min_length=1 still admits pure whitespace; reject that too.
        if len(v.strip()) == 0:
            raise ValueError('Action cannot be empty')
        return v
684
 
685
class ConfigUpdateRequest(BaseModel):
    """Partial live-config update; omitted fields are left unchanged."""

    confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
    maxAutonomousRisk: Optional[RiskLevel] = None
688
 
689
  class GateResult(BaseModel):
690
  gate: str
 
692
  passed: bool
693
  threshold: Optional[float] = None
694
  actual: Optional[float] = None
695
+ type: str = "boolean"
696
  metadata: Optional[Dict] = None
697
 
698
  class EvaluationResponse(BaseModel):
 
702
  shouldEscalate: bool
703
  escalationReason: Optional[str] = None
704
  executionLadder: Optional[Dict] = None
705
+ oss_disclaimer: str = "OSS edition provides advisory only. Enterprise adds mechanical gates and execution."
706
+
707
class LeadSignalResponse(BaseModel):
    """Serialized enterprise lead signal returned to the sales tooling."""

    id: str
    signal_type: str
    action: str
    risk_score: float
    timestamp: str
    metadata: Dict
714
+
715
# ============== FASTAPI SETUP ==============
app = FastAPI(
    title="ARF OSS Real Engine",
    version="3.3.9",
    description="Real ARF OSS components for enterprise lead generation",
    contact={
        "name": "ARF Sales",
        "email": settings.LEAD_EMAIL,
    },
)

# NOTE(review): wildcard origins together with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm whether
# credentials are actually required by the frontend.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Shared singletons used by every request handler below.
risk_engine = BayesianRiskEngine()
policy_engine = PolicyEngine()
memory = RAGMemory()
 
739
  # ============== API ENDPOINTS ==============
740
  @app.get("/api/v1/config")
741
  async def get_config():
742
+ """Get current ARF configuration"""
743
  return {
744
+ "confidenceThreshold": policy_engine.config["confidence_threshold"],
745
+ "maxAutonomousRisk": policy_engine.config["max_autonomous_risk"],
746
+ "riskScoreThresholds": policy_engine.config["risk_thresholds"],
747
+ "version": "3.3.9",
748
+ "edition": "OSS"
749
  }
750
 
751
  @app.post("/api/v1/config")
752
  async def update_config(config: ConfigUpdateRequest):
753
+ """Update ARF configuration (live)"""
754
  if config.confidenceThreshold:
755
+ policy_engine.update_config("confidence_threshold", config.confidenceThreshold)
756
  if config.maxAutonomousRisk:
757
+ policy_engine.update_config("max_autonomous_risk", config.maxAutonomousRisk.value)
758
  return await get_config()
759
 
760
  @app.post("/api/v1/evaluate", response_model=EvaluationResponse)
761
  async def evaluate_action(request: ActionRequest):
762
+ """
763
+ Real ARF OSS evaluation pipeline
764
+ Used by Replit UI frontend
765
+ """
766
+ try:
767
+ # Build context
768
+ context = {
769
+ "environment": "production",
770
+ "user_role": request.user_role,
771
+ "backup_available": request.rollbackFeasible,
772
+ "requires_human": request.requiresHuman
773
+ }
774
+
775
+ # 1. Bayesian risk assessment
776
+ risk = risk_engine.calculate_posterior(
777
+ action_text=request.proposedAction,
778
+ context=context
779
+ )
780
+
781
+ # 2. Policy evaluation
782
+ policy = policy_engine.evaluate(
783
+ action=request.proposedAction,
784
+ risk=risk,
785
+ confidence=request.confidenceScore
786
+ )
787
+
788
+ # 3. RAG memory recall
789
+ similar = memory.find_similar(request.proposedAction, limit=3)
790
+
791
+ # 4. Track enterprise signals
792
+ if not policy["allowed"] and risk["score"] > 0.7:
793
+ memory.track_enterprise_signal(
794
+ signal_type=LeadSignal.HIGH_RISK_BLOCKED,
795
+ action=request.proposedAction,
796
+ risk_score=risk["score"],
797
+ metadata={
798
+ "confidence": request.confidenceScore,
799
+ "risk_level": risk["level"].value,
800
+ "failed_gates": [g["gate"] for g in policy["gates"] if not g["passed"]]
801
+ }
802
+ )
803
+
804
+ if len(similar) < 2 and risk["score"] > 0.6:
805
+ memory.track_enterprise_signal(
806
+ signal_type=LeadSignal.NOVEL_ACTION,
807
+ action=request.proposedAction,
808
+ risk_score=risk["score"],
809
+ metadata={"similar_count": len(similar)}
810
+ )
811
+
812
+ # 5. Store in memory
813
+ memory.store_incident(
814
+ action=request.proposedAction,
815
+ risk_score=risk["score"],
816
+ risk_level=risk["level"],
817
+ confidence=request.confidenceScore,
818
+ allowed=policy["allowed"],
819
+ gates=policy["gates"]
820
+ )
821
+
822
+ # 6. Format gates for response
823
+ gates = []
824
+ for g in policy["gates"]:
825
+ gates.append(GateResult(
826
+ gate=g["gate"],
827
+ reason=g["reason"],
828
+ passed=g["passed"],
829
+ threshold=g.get("threshold"),
830
+ actual=g.get("actual"),
831
+ type=g.get("type", "boolean"),
832
+ metadata=g.get("metadata")
833
+ ))
834
+
835
+ # 7. Build execution ladder
836
+ execution_ladder = {
837
+ "levels": [
838
+ {"name": "AUTONOMOUS_LOW", "required": gates[0].passed and gates[1].passed},
839
+ {"name": "AUTONOMOUS_HIGH", "required": all(g.passed for g in gates[:3])},
840
+ {"name": "SUPERVISED", "required": all(g.passed for g in gates[:4])},
841
+ {"name": "OPERATOR_REVIEW", "required": True}
842
+ ],
843
+ "current": policy["required_level"]
844
+ }
845
+
846
+ return EvaluationResponse(
847
+ allowed=policy["allowed"],
848
+ requiredLevel=policy["required_level"],
849
+ gatesTriggered=gates,
850
+ shouldEscalate=not policy["allowed"],
851
+ escalationReason=None if policy["allowed"] else "Failed mechanical gates",
852
+ executionLadder=execution_ladder
853
+ )
854
+
855
+ except Exception as e:
856
+ logger.error(f"Evaluation failed: {e}", exc_info=True)
857
+ raise HTTPException(status_code=500, detail=str(e))
858
+
859
+ @app.get("/api/v1/enterprise/signals", dependencies=[Depends(verify_api_key)])
860
+ async def get_enterprise_signals(contacted: bool = False):
861
+ """
862
+ Get enterprise lead signals (protected endpoint)
863
+ Requires API key from HF secrets
864
+ """
865
+ if contacted:
866
+ signals = memory.get_uncontacted_signals()
867
+ else:
868
+ # Get all signals from last 30 days
869
+ with memory._get_db() as conn:
870
+ cursor = conn.execute('''
871
+ SELECT * FROM signals
872
+ WHERE datetime(timestamp) > datetime('now', '-30 days')
873
+ ORDER BY timestamp DESC
874
+ ''')
875
+ signals = []
876
+ for row in cursor.fetchall():
877
+ signals.append({
878
+ 'id': row['id'],
879
+ 'signal_type': row['signal_type'],
880
+ 'action': row['action'],
881
+ 'risk_score': row['risk_score'],
882
+ 'metadata': json.loads(row['metadata']),
883
+ 'timestamp': row['timestamp'],
884
+ 'contacted': bool(row['contacted'])
885
+ })
886
+
887
+ return {"signals": signals, "count": len(signals)}
888
+
889
+ @app.post("/api/v1/enterprise/signals/{signal_id}/contact")
890
+ async def mark_signal_contacted(signal_id: str):
891
+ """Mark a lead signal as contacted"""
892
+ memory.mark_contacted(signal_id)
893
+ return {"status": "success", "message": "Signal marked as contacted"}
894
+
895
+ @app.get("/api/v1/memory/similar")
896
+ async def get_similar_actions(action: str, limit: int = 5):
897
+ """Find similar historical actions"""
898
+ similar = memory.find_similar(action, limit=limit)
899
+ return {"similar": similar, "count": len(similar)}
900
+
901
+ @app.post("/api/v1/feedback")
902
+ async def record_outcome(action: str, success: bool):
903
+ """
904
+ Record actual outcome for Bayesian updating
905
+ This is how ARF learns
906
+ """
907
+ risk_engine.record_outcome(action, success)
908
+ return {"status": "success", "message": "Outcome recorded"}
909
 
910
  @app.get("/health")
911
+ async def health_check():
912
+ """Health check endpoint"""
913
  return {
914
  "status": "healthy",
915
+ "version": "3.3.9",
916
+ "edition": "OSS",
917
+ "memory_entries": len(memory.get_uncontacted_signals()),
918
+ "timestamp": datetime.utcnow().isoformat()
919
  }
920
 
921
+ # ============== GRADIO LEAD GENERATION UI ==============
922
+ def create_lead_gen_ui():
923
+ """Professional lead generation interface"""
924
 
925
+ with gr.Blocks(
926
+ title="ARF OSS - Enterprise Reliability Intelligence",
927
+ theme=gr.themes.Soft(primary_hue="blue", secondary_hue="indigo"),
928
+ css="""
929
+ .gradio-container { max-width: 1200px !important; margin: auto !important; }
930
+ .lead-card {
931
+ padding: 2rem;
932
+ border-radius: 1rem;
933
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
934
+ color: white;
935
+ text-align: center;
936
+ }
937
+ .feature-grid {
938
+ display: grid;
939
+ grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
940
+ gap: 1rem;
941
+ margin: 2rem 0;
942
+ }
943
+ .feature-item {
944
+ padding: 1.5rem;
945
+ border-radius: 0.5rem;
946
+ background: #f8f9fa;
947
+ border-left: 4px solid #667eea;
948
+ }
949
+ .cta-button {
950
+ background: white;
951
+ color: #667eea;
952
+ padding: 1rem 2rem;
953
+ border-radius: 2rem;
954
+ font-weight: bold;
955
+ text-decoration: none;
956
+ display: inline-block;
957
+ margin: 0.5rem;
958
+ transition: transform 0.2s;
959
+ }
960
+ .cta-button:hover {
961
+ transform: translateY(-2px);
962
+ box-shadow: 0 10px 20px rgba(0,0,0,0.2);
963
+ }
964
+ """
965
+ ) as ui:
966
 
967
+ # Header
968
+ gr.HTML(f"""
969
+ <div class="lead-card">
970
+ <h1 style="font-size: 3em; margin-bottom: 0.5rem;">๐Ÿค– ARF OSS v3.3.9</h1>
971
+ <h2 style="font-size: 1.5em; font-weight: 300; margin-bottom: 2rem;">
972
+ Real Bayesian Reliability Intelligence
973
  </h2>
974
+ <div style="display: inline-block; background: rgba(255,255,255,0.2); padding: 0.5rem 1rem;
975
+ border-radius: 2rem; margin-bottom: 2rem;">
976
+ โšก Running REAL ARF OSS Components โ€ข No Simulation
977
  </div>
978
  </div>
979
  """)
980
 
981
+ # Value Proposition
982
  with gr.Row():
983
  with gr.Column():
984
  gr.HTML("""
985
+ <div style="text-align: center; padding: 2rem;">
986
+ <h3 style="color: #333; font-size: 2em;">From Bayesian Analysis to Autonomous Execution</h3>
987
+ <p style="color: #666; font-size: 1.2em; max-width: 800px; margin: 1rem auto;">
988
+ This demo uses real ARF OSS components for risk assessment.
989
  Enterprise adds mechanical gates, learning loops, and governed execution.
990
  </p>
991
  </div>
992
  """)
993
 
994
+ # Features Grid
995
+ gr.HTML("""
996
+ <div class="feature-grid">
997
+ <div class="feature-item">
998
+ <h4>๐Ÿงฎ True Bayesian Inference</h4>
999
+ <p>Beta-Binomial conjugate priors with evidence updates</p>
1000
+ </div>
1001
+ <div class="feature-item">
1002
+ <h4>๐Ÿ›ก๏ธ Deterministic Policies</h4>
1003
+ <p>5 mechanical gates with live configuration</p>
1004
+ </div>
1005
+ <div class="feature-item">
1006
+ <h4>๐Ÿ’พ Persistent RAG Memory</h4>
1007
+ <p>SQLite + vector embeddings for incident recall</p>
1008
+ </div>
1009
+ <div class="feature-item">
1010
+ <h4>๐Ÿ“Š Lead Intelligence</h4>
1011
+ <p>Automatic enterprise signal detection</p>
1012
+ </div>
1013
+ </div>
1014
+ """)
1015
+
1016
+ # Live Demo Stats
1017
  with gr.Row():
1018
+ with gr.Column():
1019
+ demo_stats = gr.JSON(
1020
+ label="๐Ÿ“Š Live Demo Statistics",
1021
+ value={
1022
+ "active_since": datetime.utcnow().strftime("%Y-%m-%d %H:%M"),
1023
+ "bayesian_prior": "Beta(2.0, 5.0)",
1024
+ "memory_size": len(memory.get_uncontacted_signals()),
1025
+ "enterprise_signals": len(memory.get_uncontacted_signals())
1026
+ }
1027
+ )
 
 
 
 
 
1028
 
1029
+ # CTA Section
1030
+ gr.HTML(f"""
1031
+ <div style="margin: 3rem 0; padding: 3rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
1032
+ border-radius: 1rem; text-align: center; color: white;">
1033
+ <h2 style="font-size: 2.5em; margin-bottom: 1rem;">๐Ÿš€ Ready for Autonomous Operations?</h2>
1034
+ <p style="font-size: 1.3em; margin-bottom: 2rem;">
1035
  See ARF Enterprise with mechanical gates and execution
1036
  </p>
1037
 
1038
+ <div style="display: flex; gap: 1rem; justify-content: center; flex-wrap: wrap;">
1039
+ <a href="mailto:{settings.LEAD_EMAIL}?subject=ARF%20Enterprise%20Demo%20Request&body=I%20saw%20the%20real%20ARF%20OSS%20demo%20and%20would%20like%20to%20discuss%20Enterprise%20capabilities."
1040
+ class="cta-button">
1041
+ ๐Ÿ“ง {settings.LEAD_EMAIL}
 
1042
  </a>
1043
+ <a href="{settings.CALENDLY_URL}" target="_blank" class="cta-button" style="background: #FFD700; color: #333;">
1044
+ ๐Ÿ“… Schedule Technical Demo
 
 
 
1045
  </a>
1046
  </div>
1047
 
1048
+ <p style="margin-top: 2rem; font-size: 0.9em; opacity: 0.9;">
1049
+ โšก 30-min technical deep-dive โ€ข Live autonomous execution โ€ข Enterprise pricing<br>
1050
+ ๐Ÿ”’ All demos confidential and tailored to your infrastructure
1051
  </p>
1052
  </div>
1053
  """)
1054
 
1055
+ # Footer
1056
+ gr.HTML(f"""
1057
+ <div style="text-align: center; padding: 2rem; color: #666; border-top: 1px solid #eee;">
1058
+ <p>
1059
+ ๐Ÿ“ง <a href="mailto:{settings.LEAD_EMAIL}" style="color: #667eea;">{settings.LEAD_EMAIL}</a> โ€ข
1060
+ ๐Ÿ™ <a href="https://github.com/petterjuan/agentic-reliability-framework" style="color: #667eea;">GitHub</a> โ€ข
1061
+ ๐Ÿ’ผ <a href="#" style="color: #667eea;">LinkedIn</a>
1062
+ </p>
1063
+ <p style="font-size: 0.9rem;">
1064
+ ยฉ 2026 ARF - Open Source Intelligence, Enterprise Execution<br>
1065
+ <span style="font-size: 0.8rem; color: #999;">
1066
+ v3.3.9 โ€ข Real Bayesian Inference โ€ข Persistent RAG โ€ข Lead Intelligence
1067
+ </span>
1068
+ </p>
1069
  </div>
1070
  """)
1071
+
1072
+ # Auto-refresh stats every 30 seconds
1073
+ demo_stats.change(
1074
+ fn=lambda: {
1075
+ "active_since": datetime.utcnow().strftime("%Y-%m-%d %H:%M"),
1076
+ "bayesian_prior": "Beta(2.0, 5.0)",
1077
+ "memory_size": len(memory.get_uncontacted_signals()),
1078
+ "enterprise_signals": len(memory.get_uncontacted_signals())
1079
+ },
1080
+ inputs=[],
1081
+ outputs=[demo_stats],
1082
+ every=30
1083
+ )
1084
 
1085
+ return ui
1086
 
1087
# ============== MOUNT GRADIO ON FASTAPI ==============
# Build the marketing/demo UI once and serve it from the FastAPI root path,
# so the JSON API routes and the Gradio front-end share a single ASGI app.
gradio_ui = create_lead_gen_ui()
app = mount_gradio_app(app, gradio_ui, path="/")
1090
 
1091
# ============== MAIN ENTRY POINT ==============
if __name__ == "__main__":
    import uvicorn

    # Hugging Face Spaces injects PORT; default to the Spaces convention (7860).
    port = int(os.environ.get("PORT", 7860))

    # Startup banner. Use lazy %-style logging args rather than f-strings so
    # interpolation only happens when the INFO level is enabled.
    logger.info("=" * 60)
    logger.info("🚀 ARF OSS v3.3.9 Starting")
    logger.info("📊 Data directory: %s", settings.DATA_DIR)
    logger.info("📧 Lead email: %s", settings.LEAD_EMAIL)
    # SECURITY: never log secret material, not even a prefix — only confirm
    # whether a key is configured. Also avoids a TypeError when API_KEY is None.
    logger.info("🔑 API Key configured: %s (set in HF secrets)", bool(settings.API_KEY))
    logger.info("=" * 60)

    # Bind on all interfaces so the Space's reverse proxy can reach the app.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        log_level="info",
    )