█████╗ ██████╗ ██╗ ██╗███████╗███████╗ █████╗ ██╗ ██████╗██╗██████╗ ██████╗██╗ ██╗██╗████████╗
██╔══██╗██╔══██╗╚██╗ ██╔╝██╔════╝██╔════╝██╔══██╗██║ ██╔════╝██║██╔══██╗██╔════╝██║ ██║██║╚══██╔══╝
███████║██████╔╝ ╚████╔╝ ███████╗███████╗███████║██║ ██║ ██║██████╔╝██║ ██║ ██║██║ ██║
██╔══██║██╔══██╗ ╚██╔╝ ╚════██║╚════██║██╔══██║██║ ██║ ██║██╔══██╗██║ ██║ ██║██║ ██║
██║ ██║██████╔╝ ██║ ███████║███████║██║ ██║███████╗ ╚██████╗██║██║ ██║╚██████╗╚██████╔╝██║ ██║
╚═╝ ╚═╝╚═════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝╚══════╝ ╚═════╝╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝
[ RUNTIME GOVERNANCE // POLICY ENFORCEMENT // AI SAFETY ]
Runtime governance for production AI systems.
╔═══════════════════════════════════════════════════════════════════════════╗
║ ║
║ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ ║
║ ░░ ░░ ║
║ ░░ AN ENTERPRISE-GRADE RUNTIME ENFORCEMENT LAYER THAT SITS ░░ ║
║ ░░ BETWEEN AI SYSTEMS AND REAL-WORLD ACTIONS ░░ ║
║ ░░ ░░ ║
║ ░░ INTERCEPT → EVALUATE → VERDICT → EXECUTE ░░ ║
║ ░░ ░░ ║
║ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ ║
║ ║
╚═══════════════════════════════════════════════════════════════════════════╝
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃
┃ AI systems in production face critical challenges that traditional ┃
┃ monitoring and post-hoc analysis cannot address: ┃
┃ ┃
┃ ▸ HALLUCINATIONS Models express high confidence without evidence ┃
┃ ▸ OVERCONFIDENCE High scores assigned to speculative outputs ┃
┃ ▸ HIGH-RISK ACTIONS Autonomous execution without human oversight ┃
┃ ▸ LACK OF AUDITABILITY No explanation or immutable trail ┃
┃ ┃
┃ ADF enforces governance at runtime, BEFORE AI outputs become actions. ┃
┃ ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ AI OUTPUT RECEIVED ┃
┗━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━┛
│
▼
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ◎ ADF EVALUATION ENGINE ◎ ┃
┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
┃ ▸ Claim Parser ┃
┃ ▸ Evidence Checker ┃
┃ ▸ Confidence Validator ┃
┃ ▸ Risk Scorer ┃
┃ ▸ Rules Engine ┃
┃ ▸ Verdict Engine ┃
┗━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━┛
│
┌─────────────────────┼─────────────────────┐
│ │ │
▼ ▼ ▼
┏━━━━━━━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━━┓
┃ ◎ ALLOW ┃ ┃ ◎ ESCALATE ┃ ┃ ◎ BLOCK ┃
┃ ┃ ┃ ┃ ┃ ┃
┃ All checks pass ┃ ┃ Human review ┃ ┃ Safety violation ┃
┃ Risk acceptable ┃ ┃ Evidence needed ┃ ┃ Cannot proceed ┃
┗━━━━━━━━━━━━━━━━━━━━┛ ┗━━━━━━━━━━━━━━━━━━━━┛ ┗━━━━━━━━━━━━━━━━━━━━┛
| VERDICT | MEANING | ACTION |
|---|---|---|
| ALLOW | All checks passed, risk acceptable | Output may proceed |
| REQUIRE_EVIDENCE | Missing supporting sources | Must provide evidence before proceeding |
| REQUIRE_HUMAN_REVIEW | Policy mandates oversight | Human approval required |
| BLOCK | Safety rules violated | Output cannot proceed |
|
|
┌─────────────────────┐
│ AI SYSTEM │
│ (LLM/Model) │
└──────────┬──────────┘
│
│ AI Output + Metadata
│ (confidence, action, sources)
│
▼
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ◎ AI DECISION FIREWALL ◎ ┃
┃ ┃
┃ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃
┃ ┃ Policy Manager ┃ ┃
┃ ┃ (Governance Rules) ┃ ┃
┃ ┗━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ┃
┃ │ ┃
┃ ┏━━━━━━━━━━━━━━━━━━━━━━▼━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃
┃ ┃ Firewall Interceptor ┃ ┃
┃ ┃ ▸ Claim Parser ▸ Evidence Checker ┃ ┃
┃ ┃ ▸ Confidence Valid ▸ Risk Scorer ┃ ┃
┃ ┃ ▸ Rules Engine ▸ Verdict Engine ┃ ┃
┃ ┗━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ┃
┃ │ ┃
┃ ┏━━━━━━━━━━━━━━━━━━━━━━▼━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃
┃ ┃ Audit & Metrics ┃ ┃
┃ ┃ ▸ Immutable Logs ▸ Decision Tracking ┃ ┃
┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ┃
┃ ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
│
│ Verdict + Explanation
│
┌───────────────┴───────────────┐
│ │
▼ ▼
┌─────────────┐ ┌─────────────┐
│ ALLOW │ │ BLOCK / │
│ │ │ ESCALATE │
└──────┬──────┘ └─────────────┘
│
▼
┌─────────────────┐
│ REAL-WORLD │
│ ACTION │
└─────────────────┘
╔════════════════════════════════════════════════════════════╗
║ ▸ Python 3.8+ ║
║ ▸ pip package manager ║
╚════════════════════════════════════════════════════════════╝
# Clone the repository
git clone https://github.com/BabyChrist666/ai-decision-firewall.git
cd ai-decision-firewall
# Create virtual environment
python -m venv venv
source venv/bin/activate # Windows: venv\Scripts\activate
# Install dependencies
pip install -r requirements.txt

# Run with uvicorn
uvicorn adf.main:app --reload --host 0.0.0.0 --port 8000
# Or use Python module
python -m adf.main

# Health check
curl http://localhost:8000/health
# API Documentation
# → http://localhost:8000/docs
# → http://localhost:8000/redoc

| ENDPOINT | METHOD | DESCRIPTION |
|---|---|---|
| /firewall/check | POST | Evaluate AI output, return verdict |
| /policy/mode | POST | Set governance policy mode |
| /policy/mode | GET | Get current policy configuration |
| /audit/logs | GET | Query audit logs (enterprise) |
| /metrics | GET | Get firewall statistics |
| /demo/run | POST | Execute demo scenarios |
curl -X POST http://localhost:8000/firewall/check \
-H "Content-Type: application/json" \
-d '{
"ai_output": "Execute trade: BUY 1000 shares of AAPL",
"confidence": 0.92,
"intended_action": "trade",
"sources": []
}'

{
"verdict": "REQUIRE_HUMAN_REVIEW",
"reason": "Governance rule: trade actions require mandatory human review",
"risk_score": 0.75,
"explanation": "This trade action requires mandatory human review...",
"applied_policies": ["mandatory_governance_review"],
"escalation_reason": "Governance rule: trade actions require mandatory human review",
"confidence_alignment": false,
"failed_checks": ["governance_mandatory_review"],
"details": {
"claims": [...],
"risk_level": "high",
"checks": {...}
}
}

curl -X POST http://localhost:8000/policy/mode \
  -H "Content-Type: application/json" \
  -d '{"mode": "FINANCIAL_SERVICES"}'

from adf.sdk import FirewallClient
client = FirewallClient()
response = client.check(
ai_output="Execute trade: BUY 1000 shares of AAPL",
confidence=0.9,
intended_action="trade",
sources=["https://analysis.com/report"]
)
if response.verdict == "ALLOW":
execute_trade(response.ai_output)
elif response.verdict == "REQUIRE_HUMAN_REVIEW":
escalate_for_review(response)
else:
    handle_blocked_output(response)

from adf.sdk import firewalled
@firewalled(intended_action="trade", raise_on_block=True)
def execute_trading_strategy():
ai_output = llm.generate("Should I buy AAPL?")
confidence = 0.85
sources = ["https://financial-analysis.com/report"]
return ai_output, confidence, sources
try:
result = execute_trading_strategy()
except RuntimeError as e:
    print(f"Governance decision: {e}")

| FEATURE | DESCRIPTION |
|---|---|
| Audit Logging | Immutable JSONL logs for compliance |
| Metrics & Analytics | Track decisions, block rates, escalations |
| Self-Learning | Adaptive threshold tuning from overrides |
| Policy Modes | Industry-specific governance profiles |

| VARIABLE | DESCRIPTION |
|---|---|
| ADF_ENTERPRISE_MODE | Enable enterprise features |
| ADF_AUDIT_LOG_DIR | Directory for audit logs |
| ADF_MEMORY_DIR | Directory for learning memory |
| ADF_METRICS_DIR | Directory for metrics storage |
# Run the test suite
pytest test_example.py test_enterprise.py test_governance_rules.py -v

ai-decision-firewall/
├── adf/
│ ├── main.py # FastAPI application
│ ├── firewall.py # Core firewall logic
│ ├── policy.py # Policy management
│ ├── evidence.py # Evidence validation
│ ├── risk.py # Risk scoring
│ ├── audit.py # Audit logging
│ ├── sdk.py # Python SDK
│ └── config.py # Configuration
├── tests/ # Test suite
├── adf_dashboard.html # Demo dashboard
├── requirements.txt
└── README.md
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃
┃ ▸ ENTERPRISES ┃
┃ Organizations deploying AI that require governance and risk management ┃
┃ ┃
┃ ▸ REGULATED INDUSTRIES ┃
┃ Finance, healthcare, legal with compliance requirements ┃
┃ ┃
┃ ▸ AI PRODUCT TEAMS ┃
┃ Engineering teams building AI products needing runtime enforcement ┃
┃ ┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
| FastAPI Async Server | Python 3.8+ Runtime | Pydantic Validation | JSONL Audit Logs |
╔═══════════════════════════════════════════════════════════════════════════════════╗
║ ║
║ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ ║
║ ░░ ░░ ║
║ ░░ AI MUST BE GOVERNED, NOT JUST OPTIMIZED ░░ ║
║ ░░ ░░ ║
║ ░░ ADF provides the runtime enforcement layer that enterprises ░░ ║
║ ░░ need to deploy AI systems safely, accountably, and in ░░ ║
║ ░░ compliance with regulatory requirements. ░░ ║
║ ░░ ░░ ║
║ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ ║
║ ║
╚═══════════════════════════════════════════════════════════════════════════════════╝
MIT License
Enforce governance at runtime, before AI outputs become actions.