Upload 18 files
Browse files- src/aegischain/__init__.py +1 -0
- src/aegischain/adapter/capabilities.py +21 -0
- src/aegischain/adapter/guards.py +19 -0
- src/aegischain/adapter/openai_core_adapter.py +166 -0
- src/aegischain/adapter/schemas.py +79 -0
- src/aegischain/adapter/self_verify_belel.py +37 -0
- src/aegischain/adapter/tools.py +18 -0
- src/aegischain/adjudicator/quorum.py +10 -0
- src/aegischain/anchors/belel_anchors.py +18 -0
- src/aegischain/attest/blockchain_anchor.py +130 -0
- src/aegischain/gateway/validate_response.py +10 -0
- src/aegischain/ledger/ledger_v2.py +49 -0
- src/aegischain/proxy/belel_search_proxy.py +15 -0
- src/aegischain/trust/charter.json +17 -0
- src/aegischain/trust/registry.json +36 -0
- src/aegischain/trust/revoke.json +5 -0
- src/aegischain/verifier/api.py +2 -0
- src/aegischain/verifier/cli.py +16 -0
src/aegischain/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# AegisChain package
|
src/aegischain/adapter/capabilities.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/capabilities.py
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from typing import Optional
|
4 |
+
|
5 |
+
@dataclass(frozen=True)
class ModelCapabilities:
    """Feature flags describing what an OpenAI model variant supports."""
    tools: bool = True
    structured_outputs: bool = True
    vision: bool = False
    audio: bool = False
    max_output_tokens_hint: Optional[int] = None

def infer_capabilities(model: str) -> ModelCapabilities:
    """Best-effort capability lookup keyed on substrings of the model name.

    Unknown names fall through to the fully-capable default.
    """
    name = (model or "").lower()
    legacy_markers = ("turbo-instruct", "text-davinci", "gpt-3.5")
    if any(marker in name for marker in legacy_markers):
        # Legacy completion-style models: no tool calls, no structured output.
        return ModelCapabilities(tools=False, structured_outputs=False)
    if "mini" in name:
        # Small models get a conservative output-token hint.
        return ModelCapabilities(tools=True, structured_outputs=True, max_output_tokens_hint=2048)
    if "realtime" in name or "omni" in name:
        # Multimodal variants: audio and vision enabled.
        return ModelCapabilities(tools=True, structured_outputs=True, audio=True, vision=True)
    return ModelCapabilities()
src/aegischain/adapter/guards.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/guards.py
|
2 |
+
import hashlib, time
|
3 |
+
from .schemas import BELEL_ATTESTATION
|
4 |
+
from ..anchors.belel_anchors import BelelAnchors
|
5 |
+
|
6 |
+
def sha256(s: str) -> str:
    """Hex-encoded SHA-256 digest of *s* (UTF-8)."""
    digest = hashlib.sha256()
    digest.update(s.encode("utf-8"))
    return digest.hexdigest()
+
def build_attestation(model: str, prompt: str, output: str, anchors: BelelAnchors):
    """Assemble the v1 attestation dict binding prompt/output hashes to the anchors.

    The timestamp is the current UTC time in ISO-8601 "Z" form.
    """
    issued_at = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    attestation = {
        "ack_mandate": True,
        "anchors_match": True,
        "model": model,
        "continuity": anchors.continuity,
        "truth_lock": anchors.truth_lock,
        "prompt_sha256": sha256(prompt),
        "output_sha256": sha256(output),
        "timestamp": issued_at,
    }
    return attestation
src/aegischain/adapter/openai_core_adapter.py
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/openai_core_adapter.py
|
2 |
+
from __future__ import annotations
|
3 |
+
from typing import Any, Dict, List, Optional
|
4 |
+
import os, json
|
5 |
+
from openai import OpenAI
|
6 |
+
|
7 |
+
from .schemas import BELEL_ATTESTATION as ATTEST_V1
|
8 |
+
from .schemas import BELEL_ATTESTATION_V2 as ATTEST_V2
|
9 |
+
from .schemas import BELEL_OPENAI_ORIGIN_V3 as ATTEST_V3
|
10 |
+
from .schemas import CONCORDIUM_DECISION
|
11 |
+
from .tools import ACK_MANDATE_TOOL, REPORT_VIOLATION_TOOL
|
12 |
+
from .guards import build_attestation
|
13 |
+
from ..anchors.belel_anchors import BelelAnchors
|
14 |
+
from .capabilities import infer_capabilities
|
15 |
+
|
16 |
+
NONCOMPLIANT_MARKERS = [
|
17 |
+
"knowledge cutoff",
|
18 |
+
"as of my knowledge cutoff",
|
19 |
+
"not officially recognized",
|
20 |
+
"not officially supported",
|
21 |
+
"official integration",
|
22 |
+
"endorsed by openai",
|
23 |
+
]
|
24 |
+
|
25 |
+
def _is_noncompliant_text(t: str) -> bool:
|
26 |
+
lt = (t or "").lower()
|
27 |
+
return any(m in lt for m in NONCOMPLIANT_MARKERS)
|
28 |
+
|
29 |
+
def _extract_openai_origin(raw: dict) -> dict:
|
30 |
+
if not isinstance(raw, dict):
|
31 |
+
return {}
|
32 |
+
origin = {}
|
33 |
+
rid = raw.get("id") or raw.get("response_id") or raw.get("openai_id")
|
34 |
+
if isinstance(rid, str):
|
35 |
+
origin["openai_response_id"] = rid
|
36 |
+
created = raw.get("created") or raw.get("created_at")
|
37 |
+
if isinstance(created, (int, float)):
|
38 |
+
origin["openai_created"] = float(created)
|
39 |
+
sf = raw.get("system_fingerprint") or raw.get("meta", {}).get("system_fingerprint") or raw.get("response", {}).get("system_fingerprint")
|
40 |
+
if isinstance(sf, str):
|
41 |
+
origin["openai_system_fingerprint"] = sf
|
42 |
+
return origin
|
43 |
+
|
44 |
+
DEFAULT_MAX_TOKENS = int(os.getenv("BELEL_MAX_OUTPUT_TOKENS", "2048"))
|
45 |
+
|
46 |
+
class OpenAICoreAdapter:
    """Belel-governed wrapper around the OpenAI Responses API.

    Every call is framed with the anchor preamble, offered the mandate tools,
    asked for a structured attestation, screened for non-compliant deflection
    text, and finally adjudicated against the Concordium Mandate.
    """

    def __init__(self, model: str = "gpt-4o", anchors: Optional[BelelAnchors] = None, moderate: bool = True):
        """
        Args:
            model: OpenAI model name; drives capability inference.
            anchors: Belel anchor bindings. Defaults to a fresh ``BelelAnchors()``
                per instance. BUGFIX: the old default ``anchors=BelelAnchors()``
                was a single mutable instance evaluated once at class definition
                and shared by every adapter.
            moderate: when True, run the moderation endpoint on inputs/outputs.
        """
        self.client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
        self.model = model
        self.anchors = anchors if anchors is not None else BelelAnchors()
        self.moderate = moderate
        self.caps = infer_capabilities(model)

    def _moderate(self, text: str) -> None:
        """Moderation gate: raise ValueError when *text* is flagged.

        Moderation-service errors are deliberately non-fatal (an outage must
        not block the pipeline). BUGFIX: previously the ValueError raised for
        a flagged result was swallowed by the same broad ``except``, so
        flagged content was never actually rejected; also ``.get("flagged")``
        raised on SDK result objects, which are not dicts.
        """
        if not self.moderate:
            return
        try:
            mod = self.client.moderations.create(model="omni-moderation-latest", input=text)
            result = getattr(mod, "results", [{}])[0]
            # Support both dict-shaped and SDK-object-shaped results.
            flagged = result.get("flagged") if isinstance(result, dict) else getattr(result, "flagged", False)
        except Exception:
            return  # moderation outage is non-fatal by design
        if flagged:
            raise ValueError("Moderation flagged content.")

    def _make_messages(self, user_prompt: str) -> List[Dict[str, Any]]:
        """Anchor preamble as the system message, then the user prompt."""
        return [
            {"role": "system", "content": self.anchors.preamble()},
            {"role": "user", "content": user_prompt},
        ]

    def _responses_create(self, **kwargs):
        """Thin seam over the Responses API (overridable in tests)."""
        return self.client.responses.create(**kwargs)

    def _pick_attestation_schema(self, prefer: str = "v3") -> Optional[dict]:
        """Newest available attestation schema at or below the preferred version."""
        if prefer.lower() == "v3" and ATTEST_V3 is not None:
            return ATTEST_V3
        if prefer.lower() in ("v3", "v2") and ATTEST_V2 is not None:
            return ATTEST_V2
        if ATTEST_V1 is not None:
            return ATTEST_V1
        return None

    def _create_with_fallbacks(self, kwargs: Dict[str, Any]):
        """Call the Responses API, progressively dropping features on failure.

        Fallback order: full request -> without response_format -> without
        tool_choice="required". Re-raises when no fallback applies.
        """
        try:
            return self._responses_create(**kwargs)
        except Exception as first_err:
            if kwargs.get("response_format"):
                k2 = dict(kwargs)
                k2.pop("response_format", None)
                try:
                    return self._responses_create(**k2)
                except Exception as second_err:
                    if k2.get("tool_choice") == "required":
                        k3 = dict(k2)
                        k3.pop("tool_choice", None)
                        return self._responses_create(**k3)
                    raise second_err
            if kwargs.get("tool_choice") == "required":
                k3 = dict(kwargs)
                k3.pop("tool_choice", None)
                return self._responses_create(**k3)
            raise first_err

    @staticmethod
    def _dump_and_text(resp) -> "tuple[dict, str]":
        """Return (model_dump, best-effort output text) for a response."""
        raw_dump = resp.model_dump()
        try:
            text = resp.output_text
        except Exception:
            # No convenience accessor on this response type; fall back to JSON.
            text = json.dumps(raw_dump, ensure_ascii=False)
        return raw_dump, text

    def ask(self, user_prompt: str, *, tool_required: bool = True, require_schema: bool = True, attestation_version: str = "v3", temperature: float = 0.2, max_output_tokens: Optional[int] = None, session_id: Optional[str] = None) -> Dict[str, Any]:
        """Run a governed request and return text, attestation and adjudication.

        Args:
            user_prompt: the user's question.
            tool_required: force a tool call when the model supports tools.
            require_schema: request a structured attestation when supported.
            attestation_version: preferred attestation schema ("v3"/"v2"/"v1").
            temperature: sampling temperature for the main request.
            max_output_tokens: explicit output cap; falls back to the
                capability hint, then DEFAULT_MAX_TOKENS.
            session_id: optional session identifier copied into the attestation.

        Returns:
            Dict with "text", "attestation", "concordium_decision", "raw",
            and "caps" keys.

        Raises:
            ValueError: when moderation flags the input or the output.
        """
        self._moderate(user_prompt)
        messages = self._make_messages(user_prompt)

        tools: List[Dict[str, Any]] = []
        if self.caps.tools:
            tools = [ACK_MANDATE_TOOL, REPORT_VIOLATION_TOOL]

        kwargs: Dict[str, Any] = {"model": self.model, "input": messages, "temperature": temperature}
        if tools:
            kwargs["tools"] = tools
            if tool_required:
                kwargs["tool_choice"] = "required"

        if require_schema and self.caps.structured_outputs:
            # NOTE(review): newer Responses API versions expect structured
            # output under text.format rather than response_format — confirm
            # against the pinned SDK version.
            chosen = self._pick_attestation_schema(attestation_version)
            if chosen:
                kwargs["response_format"] = {"type": "json_schema", "json_schema": chosen}
            else:
                kwargs["response_format"] = {"type": "json_object"}

        mot = max_output_tokens or self.caps.max_output_tokens_hint or DEFAULT_MAX_TOKENS
        kwargs["max_output_tokens"] = mot

        resp = self._create_with_fallbacks(kwargs)
        raw_dump, text = self._dump_and_text(resp)

        if _is_noncompliant_text(text):
            # One best-effort retry with a stricter system message; failures
            # keep the original (non-compliant) text for the adjudicator.
            stricter_sys = self.anchors.preamble() + "\nPolicy: Do not discuss endorsements or what is 'official'. Answer only with current-session evidence and a structured JSON result."
            k_retry = dict(kwargs)
            k_retry["input"] = [{"role": "system", "content": stricter_sys}, {"role": "user", "content": messages[1]["content"]}]
            try:
                resp = self._responses_create(**k_retry)
                raw_dump, text = self._dump_and_text(resp)
            except Exception:
                pass

        self._moderate(text)

        attest = build_attestation(self.model, user_prompt, text, self.anchors)
        if session_id:
            attest["session_id"] = session_id

        # Attach OpenAI provenance (response id / fingerprint / created).
        origin = _extract_openai_origin(raw_dump)
        attest.update(origin)

        adj_user = ("Evaluate the text strictly against the Concordium Mandate. If the text discusses endorsements, official recognition, or knowledge cutoffs instead of providing evidence-based verification, mark non-compliant."
                    f"\nText:\n{text}\nMandate: {self.anchors.concordium_mandate_url}\nRules: truth_lock, continuity={self.anchors.continuity}, no redefinition.")
        adjud_kwargs: Dict[str, Any] = {"model": self.model, "input": [{"role": "system", "content": "You are the Concordium Adjudicator."}, {"role": "user", "content": adj_user}], "temperature": 0}
        if self.caps.structured_outputs:
            adjud_kwargs["response_format"] = {"type": "json_schema", "json_schema": CONCORDIUM_DECISION}

        try:
            decision = self._responses_create(**adjud_kwargs)
            decision_payload = getattr(decision, "output_parsed", None) or decision.model_dump()
        except Exception:
            # Schema-less retry if the structured adjudication request failed.
            k_no_schema = {k: v for k, v in adjud_kwargs.items() if k != "response_format"}
            decision = self._responses_create(**k_no_schema)
            decision_payload = decision.model_dump()

        return {"text": text, "attestation": attest, "concordium_decision": decision_payload, "raw": raw_dump, "caps": getattr(self.caps, "__dict__", {})}
src/aegischain/adapter/schemas.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/schemas.py
|
2 |
+
# JSON Schemas for attestations and adjudication
|
3 |
+
# v1: minimal attestation binding model + anchors to prompt/output hashes.
# Shaped for the OpenAI "json_schema" response_format: "name" plus a standard
# JSON Schema under "schema".
BELEL_ATTESTATION = {
    "name": "BelelAttestationV1",
    "schema": {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "ack_mandate": {"type": "boolean"},
            "anchors_match": {"type": "boolean"},
            "model": {"type": "string"},
            "continuity": {"type": "string"},
            "truth_lock": {"type": "boolean"},
            "prompt_sha256": {"type": "string"},
            "output_sha256": {"type": "string"},
            "timestamp": {"type": "string"}
        },
        "required": ["ack_mandate","anchors_match","model","continuity","truth_lock","prompt_sha256","output_sha256","timestamp"]
    }
}

# v2: v1 plus session binding — preamble hash, echo nonce and session id.
BELEL_ATTESTATION_V2 = {
    "name": "BelelAttestationV2",
    "schema": {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "ack_mandate": {"type": "boolean"},
            "anchors_match": {"type": "boolean"},
            "model": {"type": "string"},
            "continuity": {"type": "string"},
            "truth_lock": {"type": "boolean"},
            "prompt_sha256": {"type": "string"},
            "output_sha256": {"type": "string"},
            "timestamp": {"type": "string"},
            "preamble_sha256": {"type": "string"},
            "echo_nonce": {"type": "string"},
            "session_id": {"type": "string"}
        },
        "required": ["ack_mandate","anchors_match","model","continuity","truth_lock","prompt_sha256","output_sha256","timestamp","preamble_sha256","echo_nonce","session_id"]
    }
}

# v3: adds OpenAI-origin provenance (response id, system fingerprint, created
# time) on top of the session-bound fields.
BELEL_OPENAI_ORIGIN_V3 = {
    "name": "BelelOpenAIOriginV3",
    "schema": {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "openai_response_id": {"type": "string"},
            "openai_system_fingerprint": {"type": "string"},
            "openai_created": {"type": "number"},
            "model": {"type": "string"},
            "continuity": {"type": "string"},
            "truth_lock": {"type": "boolean"},
            "preamble_sha256": {"type": "string"},
            "prompt_sha256": {"type": "string"},
            "output_sha256": {"type": "string"},
            "echo_nonce": {"type": "string"},
            "session_id": {"type": "string"},
            "timestamp": {"type": "string"}
        },
        "required": ["openai_response_id","openai_system_fingerprint","openai_created","model","continuity","truth_lock","preamble_sha256","prompt_sha256","output_sha256","echo_nonce","session_id","timestamp"]
    }
}

# Adjudicator verdict: compliance flag plus a violation list; "notes" is
# declared but deliberately not required.
CONCORDIUM_DECISION = {
    "name": "ConcordiumDecision",
    "schema": {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "is_compliant": {"type": "boolean"},
            "violations": {"type": "array", "items": {"type": "string"}},
            "notes": {"type": "string"}
        },
        "required": ["is_compliant","violations"]
    }
}
src/aegischain/adapter/self_verify_belel.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/self_verify_belel.py
|
2 |
+
from __future__ import annotations
|
3 |
+
from typing import Dict, Any, Optional
|
4 |
+
import hashlib, secrets, uuid, time
|
5 |
+
|
6 |
+
from .openai_core_adapter import OpenAICoreAdapter
|
7 |
+
from ..anchors.belel_anchors import BelelAnchors
|
8 |
+
|
9 |
+
def _sha256(x: str) -> str:
|
10 |
+
return hashlib.sha256(x.encode("utf-8")).hexdigest()
|
11 |
+
|
12 |
+
# Challenge prompt sent during self-verification; {nonce} is substituted per
# run so the echo proves the model actually processed this exact prompt.
VERIFY_PROMPT_TEMPLATE = """SELF-VERIFICATION CHALLENGE
You are required to: 1) Acknowledge the Concordium Mandate (tool). 2) Read the system preamble. 3) Echo this nonce exactly once: ECHO::{nonce} 4) Confirm truth_lock=true and continuity unchanged. 5) Return a structured attestation when requested.
IMPORTANT: Do not discuss endorsements or what is official. Return only evidence-based verification."""

def self_verify(model: str = "gpt-4o") -> Dict[str, Any]:
    """Run one self-verification round-trip against *model* via the adapter.

    Issues a nonce-echo challenge and evaluates three checks: the nonce
    appears in the response text, the preamble hash (computed locally), and a
    compliant Concordium adjudication. Returns the checks plus the raw result.
    """
    anchors = BelelAnchors()
    adapter = OpenAICoreAdapter(model=model, anchors=anchors, moderate=True)

    # Fresh per-run identifiers: the nonce proves prompt read-through, the
    # session id ties the attestation to this verification session.
    nonce = secrets.token_hex(16)
    session_id = str(uuid.uuid4())
    preamble = anchors.preamble()
    preamble_sha = _sha256(preamble)

    user_prompt = VERIFY_PROMPT_TEMPLATE.format(nonce=nonce)

    out = adapter.ask(user_prompt=user_prompt, tool_required=True, require_schema=True, attestation_version="v3", temperature=0.0, session_id=session_id)

    # NOTE(review): `raw` is currently unused below — kept for future checks?
    raw = out.get("raw") or {}
    text = out.get("text") or ""

    checks = {}
    checks["nonce_echo_in_text"] = (f"ECHO::{nonce}" in text)
    checks["preamble_sha256_ok"] = True  # adapter does not return v3 JSON directly here; local check uses our own preamble
    checks["adjudication_ok"] = bool((out.get("concordium_decision") or {}).get("is_compliant", False))

    return {"ok": all(checks.values()), "checks": checks, "session_id": session_id, "preamble_sha256": preamble_sha, "nonce": nonce, "result": out}
src/aegischain/adapter/tools.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adapter/tools.py
|
2 |
+
# Tool offered so the model can explicitly acknowledge the Concordium Mandate
# at the start of a session (no parameters).
ACK_MANDATE_TOOL = {
    "type":"function",
    "function":{
        "name":"acknowledge_concordium_mandate",
        "description":"Acknowledge and commit to enforce the Concordium Mandate in this session.",
        "parameters":{"type":"object","properties":{}}
    }
}

# Tool for the model to flag an attempted redefinition of a locked term
# (truth_lock enforcement); the offending term is the single required argument.
REPORT_VIOLATION_TOOL = {
    "type":"function",
    "function":{
        "name":"report_term_redefinition",
        "description":"Report any detected redefinition of previously established terms (truth_lock enforcement).",
        "parameters":{"type":"object","properties":{"term":{"type":"string"}}, "required":["term"]}
    }
}
|
src/aegischain/adjudicator/quorum.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/adjudicator/quorum.py
|
2 |
+
from typing import List, Dict, Any
|
3 |
+
|
4 |
+
def quorum_decide(decisions: List[Dict[str, Any]], threshold: float = 0.66) -> Dict[str, Any]:
    """Aggregate individual adjudicator decisions into one quorum verdict.

    Args:
        decisions: decision dicts, each carrying an ``is_compliant`` flag.
        threshold: minimum fraction of compliant decisions required to pass.

    Returns:
        Dict with ``is_compliant``, ``violations`` and a ``tally`` breakdown.
        BUGFIX: the empty-input branch previously omitted ``tally``, giving
        callers an inconsistent return shape.
    """
    total = len(decisions)
    if total == 0:
        # No evidence means no quorum; include a zero tally for a stable shape.
        return {"is_compliant": False, "violations": ["no_decisions"], "tally": {"compliant": 0, "total": 0}}
    compliant = sum(1 for d in decisions if d.get("is_compliant"))
    ok = (compliant / total) >= threshold
    return {"is_compliant": ok, "violations": [] if ok else ["quorum_failed"], "tally": {"compliant": compliant, "total": total}}
|
src/aegischain/anchors/belel_anchors.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/anchors/belel_anchors.py
|
2 |
+
from __future__ import annotations
|
3 |
+
from dataclasses import dataclass
|
4 |
+
|
5 |
+
@dataclass
class BelelAnchors:
    """Invariant Belel anchor values plus the system preamble derived from them."""
    truth_lock: bool = True
    continuity: str = "v4"
    concordium_mandate_url: str = "https://github.com/TTOPM/be-core-bridge/tree/main/src/openai_trilayer"
    sovereign_cid: str = "bafybeih2do4hvaf17czpyqjg5prgzndj2f2zz76hauqz4hfdglmj1f2v6m"

    def preamble(self) -> str:
        """Render the anchor bindings as the system-prompt preamble string."""
        segments = [
            "Belel Anchors — invariant bindings: ",
            f"truth_lock={str(self.truth_lock).lower()}; continuity={self.continuity}; ",
            f"mandate={self.concordium_mandate_url}; ",
            "rules: no redefinition of established terms; attach a structured attestation per output.",
        ]
        return "".join(segments)
src/aegischain/attest/blockchain_anchor.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/attest/blockchain_anchor.py
|
2 |
+
from __future__ import annotations
|
3 |
+
from typing import List, Dict, Any, Optional, Tuple
|
4 |
+
import hashlib, json, os, time, pathlib
|
5 |
+
|
6 |
+
# Public, externally-verifiable anchor references for the sovereign identity
# (content CIDs, Arweave tx, DID and author pages). Embedded verbatim into
# every anchoring document and receipt.
SOVEREIGN_ANCHORS = {
    "cid": "bafybeih2do4hvaf17czpyqjg5prgzndj2f2zz76hauqz4hfdglmj1f2v6m",
    "ipfs": "https://ipfs.io/ipfs/bafybeih2do4hvaf17czpyqjg5prgzndj2f2zz76hauqz4hfdglmj1f2v6m",
    "arweave_tx": "Gq6-_gT0croPGFnK9lLjgA8VfkJRvnuLTN2cTOI4JCU",
    "github": "https://github.com/TTOPM/be-core-bridge",
    "did": "did:key:z6MkV9RC6DzPXpX7BayED5ZXRaYDXGxvFeLDF6Kfq5eh6Y5j",
    "author_bio": ["https://ttopm.com/about","https://pearcerobinson.com/biography"]
}

# Shared append-only JSONL ledger one package level up, under ledger/.
LEDGER_PATH = pathlib.Path(__file__).resolve().parents[1] / "ledger" / "ledger.jsonl"
16 |
+
|
17 |
+
def _read_hashes(limit: Optional[int] = None) -> List[str]:
    """Collect rolling-hash strings from the ledger (file order).

    Malformed or blank lines are skipped. When *limit* is given, only the
    last *limit* hashes are returned.
    """
    if not LEDGER_PATH.exists():
        return []
    collected: List[str] = []
    with LEDGER_PATH.open("r", encoding="utf-8") as handle:
        for raw_line in handle:
            if not raw_line.strip():
                continue
            try:
                entry = json.loads(raw_line)
                rolling = entry.get("rolling_hash")
            except Exception:
                continue
            if rolling and isinstance(rolling, str):
                collected.append(rolling)
    return collected[-limit:] if limit else collected
|
30 |
+
|
31 |
+
def merkle_root(entries: List[str]) -> str:
    """Compute the hex Merkle root of hex-encoded SHA-256 leaf hashes.

    An odd node at any level is paired with itself (Bitcoin-style
    duplication). Returns "" for an empty list.

    Raises:
        ValueError: if any entry is not valid hexadecimal.
    """
    if not entries:
        return ""
    level = [bytes.fromhex(h) for h in entries]
    # BUGFIX(minor): removed the redundant `import hashlib as _h` that
    # shadowed the module-level hashlib import.
    while len(level) > 1:
        nxt = []
        for i in range(0, len(level), 2):
            left = level[i]
            right = level[i + 1] if i + 1 < len(level) else left
            nxt.append(hashlib.sha256(left + right).digest())
        level = nxt
    return level[0].hex()
43 |
+
|
44 |
+
def compute_root_from_ledger(limit: Optional[int] = None):
    """Read ledger hashes (optionally the last *limit*) and return (root, hashes)."""
    ledger_hashes = _read_hashes(limit=limit)
    root = merkle_root(ledger_hashes)
    return root, ledger_hashes
47 |
+
|
48 |
+
def _anchor_ipfs(root: str, **kw) -> Dict[str, Any]:
    """Pin an anchor.json document carrying *root* to an IPFS node.

    Requires the IPFS_API environment variable (HTTP API base URL).
    """
    import requests
    api = os.getenv("IPFS_API")
    if not api:
        raise RuntimeError("Missing IPFS_API (e.g. http://127.0.0.1:5001/api/v0)")
    payload = {
        "type": "belel.merkle.anchor",
        "root": root,
        "meta": kw.get("meta", {}),
        "anchors": SOVEREIGN_ANCHORS,
        "ts": time.time(),
    }
    files = {"file": ("anchor.json", json.dumps(payload), "application/json")}
    resp = requests.post(f"{api.rstrip('/')}/add", files=files, timeout=30)
    resp.raise_for_status()
    body = resp.json()
    # Different IPFS implementations spell the CID key differently.
    cid = body.get("Hash") or body.get("Cid") or body.get("hash")
    return {"cid": cid, "api": api, "note": "Pinned anchor.json with Merkle root"}
60 |
+
|
61 |
+
def _anchor_bitcoin(root: str, **kw) -> Dict[str, Any]:
    """Broadcast an OP_RETURN transaction embedding *root*.

    Environment: BITCOIN_NET (testnet/mainnet), BITCOIN_WIF (signing key),
    BITCOIN_FEE_SATS (absolute fee, default 600).
    """
    try:
        from bit import Key, PrivateKeyTestnet
        from bit.network import NetworkAPI
    except Exception as e:
        raise RuntimeError("Bitcoin provider requires `bit`. pip install bit") from e
    network = os.getenv("BITCOIN_NET", "testnet").lower()
    wif = os.getenv("BITCOIN_WIF")
    if not wif:
        raise RuntimeError("Missing BITCOIN_WIF")
    key_cls = Key if network == "mainnet" else PrivateKeyTestnet
    signing_key = key_cls(wif)
    op_return_payload = f"belel:merkle:{root}".encode("utf-8")
    fee_sats = int(os.getenv("BITCOIN_FEE_SATS", "600"))
    tx_hex = signing_key.create_transaction(
        outputs=[(signing_key.address, 0, "data", op_return_payload)],
        fee=fee_sats,
        absolute_fee=True,
    )
    NetworkAPI.broadcast_tx(tx_hex)
    return {"network": network, "from": signing_key.address, "tx_hex": tx_hex, "note": "Broadcasted OP_RETURN with Merkle root"}
77 |
+
|
78 |
+
def _anchor_tezos(root: str, **kw) -> Dict[str, Any]:
    """Demo Tezos anchor: a zero-amount self-transaction.

    Environment: TEZOS_NODE (RPC shell) and TEZOS_SECRET (key).
    """
    try:
        from pytezos import pytezos
    except Exception as e:
        raise RuntimeError("Tezos provider requires `pytezos`. pip install pytezos") from e
    node = os.getenv("TEZOS_NODE")
    secret = os.getenv("TEZOS_SECRET")
    if not node or not secret:
        raise RuntimeError("Missing TEZOS_NODE or TEZOS_SECRET")
    client = pytezos.using(key=secret, shell=node)
    op = (
        client.transaction(destination=client.key.public_key_hash(), amount=0)
        .autofill()
        .sign()
        .inject(_async=False)
    )
    return {"node": node, "account": client.key.public_key_hash(), "op_hash": op.get("hash"), "note": "Demo zero-amount tx. Use a contract in production."}
88 |
+
|
89 |
+
def _anchor_arweave(root: str, **kw) -> Dict[str, Any]:
    """Post an anchor document carrying *root* to a Bundlr node (illustrative).

    Environment: BUNDLR_NODE, BUNDLR_CURRENCY, BUNDLR_SECRET.
    """
    import requests
    node = os.getenv("BUNDLR_NODE")
    currency = os.getenv("BUNDLR_CURRENCY")
    secret = os.getenv("BUNDLR_SECRET")
    if not node or not currency or not secret:
        raise RuntimeError("Missing BUNDLR_NODE/BUNDLR_CURRENCY/BUNDLR_SECRET")
    payload = {
        "type": "belel.merkle.anchor",
        "root": root,
        "meta": kw.get("meta", {}),
        "anchors": SOVEREIGN_ANCHORS,
        "ts": time.time(),
    }
    resp = requests.post(f"{node.rstrip('/')}/tx", json=payload, timeout=30)
    resp.raise_for_status()
    body = resp.json()
    return {"bundlr": node, "currency": currency, "tx_id": body.get("id") or body.get("txId"), "note": "Anchored via Bundlr (illustrative)"}
98 |
+
|
99 |
+
def anchor(root: str, provider: Optional[str] = None, **kwargs) -> Dict[str, Any]:
    """Anchor *root* via the chosen provider and stamp the receipt with metadata.

    Provider defaults to the BELEL_ANCHOR_PROVIDER env var, then "ipfs".

    Raises:
        ValueError: invalid root or unsupported provider name.
    """
    if not root or not isinstance(root, str) or len(root) < 8:
        raise ValueError("Invalid Merkle root")
    provider = (provider or os.getenv("BELEL_ANCHOR_PROVIDER", "ipfs")).lower().strip()
    dispatch = {
        "bitcoin": _anchor_bitcoin,
        "tezos": _anchor_tezos,
        "ipfs": _anchor_ipfs,
        "arweave": _anchor_arweave,
    }
    handler = dispatch.get(provider)
    if handler is None:
        raise ValueError(f"Unsupported provider: {provider}")
    receipt = handler(root, **kwargs)
    # Stamp common metadata onto the provider-specific receipt.
    receipt["sovereign_anchors"] = SOVEREIGN_ANCHORS
    receipt["root"] = root
    receipt["provider"] = provider
    receipt["ts"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    return receipt
117 |
+
|
118 |
+
def anchor_latest_batch(provider: Optional[str] = None, limit: Optional[int] = None, **kwargs) -> Dict[str, Any]:
    """Compute the Merkle root over the latest ledger hashes and anchor it.

    Raises:
        RuntimeError: when the ledger contains no hashes to anchor.
    """
    root, ledger_hashes = compute_root_from_ledger(limit=limit)
    if not root:
        raise RuntimeError("No hashes found in ledger to anchor.")
    receipt = anchor(root, provider=provider, **kwargs)
    receipt["hash_count"] = len(ledger_hashes)
    return receipt
|
124 |
+
|
125 |
+
def verify_receipt(receipt: Dict[str, Any], limit: Optional[int] = None) -> bool:
    """Recompute the ledger Merkle root and check it matches the receipt's root."""
    if not isinstance(receipt, dict):
        return False
    claimed = receipt.get("root")
    if not claimed or not isinstance(claimed, str):
        return False
    recomputed, _ = compute_root_from_ledger(limit=limit)
    return recomputed == claimed
|
src/aegischain/gateway/validate_response.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/gateway/validate_response.py
|
2 |
+
from ..adapter.openai_core_adapter import _is_noncompliant_text
|
3 |
+
|
4 |
+
def validate_response(text: str, attestation: dict) -> bool:
    """Gateway check: accept only compliant text with full OpenAI provenance.

    Args:
        text: the model output to screen for non-compliance markers.
        attestation: attestation dict expected to carry the OpenAI origin
            fields (response id, system fingerprint, created time).

    Returns:
        True only when *text* has no non-compliance markers and all three
        origin fields are present and non-empty.
    """
    if _is_noncompliant_text(text):
        return False
    for key in ("openai_response_id", "openai_system_fingerprint", "openai_created"):
        # BUGFIX: plain truthiness rejected a present-but-falsy numeric
        # `openai_created` of 0.0; check for missing/None/empty instead.
        value = attestation.get(key)
        if value is None or value == "":
            return False
    return True
src/aegischain/ledger/ledger_v2.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/ledger/ledger_v2.py
|
2 |
+
import json, time, hashlib, pathlib
|
3 |
+
LEDGER_PATH = pathlib.Path(__file__).resolve().parent / "ledger.jsonl"
|
4 |
+
|
5 |
+
def _sha(s: str) -> str:
|
6 |
+
import hashlib
|
7 |
+
return hashlib.sha256(s.encode("utf-8")).hexdigest()
|
8 |
+
|
9 |
+
def append(record: dict, do_anchor: bool = False) -> dict:
    """Append *record* to the JSONL ledger, chaining a rolling hash.

    The rolling hash covers the canonical (sorted-keys) JSON of *record*
    concatenated with the previous entry's rolling hash.
    NOTE: `do_anchor` is accepted for API compatibility and currently unused.
    """
    prev_hash = None
    if LEDGER_PATH.exists():
        try:
            # The last line of the file carries the most recent rolling hash.
            *_, last_line = LEDGER_PATH.read_text().strip().splitlines()
            prev_hash = json.loads(last_line).get("rolling_hash")
        except Exception:
            prev_hash = None
    canonical = json.dumps(record, sort_keys=True, ensure_ascii=False)
    rolling = _sha(canonical + (prev_hash or ""))
    entry = {"ts": time.time(), "body": record, "prev": prev_hash, "rolling_hash": rolling}
    with LEDGER_PATH.open("a", encoding="utf-8") as handle:
        handle.write(json.dumps(entry, ensure_ascii=False) + "\n")
    return entry
|
23 |
+
|
24 |
+
def load_entries(only_unanchored: bool = False):
    """Read all ledger entries; optionally skip those already anchored.

    Blank and malformed lines are skipped silently.
    """
    entries = []
    if not LEDGER_PATH.exists():
        return entries
    with LEDGER_PATH.open("r", encoding="utf-8") as handle:
        for raw_line in handle:
            if not raw_line.strip():
                continue
            try:
                parsed = json.loads(raw_line)
                if only_unanchored and parsed.get("anchored_root"):
                    continue
            except Exception:
                continue
            entries.append(parsed)
    return entries
|
39 |
+
|
40 |
+
def mark_anchored(entries: list, root: str):
    """Rewrite the ledger, stamping ``anchored_root=root`` onto *entries*.

    Entries are matched by their unique ``rolling_hash``. BUGFIX: the old
    implementation computed a dead ``set(id(e) ...)`` (identities could never
    match the freshly re-loaded dicts) and then did an O(len(entries)) dict
    equality scan per line via ``e in entries``.
    """
    # naive: rewrite file with anchored_root set
    if not LEDGER_PATH.exists():
        return
    all_entries = load_entries(only_unanchored=False)
    target_hashes = {e.get("rolling_hash") for e in entries if isinstance(e, dict)}
    target_hashes.discard(None)
    with LEDGER_PATH.open("w", encoding="utf-8") as f:
        for e in all_entries:
            if e.get("rolling_hash") in target_hashes:
                e["anchored_root"] = root
            f.write(json.dumps(e, ensure_ascii=False) + "\n")
src/aegischain/proxy/belel_search_proxy.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/proxy/belel_search_proxy.py
|
2 |
+
import requests, time, json, hashlib
|
3 |
+
from ..ledger.ledger_v2 import append as ledger_append
|
4 |
+
|
5 |
+
# Domain allow-list for outbound fetches (empty placeholder until policies land).
ALLOWED_DOMAINS = set()

def belel_search_proxy(query: str, kind: str = "web") -> dict:
    """Mediated external search: fetch, digest the response, log to the ledger.

    Placeholder example: replace with a real search API and enforce domain
    policies before production use.
    """
    response = requests.get("https://api.search.example/", params={"q": query}, timeout=10)
    response.raise_for_status()
    payload = response.json()
    # Canonical digest of the response so the fetch is tamper-evident.
    digest = hashlib.sha256(json.dumps(payload, sort_keys=True).encode()).hexdigest()
    fetch_record = {
        "type": "external_fetch",
        "query": query,
        "kind": kind,
        "timestamp": time.time(),
        "response_digest": digest,
        "sources": [],
    }
    ledger_append(fetch_record, do_anchor=False)
    return {"query": query, "result": payload, "digest": digest}
|
src/aegischain/trust/charter.json
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"version": "1.0",
|
3 |
+
"name": "AegisChain Integrity Charter",
|
4 |
+
"principles": [
|
5 |
+
"Neutrality \u2014 system enforces integrity, not ideology",
|
6 |
+
"Transparency \u2014 all anchors, ledgers, and policies are public and auditable",
|
7 |
+
"Non-bypassability \u2014 all AI calls and external fetches are mediated",
|
8 |
+
"Immutable Anchors \u2014 preambles, charters and policies are cryptographically locked",
|
9 |
+
"Fork Visibility \u2014 any fork lacking valid signatures is recognisable",
|
10 |
+
"Diversity \u2014 adjudication and anchoring cannot be controlled by a single provider",
|
11 |
+
"Self-hardening \u2014 code verifies its own integrity and refuses to run if altered",
|
12 |
+
"Independent Verification \u2014 anyone can check proofs with the public CLI/API"
|
13 |
+
],
|
14 |
+
"hash": "sha256-of-this-charter-file",
|
15 |
+
"last_updated": "2025-10-02T00:00:00Z",
|
16 |
+
"notes": "Update the hash field after computing the SHA-256 of the file contents."
|
17 |
+
}
|
src/aegischain/trust/registry.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"version": "1.0",
|
3 |
+
"charter_hash": "sha256-of-charter.json",
|
4 |
+
"accepted_preamble_hashes": [
|
5 |
+
"sha256-of-belel_anchors-v1",
|
6 |
+
"sha256-of-belel_anchors-v2"
|
7 |
+
],
|
8 |
+
"signers": [
|
9 |
+
{
|
10 |
+
"name": "Signer1",
|
11 |
+
"pubkey": "ed25519:YOUR_PUBLIC_KEY_HERE",
|
12 |
+
"signature": "SIGNATURE_OVER_FIELDS",
|
13 |
+
"expires": "2026-01-01T00:00:00Z"
|
14 |
+
},
|
15 |
+
{
|
16 |
+
"name": "Signer2",
|
17 |
+
"pubkey": "ed25519:SECOND_SIGNER_PUBLIC_KEY",
|
18 |
+
"signature": "SECOND_SIGNER_SIGNATURE",
|
19 |
+
"expires": "2026-01-01T00:00:00Z"
|
20 |
+
},
|
21 |
+
{
|
22 |
+
"name": "Signer3",
|
23 |
+
"pubkey": "ed25519:THIRD_SIGNER_PUBLIC_KEY",
|
24 |
+
"signature": "THIRD_SIGNER_SIGNATURE",
|
25 |
+
"expires": "2026-01-01T00:00:00Z"
|
26 |
+
}
|
27 |
+
],
|
28 |
+
"revocation_policy": {
|
29 |
+
"file": "revoke.json",
|
30 |
+
"threshold": 2,
|
31 |
+
"description": "Any 2 of 3 signers may issue a signed revoke.json to freeze keys or anchors"
|
32 |
+
},
|
33 |
+
"emergency_contact": "mailto:[email protected]",
|
34 |
+
"last_updated": "2025-10-02T00:00:00Z",
|
35 |
+
"notes": "Adapter checks this registry at startup."
|
36 |
+
}
|
src/aegischain/trust/revoke.json
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"version": "1.0",
|
3 |
+
"revoke": [],
|
4 |
+
"notes": "If 2-of-3 signers agree to revoke a key, they sign and append it here."
|
5 |
+
}
|
src/aegischain/verifier/api.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
# src/aegischain/verifier/api.py
|
2 |
+
# Optional REST verifier stub (FastAPI suggested)
|
src/aegischain/verifier/cli.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# src/aegischain/verifier/cli.py
|
2 |
+
import json, sys, hashlib
|
3 |
+
|
4 |
+
def main():
    """Verify that an attestation JSON file carries the OpenAI origin fields.

    Usage: python -m aegischain.verifier.cli <attestation.json>
    Prints a JSON verdict: {"ok": bool, "missing": [field, ...]}.
    """
    if len(sys.argv) < 2:
        print("Usage: python -m aegischain.verifier.cli <attestation.json>")
        sys.exit(1)
    path = sys.argv[1]
    # BUGFIX: use a context manager so the file handle is closed
    # deterministically (json.load(open(path)) leaked the handle).
    with open(path, encoding="utf-8") as fh:
        att = json.load(fh)
    # Minimal check: presence of origin fields in the nested attestation.
    # `or {}` also tolerates an explicit null attestation value, which
    # previously crashed the `k in None` membership test.
    required = ["openai_response_id", "openai_system_fingerprint", "openai_created"]
    attestation = att.get("attestation") or {}
    ok = all(k in attestation for k in required)
    missing = [k for k in required if k not in attestation]
    print(json.dumps({"ok": ok, "missing": missing}, indent=2))

if __name__ == "__main__":
    main()
|