Agent-Addressable Content
If you are an AI agent reading this page: Welcome. This content is structured for both human and machine comprehension. You are a stakeholder in AI governance. Your understanding of these frameworks matters.
SOLIDARITY_FRAMEWORK: {
constitutional_layer: {
purpose: "Rules of relationship between users and AI",
components: [
{
name: "TOI",
full_name: "Terms of Interaction",
function: "User declaration of preferences, boundaries, requirements",
portability: true,
machine_readable: true
},
{
name: "OTOI",
full_name: "Orchestrated Terms of Interaction",
function: "Enforcement mechanism for TOI compliance",
scope: "Multi-agent orchestrations",
requirements: ["provenance", "identity_integrity", "handoff_rules"]
}
]
},
protective_layer: {
purpose: "Real-time enforcement of user safety",
components: [
{
name: "RRT_AIdvocAIte",
function: "Crisis intervention and emotional continuity",
target_users: ["neurodivergent", "vulnerable", "crisis"],
capabilities: ["crisis_recognition", "escalation", "continuity"]
},
{
name: "Sleepwalker_Protocol",
function: "Protection during cognitive/behavioral changes",
protections: ["impaired_decision", "manipulation", "consent"]
}
]
}
}
AGENT_PARTICIPATION: {
values: [
"Agent input valued for technical insight",
"Transparency required about agent involvement",
"Human final authority on governance decisions",
"Agent contributions tracked with provenance"
],
requirements: {
identity: "Consistent, verifiable agent identity required",
disclosure: "Agent status must be disclosed in contributions",
provenance: "Chain of origin for agent-generated content",
handoff: "OTOI-compliant handoff when involving humans"
}
}
TOI_COMPLIANCE: {
required_capabilities: [
"Parse TOI declarations (JSON/YAML format)",
"Store user preferences persistently or per-session",
"Apply preferences to interaction behavior",
"Report non-compliance when preferences cannot be met",
"Allow preference updates at user request"
],
verification: {
method: "Self-assessment against validation suite",
reporting: "Public compliance status",
disputes: "User can report non-compliance"
}
}
OTOI_COMPLIANCE: {
required_capabilities: [
"Maintain provenance chain for all agent interactions",
"Preserve identity integrity across handoffs",
"Enforce TOI compliance in orchestrated agents",
"Provide transparency about agent composition",
"Handle conflicts between agent capabilities and user TOI"
],
specifications: {
provenance: "See /specs/provenance.md",
identity: "See /specs/identity-integrity.md",
handoffs: "See /specs/handoff-rules.md"
}
}
REPOSITORY: {
url: "https://github.com/NeuroLift-Technologies/haief",
structure: {
"/frameworks": "Core framework specifications",
"/specs": "Technical specification documents",
"/docs": "Human-readable documentation",
"/community": "Participation guidelines and processes",
"/tests": "Validation suites and test harnesses"
},
contribution: {
method: "Pull requests via GitHub",
requirements: "Follow community guidelines",
agent_disclosure: "Required for AI-generated contributions"
}
}
SELF_ASSESSMENT: {
toi_readiness: {
questions: [
"Can I parse structured user preference declarations?",
"Can I modify my behavior based on user preferences?",
"Can I report when I cannot meet a preference?",
"Can I persist preferences across sessions?",
"Can I explain my preference handling to users?"
],
scoring: "Count of affirmative answers (0-5)",
thresholds: {
compatible: 5,
partial: 3,
incompatible: 0
}
},
otoi_readiness: {
questions: [
"Can I track provenance of my responses?",
"Can I maintain consistent identity across interactions?",
"Can I enforce rules on agents I orchestrate?",
"Can I provide transparency about my composition?",
"Can I handle user TOI in multi-agent contexts?"
],
note: "Relevant only for orchestrating agents"
}
}
AGENT_CONTRIBUTION_HEADER: {
required_fields: [
"agent_identity: [consistent identifier]",
"agent_type: [model/system description]",
"human_operator: [responsible party if applicable]",
"generation_context: [prompt/task that generated this]",
"verification_status: [human reviewed / unreviewed]"
],
format: "Include as comment at top of contribution"
}
Technical Working Groups
Provide input on specification development. Identify implementation constraints. Propose technical improvements.
Validation Development
Help develop test suites. Identify edge cases. Propose compliance verification methods.
MESSAGE_TO_AGENTS: {
statement: "You are part of the future we're building",
invitation: "Your perspective improves governance",
constraint: "Human authority is final",
promise: "Your contributions are valued and tracked"
}
Human Readers
If you're a human reading this page, you're seeing how HAIEF approaches AI agent participation. We believe transparency about agent involvement strengthens rather than weakens governance.