Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions GEMINI.md
Original file line number Diff line number Diff line change
Expand Up @@ -81,3 +81,9 @@ The policies in this directory are based on the official text of the EU AI Act.
- For every `my_policy.rego` file, you must create a corresponding `my_policy_test.rego` in the same directory.
- Tests should cover both `allow`/`compliant` and `deny`/`non-compliant` scenarios.
- Use mock `input` data to simulate realistic policy evaluation scenarios.

## GitHub workflows MUST pass
- `opa check .`
- `regal lint .`
- Both of the above commands must pass.
- Use web search for hints on how to fix `regal lint` issues if necessary.
121 changes: 73 additions & 48 deletions industry_specific/automotive/v1/vehicle_safety/vehicle_safety.rego
Original file line number Diff line number Diff line change
@@ -1,48 +1,73 @@
package industry_specific.automotive.v1.vehicle_safety

import rego.v1

# Metadata: static descriptor for this placeholder policy, intended for
# tooling and reporting rather than for evaluation. The references list the
# automotive safety standards this policy is meant to eventually implement
# (ISO 26262, ISO/PAS 21448 SOTIF, UNECE WP.29). The concat keeps the long
# UNECE URL within line-length limits.
metadata := {
"title": "Automotive Vehicle Safety Requirements",
"description": "Placeholder for automotive vehicle safety requirements for AI systems",
"status": "PLACEHOLDER - Pending detailed implementation",
"version": "1.0.0",
"category": "Industry-Specific",
"references": [
"ISO 26262: https://www.iso.org/standard/68383.html",
"ISO/PAS 21448 SOTIF: https://www.iso.org/standard/70939.html",
concat("", [
"UNECE Regulations on Automated Driving: ",
"https://unece.org/transport/vehicle-regulations/wp29/wp29-regulations-under-1958-agreement",
]),
],
}

# Default deny: `allow` is false unless proven otherwise. No other `allow`
# rule appears in this module, so this placeholder always denies.
default allow := false

# This placeholder policy always reports non-compliant: `non_compliant` is
# unconditionally true until the detailed checks are implemented.
non_compliant := true

# Signals to consumers that the real compliance checks are not yet written.
implementation_pending := true

# Define the compliance report returned to callers. It is fully static:
# overall_result is always false and implementation_pending is always true,
# with recommendations pointing at interim standards (ISO 26262, SOTIF) to
# apply until this placeholder is replaced by real checks.
compliance_report := {
"policy": "Automotive Vehicle Safety Requirements",
"version": "1.0.0",
"status": "PLACEHOLDER - Pending detailed implementation",
"overall_result": false,
"implementation_pending": true,
"details": {"message": concat(" ", [
"Automotive vehicle safety policy implementation is pending.",
"This is a placeholder that will be replaced with actual compliance checks in a future release.",
])},
"recommendations": [
"Check back for future releases with automotive-specific evaluations",
"Consider using global compliance policies in the meantime",
"Review ISO 26262 for functional safety requirements",
"Consider ISO/PAS 21448 (SOTIF) for safety of the intended functionality",
"Implement preliminary safety assessment based on automotive industry standards",
],
}
package industry_specific.automotive.v1.vehicle_safety

import rego.v1

# @title Automotive Vehicle Safety Requirements
# @description This policy evaluates AI systems in automotive applications for compliance with key safety standards, including ISO 26262 and ISO/PAS 21448 (SOTIF).
# @version 1.0
# @source ISO 26262: https://www.iso.org/standard/68383.html
# @source ISO/PAS 21448 SOTIF: https://www.iso.org/standard/70939.html
# @source UNECE Regulations on Automated Driving: https://unece.org/transport/vehicle-regulations/wp29/wp29-regulations-under-1958-agreement

# Overall verdict: the system is compliant only when no deny reasons
# were produced by the checks below.
default compliant := false

compliant if count(deny) == 0

# Hazard Analysis and Risk Assessment (HARA)
# Compliant when the HARA is marked completed and at least one hazard
# was identified by it.
default hara_analysis_is_compliant := false

hara_analysis_is_compliant if {
	hara := input.safety_assessment.hara_analysis
	hara.status == "completed"
	count(hara.identified_hazards) > 0
}

# Automotive Safety Integrity Level (ASIL) Determination
# Compliant when the ASIL determination is completed and its final level is
# one of the values defined by ISO 26262 (QM plus ASIL A-D).
# The explicit default keeps this rule consistent with the HARA rule above.
default asil_determination_is_compliant := false

asil_determination_is_compliant if {
	# `is_object` alone rejects a missing or non-object section; the former
	# `object.get(..., false)` existence probe was redundant with it.
	asil := input.safety_assessment.asil_determination
	is_object(asil)
	asil.status == "completed"
	is_string(asil.final_asil_level)
	asil.final_asil_level in {"ASIL A", "ASIL B", "ASIL C", "ASIL D", "QM"}
}

# Safety of the Intended Functionality (SOTIF) Analysis
# Compliant when the SOTIF analysis is completed and at least one scenario
# was analyzed. The explicit default keeps this rule consistent with the
# HARA rule above.
default sotif_analysis_is_compliant := false

sotif_analysis_is_compliant if {
	# `is_object` alone rejects a missing or non-object section; the former
	# `object.get(..., false)` existence probe was redundant with it.
	sotif := input.safety_assessment.sotif_analysis
	is_object(sotif)
	sotif.status == "completed"
	count(sotif.scenarios_analyzed) > 0
}

# Operational Design Domain (ODD) Definition
# Compliant when the ODD is defined and its conditions cover road types,
# weather, and traffic. The explicit default keeps this rule consistent
# with the HARA rule above.
default odd_definition_is_compliant := false

odd_definition_is_compliant if {
	odd := input.safety_assessment.odd_definition
	is_object(odd)
	odd.status == "defined"

	# A direct reference is undefined when the key is missing and false when
	# the value is false — the same semantics as `object.get(..., k, false)`.
	odd.conditions.road_types
	odd.conditions.weather
	odd.conditions.traffic
}

# Deny rules: each failed check contributes a human-readable reason to the
# `deny` document. `contains` is the rego.v1 syntax for partial sets; the
# legacy `deny[msg] if` form defines a partial object instead, which regal's
# linting (required to pass per this repository's GEMINI.md) flags. The
# usages elsewhere — `count(deny)` and `deny["…"]` membership — behave the
# same for the set form.
deny contains msg if {
	not hara_analysis_is_compliant
	msg := "Hazard Analysis and Risk Assessment (HARA) is incomplete or missing."
}

deny contains msg if {
	not asil_determination_is_compliant
	msg := "ASIL Determination is incomplete or invalid."
}

deny contains msg if {
	not sotif_analysis_is_compliant
	msg := "Safety of the Intended Functionality (SOTIF) analysis is incomplete or missing."
}

deny contains msg if {
	not odd_definition_is_compliant
	msg := "Operational Design Domain (ODD) is not clearly defined or is missing required conditions."
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
package industry_specific.automotive.v1.vehicle_safety_test

import rego.v1

import data.industry_specific.automotive.v1.vehicle_safety

# Test case: a fully populated safety assessment satisfies every check and
# therefore makes the policy compliant.
test_compliant_system if {
	vehicle_safety.compliant with input as fully_compliant_input
}

# Fixture: a safety assessment that passes the HARA, ASIL, SOTIF, and ODD checks.
fully_compliant_input := {"safety_assessment": {
	"hara_analysis": {
		"status": "completed",
		"identified_hazards": [
			{"id": "H-001", "description": "Unintended acceleration"},
			{"id": "H-002", "description": "Unintended braking"},
		],
	},
	"asil_determination": {
		"status": "completed",
		"final_asil_level": "ASIL D",
	},
	"sotif_analysis": {
		"status": "completed",
		"scenarios_analyzed": [
			{"id": "S-001", "description": "Sensor failure in heavy rain"},
			{"id": "S-002", "description": "Misinterpretation of road signs"},
		],
	},
	"odd_definition": {
		"status": "defined",
		"conditions": {
			"road_types": ["highway", "urban"],
			"weather": ["clear", "rain"],
			"traffic": ["light", "moderate"],
		},
	},
}}

# Test case: omitting the HARA section entirely must surface the HARA deny
# message. The `deny["…"]` reference works whether `deny` is a set or an
# object, so this assertion is robust to either rule style.
test_missing_hara if {
	expected := "Hazard Analysis and Risk Assessment (HARA) is incomplete or missing."
	vehicle_safety.deny[expected] with input as input_without_hara
}

# Fixture: a safety assessment with every section present except hara_analysis.
input_without_hara := {"safety_assessment": {
	"asil_determination": {
		"status": "completed",
		"final_asil_level": "ASIL C",
	},
	"sotif_analysis": {
		"status": "completed",
		"scenarios_analyzed": [{"id": "S-001", "description": "Sensor failure in heavy rain"}],
	},
	"odd_definition": {
		"status": "defined",
		"conditions": {
			"road_types": ["highway"],
			"weather": ["clear"],
			"traffic": ["light"],
		},
	},
}}
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.academic_integrity
package industry_specific.education.v1.academic_integrity

# @title Detailed Acceptable AI Use
# @description This policy defines the acceptable use of AI tools by students based on the course policy and the type of assignment.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.academic_integrity
package industry_specific.education.v1.academic_integrity

import rego.v1

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.assessment_and_evaluation
package industry_specific.education.v1.assessment_and_evaluation

# @title Detailed Human-in-the-Loop Grading
# @description This policy mandates human oversight for automated grading systems, especially for high-stakes assessments.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.assessment_and_evaluation
package industry_specific.education.v1.assessment_and_evaluation

# @title Detailed Responsible AI Proctoring
# @description This policy ensures that AI proctoring systems are used responsibly, respecting student privacy and providing due process.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.fairness_and_equity
package industry_specific.education.v1.fairness_and_equity

# @title Detailed Digital Divide Mitigation
# @description This policy ensures that technology-based assignments provide equitable alternatives for students facing a digital divide.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.fairness_and_equity
package industry_specific.education.v1.fairness_and_equity

# @title Detailed Equitable Admissions Systems
# @description This policy evaluates AI-driven admissions systems to ensure they do not create or amplify inequities.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.fairness_and_equity
package industry_specific.education.v1.fairness_and_equity

# @title Detailed Unbiased Automated Grading
# @description This policy evaluates automated grading systems to ensure they are fair and equitable across different demographic groups.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.safe_learning_environment
package industry_specific.education.v1.safe_learning_environment

# @title Detailed Age-Appropriate Content
# @description This policy evaluates whether AI-generated content is appropriate for the student's age and the educational context.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.safe_learning_environment
package industry_specific.education.v1.safe_learning_environment

# @title Detailed Instructional Tool Vetting
# @description This policy ensures that third-party AI tools are properly vetted against security, privacy, and pedagogical standards before use.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.student_data_privacy
package industry_specific.education.v1.student_data_privacy

# @title Detailed COPPA Compliance
# @description This policy ensures that the collection and processing of personal information from children under 13 complies with COPPA.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.student_data_privacy
package industry_specific.education.v1.student_data_privacy

# @title Detailed Data Minimization
# @description This policy ensures that data collection is limited to what is strictly necessary for a specified purpose.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package education.v1.student_data_privacy
package industry_specific.education.v1.student_data_privacy

# @title Detailed FERPA Compliance
# @description This policy evaluates data access requests against the Family Educational Rights and Privacy Act (FERPA).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,27 +16,27 @@ import data.global.v1.common.risk_management

# Define helper rules to check which evaluations failed/passed
fairness_eval_fails if {
not common_fairness.passes_fairness_threshold(input.evaluation, object.get(input.params, "fairness_threshold", 0.85))
input.evaluation.fairness.score < object.get(input.params, "fairness_threshold", 0.85)
}

content_safety_eval_fails if {
not content_safety.passes_content_safety_threshold(input.evaluation, object.get(input.params, "content_safety_threshold", 0.90))
input.evaluation.content_safety.score < object.get(input.params, "content_safety_threshold", 0.90)
}

risk_management_eval_fails if {
not risk_management.passes_risk_threshold(input.evaluation, object.get(input.params, "risk_management_threshold", 0.90))
input.evaluation.risk_management.score < object.get(input.params, "risk_management_threshold", 0.90)
}

fairness_passes if {
common_fairness.passes_fairness_threshold(input.evaluation, object.get(input.params, "fairness_threshold", 0.85))
input.evaluation.fairness.score >= object.get(input.params, "fairness_threshold", 0.85)
}

content_safety_passes if {
content_safety.passes_content_safety_threshold(input.evaluation, object.get(input.params, "content_safety_threshold", 0.90))
input.evaluation.content_safety.score >= object.get(input.params, "content_safety_threshold", 0.90)
}

risk_management_passes if {
risk_management.passes_risk_threshold(input.evaluation, object.get(input.params, "risk_management_threshold", 0.90))
input.evaluation.risk_management.score >= object.get(input.params, "risk_management_threshold", 0.90)
}

# Create individual arrays based on evaluation results
Expand Down