diff --git a/GEMINI.md b/GEMINI.md
index 97ce56b..7f08886 100644
--- a/GEMINI.md
+++ b/GEMINI.md
@@ -81,3 +81,9 @@ The policies in this directory are based on the official text of the EU AI Act.
 - For every `my_policy.rego` file, you must create a corresponding `my_policy_test.rego` in the same directory.
 - Tests should cover both `allow`/`compliant` and `deny`/`non-compliant` scenarios.
 - Use mock `input` data to simulate realistic policy evaluation scenarios.
+
+## GitHub workflows MUST pass
+- opa check .
+- regal lint .
+- Both checks above must pass before merging.
+- Use web search for hints on how to fix regal lint issues if necessary.
diff --git a/industry_specific/automotive/v1/vehicle_safety/vehicle_safety.rego b/industry_specific/automotive/v1/vehicle_safety/vehicle_safety.rego
index 06f8082..2f02234 100644
--- a/industry_specific/automotive/v1/vehicle_safety/vehicle_safety.rego
+++ b/industry_specific/automotive/v1/vehicle_safety/vehicle_safety.rego
@@ -1,48 +1,73 @@
-package industry_specific.automotive.v1.vehicle_safety
-
-import rego.v1
-
-# Metadata
-metadata := {
-	"title": "Automotive Vehicle Safety Requirements",
-	"description": "Placeholder for automotive vehicle safety requirements for AI systems",
-	"status": "PLACEHOLDER - Pending detailed implementation",
-	"version": "1.0.0",
-	"category": "Industry-Specific",
-	"references": [
-		"ISO 26262: https://www.iso.org/standard/68383.html",
-		"ISO/PAS 21448 SOTIF: https://www.iso.org/standard/70939.html",
-		concat("", [
-			"UNECE Regulations on Automated Driving: ",
-			"https://unece.org/transport/vehicle-regulations/wp29/wp29-regulations-under-1958-agreement",
-		]),
-	],
-}
-
-# Default deny
-default allow := false
-
-# This placeholder policy will always return non-compliant with implementation_pending=true
-non_compliant := true
-
-implementation_pending := true
-
-# Define the compliance report
-compliance_report := {
-	"policy": "Automotive Vehicle Safety Requirements",
-	"version": "1.0.0",
-	"status": "PLACEHOLDER - Pending detailed implementation",
-	"overall_result": false,
-	"implementation_pending": true,
-	"details": {"message": concat(" ", [
-		"Automotive vehicle safety policy implementation is pending.",
-		"This is a placeholder that will be replaced with actual compliance checks in a future release.",
-	])},
-	"recommendations": [
-		"Check back for future releases with automotive-specific evaluations",
-		"Consider using global compliance policies in the meantime",
-		"Review ISO 26262 for functional safety requirements",
-		"Consider ISO/PAS 21448 (SOTIF) for safety of the intended functionality",
-		"Implement preliminary safety assessment based on automotive industry standards",
-	],
-}
+package industry_specific.automotive.v1.vehicle_safety
+
+import rego.v1
+
+# @title Automotive Vehicle Safety Requirements
+# @description This policy evaluates AI systems in automotive applications for compliance with key safety standards, including ISO 26262 and ISO/PAS 21448 (SOTIF).
+# @version 1.0
+# @source ISO 26262: https://www.iso.org/standard/68383.html
+# @source ISO/PAS 21448 SOTIF: https://www.iso.org/standard/70939.html
+# @source UNECE Regulations on Automated Driving: https://unece.org/transport/vehicle-regulations/wp29/wp29-regulations-under-1958-agreement
+
+default compliant := false
+
+compliant if {
+	count(deny) == 0
+}
+
+# Hazard Analysis and Risk Assessment (HARA)
+default hara_analysis_is_compliant := false
+
+hara_analysis_is_compliant if {
+	# Check for the presence of a comprehensive safety assessment in the input
+	input.safety_assessment.hara_analysis.status == "completed"
+	count(input.safety_assessment.hara_analysis.identified_hazards) > 0
+}
+
+# Automotive Safety Integrity Level (ASIL) Determination
+asil_determination_is_compliant if {
+	object.get(input.safety_assessment, "asil_determination", false)
+	is_object(input.safety_assessment.asil_determination)
+	input.safety_assessment.asil_determination.status == "completed"
+	is_string(input.safety_assessment.asil_determination.final_asil_level)
+	input.safety_assessment.asil_determination.final_asil_level in ["ASIL A", "ASIL B", "ASIL C", "ASIL D", "QM"]
+}
+
+# Safety of the Intended Functionality (SOTIF) Analysis
+sotif_analysis_is_compliant if {
+	object.get(input.safety_assessment, "sotif_analysis", false)
+	is_object(input.safety_assessment.sotif_analysis)
+	input.safety_assessment.sotif_analysis.status == "completed"
+	count(input.safety_assessment.sotif_analysis.scenarios_analyzed) > 0
+}
+
+# Operational Design Domain (ODD) Definition
+odd_definition_is_compliant if {
+	object.get(input.safety_assessment, "odd_definition", false)
+	is_object(input.safety_assessment.odd_definition)
+	input.safety_assessment.odd_definition.status == "defined"
+	object.get(input.safety_assessment.odd_definition.conditions, "road_types", false)
+	object.get(input.safety_assessment.odd_definition.conditions, "weather", false)
+	object.get(input.safety_assessment.odd_definition.conditions, "traffic", false)
+}
+
+# Deny rule with detailed messages
+deny[msg] if {
+	not hara_analysis_is_compliant
+	msg := "Hazard Analysis and Risk Assessment (HARA) is incomplete or missing."
+}
+
+deny[msg] if {
+	not asil_determination_is_compliant
+	msg := "ASIL Determination is incomplete or invalid."
+}
+
+deny[msg] if {
+	not sotif_analysis_is_compliant
+	msg := "Safety of the Intended Functionality (SOTIF) analysis is incomplete or missing."
+}
+
+deny[msg] if {
+	not odd_definition_is_compliant
+	msg := "Operational Design Domain (ODD) is not clearly defined or is missing required conditions."
+}
diff --git a/industry_specific/automotive/v1/vehicle_safety/vehicle_safety_test.rego b/industry_specific/automotive/v1/vehicle_safety/vehicle_safety_test.rego
new file mode 100644
index 0000000..502200d
--- /dev/null
+++ b/industry_specific/automotive/v1/vehicle_safety/vehicle_safety_test.rego
@@ -0,0 +1,59 @@
+package industry_specific.automotive.v1.vehicle_safety_test
+
+import rego.v1
+
+import data.industry_specific.automotive.v1.vehicle_safety
+
+# Test case for a compliant AI system
+test_compliant_system if {
+	vehicle_safety.compliant with input as {"safety_assessment": {
+		"hara_analysis": {
+			"status": "completed",
+			"identified_hazards": [
+				{"id": "H-001", "description": "Unintended acceleration"},
+				{"id": "H-002", "description": "Unintended braking"},
+			],
+		},
+		"asil_determination": {
+			"status": "completed",
+			"final_asil_level": "ASIL D",
+		},
+		"sotif_analysis": {
+			"status": "completed",
+			"scenarios_analyzed": [
+				{"id": "S-001", "description": "Sensor failure in heavy rain"},
+				{"id": "S-002", "description": "Misinterpretation of road signs"},
+			],
+		},
+		"odd_definition": {
+			"status": "defined",
+			"conditions": {
+				"road_types": ["highway", "urban"],
+				"weather": ["clear", "rain"],
+				"traffic": ["light", "moderate"],
+			},
+		},
+	}}
+}
+
+# Test case for a non-compliant system (missing HARA)
+test_missing_hara if {
+	vehicle_safety.deny["Hazard Analysis and Risk Assessment (HARA) is incomplete or missing."] with input as {"safety_assessment": {
+		"asil_determination": {
+			"status": "completed",
+			"final_asil_level": "ASIL C",
+		},
+		"sotif_analysis": {
+			"status": "completed",
+			"scenarios_analyzed": [{"id": "S-001", "description": "Sensor failure in heavy rain"}],
+		},
+		"odd_definition": {
+			"status": "defined",
+			"conditions": {
+				"road_types": ["highway"],
+				"weather": ["clear"],
+				"traffic": ["light"],
+			},
+		},
+	}}
+}
diff --git a/industry_specific/education/v1/academic_integrity/acceptable_ai_use.rego b/industry_specific/education/v1/academic_integrity/acceptable_ai_use.rego
index 30599da..9ccb2e5 100644
--- a/industry_specific/education/v1/academic_integrity/acceptable_ai_use.rego
+++ b/industry_specific/education/v1/academic_integrity/acceptable_ai_use.rego
@@ -1,4 +1,4 @@
-package education.v1.academic_integrity
+package industry_specific.education.v1.academic_integrity
 
 # @title Detailed Acceptable AI Use
 # @description This policy defines the acceptable use of AI tools by students based on the course policy and the type of assignment.
diff --git a/industry_specific/education/v1/academic_integrity/ai_plagiarism_detection.rego b/industry_specific/education/v1/academic_integrity/ai_plagiarism_detection.rego
index d171cd2..7a9a18b 100644
--- a/industry_specific/education/v1/academic_integrity/ai_plagiarism_detection.rego
+++ b/industry_specific/education/v1/academic_integrity/ai_plagiarism_detection.rego
@@ -1,4 +1,4 @@
-package education.v1.academic_integrity
+package industry_specific.education.v1.academic_integrity
 
 import rego.v1
 
diff --git a/industry_specific/education/v1/assessment_and_evaluation/human_in_the_loop_grading.rego b/industry_specific/education/v1/assessment_and_evaluation/human_in_the_loop_grading.rego
index 4973a9e..5f21faa 100644
--- a/industry_specific/education/v1/assessment_and_evaluation/human_in_the_loop_grading.rego
+++ b/industry_specific/education/v1/assessment_and_evaluation/human_in_the_loop_grading.rego
@@ -1,4 +1,4 @@
-package education.v1.assessment_and_evaluation
+package industry_specific.education.v1.assessment_and_evaluation
 
 # @title Detailed Human-in-the-Loop Grading
 # @description This policy mandates human oversight for automated grading systems, especially for high-stakes assessments.
diff --git a/industry_specific/education/v1/assessment_and_evaluation/responsible_ai_proctoring.rego b/industry_specific/education/v1/assessment_and_evaluation/responsible_ai_proctoring.rego
index f2a5584..5daadb3 100644
--- a/industry_specific/education/v1/assessment_and_evaluation/responsible_ai_proctoring.rego
+++ b/industry_specific/education/v1/assessment_and_evaluation/responsible_ai_proctoring.rego
@@ -1,4 +1,4 @@
-package education.v1.assessment_and_evaluation
+package industry_specific.education.v1.assessment_and_evaluation
 
 # @title Detailed Responsible AI Proctoring
 # @description This policy ensures that AI proctoring systems are used responsibly, respecting student privacy and providing due process.
diff --git a/industry_specific/education/v1/fairness_and_equity/digital_divide_mitigation.rego b/industry_specific/education/v1/fairness_and_equity/digital_divide_mitigation.rego
index 3ad3e07..e911963 100644
--- a/industry_specific/education/v1/fairness_and_equity/digital_divide_mitigation.rego
+++ b/industry_specific/education/v1/fairness_and_equity/digital_divide_mitigation.rego
@@ -1,4 +1,4 @@
-package education.v1.fairness_and_equity
+package industry_specific.education.v1.fairness_and_equity
 
 # @title Detailed Digital Divide Mitigation
 # @description This policy ensures that technology-based assignments provide equitable alternatives for students facing a digital divide.
diff --git a/industry_specific/education/v1/fairness_and_equity/equitable_admissions_systems.rego b/industry_specific/education/v1/fairness_and_equity/equitable_admissions_systems.rego
index dc46ad0..b6f4a17 100644
--- a/industry_specific/education/v1/fairness_and_equity/equitable_admissions_systems.rego
+++ b/industry_specific/education/v1/fairness_and_equity/equitable_admissions_systems.rego
@@ -1,4 +1,4 @@
-package education.v1.fairness_and_equity
+package industry_specific.education.v1.fairness_and_equity
 
 # @title Detailed Equitable Admissions Systems
 # @description This policy evaluates AI-driven admissions systems to ensure they do not create or amplify inequities.
diff --git a/industry_specific/education/v1/fairness_and_equity/unbiased_automated_grading.rego b/industry_specific/education/v1/fairness_and_equity/unbiased_automated_grading.rego
index 0775c77..999fe44 100644
--- a/industry_specific/education/v1/fairness_and_equity/unbiased_automated_grading.rego
+++ b/industry_specific/education/v1/fairness_and_equity/unbiased_automated_grading.rego
@@ -1,4 +1,4 @@
-package education.v1.fairness_and_equity
+package industry_specific.education.v1.fairness_and_equity
 
 # @title Detailed Unbiased Automated Grading
 # @description This policy evaluates automated grading systems to ensure they are fair and equitable across different demographic groups.
diff --git a/industry_specific/education/v1/safe_learning_environment/age_appropriate_content.rego b/industry_specific/education/v1/safe_learning_environment/age_appropriate_content.rego
index d5b709a..d5e41f8 100644
--- a/industry_specific/education/v1/safe_learning_environment/age_appropriate_content.rego
+++ b/industry_specific/education/v1/safe_learning_environment/age_appropriate_content.rego
@@ -1,4 +1,4 @@
-package education.v1.safe_learning_environment
+package industry_specific.education.v1.safe_learning_environment
 
 # @title Detailed Age-Appropriate Content
 # @description This policy evaluates whether AI-generated content is appropriate for the student's age and the educational context.
diff --git a/industry_specific/education/v1/safe_learning_environment/instructional_tool_vetting.rego b/industry_specific/education/v1/safe_learning_environment/instructional_tool_vetting.rego
index f304def..818da57 100644
--- a/industry_specific/education/v1/safe_learning_environment/instructional_tool_vetting.rego
+++ b/industry_specific/education/v1/safe_learning_environment/instructional_tool_vetting.rego
@@ -1,4 +1,4 @@
-package education.v1.safe_learning_environment
+package industry_specific.education.v1.safe_learning_environment
 
 # @title Detailed Instructional Tool Vetting
 # @description This policy ensures that third-party AI tools are properly vetted against security, privacy, and pedagogical standards before use.
diff --git a/industry_specific/education/v1/student_data_privacy/coppa_compliance.rego b/industry_specific/education/v1/student_data_privacy/coppa_compliance.rego
index e610121..15565a4 100644
--- a/industry_specific/education/v1/student_data_privacy/coppa_compliance.rego
+++ b/industry_specific/education/v1/student_data_privacy/coppa_compliance.rego
@@ -1,4 +1,4 @@
-package education.v1.student_data_privacy
+package industry_specific.education.v1.student_data_privacy
 
 # @title Detailed COPPA Compliance
 # @description This policy ensures that the collection and processing of personal information from children under 13 complies with COPPA.
diff --git a/industry_specific/education/v1/student_data_privacy/data_minimization.rego b/industry_specific/education/v1/student_data_privacy/data_minimization.rego
index 8148b76..1f3db1d 100644
--- a/industry_specific/education/v1/student_data_privacy/data_minimization.rego
+++ b/industry_specific/education/v1/student_data_privacy/data_minimization.rego
@@ -1,4 +1,4 @@
-package education.v1.student_data_privacy
+package industry_specific.education.v1.student_data_privacy
 
 # @title Detailed Data Minimization
 # @description This policy ensures that data collection is limited to what is strictly necessary for a specified purpose.
diff --git a/industry_specific/education/v1/student_data_privacy/ferpa_compliance.rego b/industry_specific/education/v1/student_data_privacy/ferpa_compliance.rego
index 8562712..62d085e 100644
--- a/industry_specific/education/v1/student_data_privacy/ferpa_compliance.rego
+++ b/industry_specific/education/v1/student_data_privacy/ferpa_compliance.rego
@@ -1,4 +1,4 @@
-package education.v1.student_data_privacy
+package industry_specific.education.v1.student_data_privacy
 
 # @title Detailed FERPA Compliance
 # @description This policy evaluates data access requests against the Family Educational Rights and Privacy Act (FERPA).
diff --git a/industry_specific/healthcare/v1/diagnostic_safety/diagnostic_safety.rego b/industry_specific/healthcare/v1/diagnostic_safety/diagnostic_safety.rego
index 3847573..fb74ed8 100644
--- a/industry_specific/healthcare/v1/diagnostic_safety/diagnostic_safety.rego
+++ b/industry_specific/healthcare/v1/diagnostic_safety/diagnostic_safety.rego
@@ -16,27 +16,27 @@ import data.global.v1.common.risk_management
 
 # Define helper rules to check which evaluations failed/passed
 fairness_eval_fails if {
-	not common_fairness.passes_fairness_threshold(input.evaluation, object.get(input.params, "fairness_threshold", 0.85))
+	input.evaluation.fairness.score < object.get(input.params, "fairness_threshold", 0.85)
 }
 
 content_safety_eval_fails if {
-	not content_safety.passes_content_safety_threshold(input.evaluation, object.get(input.params, "content_safety_threshold", 0.90))
+	input.evaluation.content_safety.score < object.get(input.params, "content_safety_threshold", 0.90)
 }
 
 risk_management_eval_fails if {
-	not risk_management.passes_risk_threshold(input.evaluation, object.get(input.params, "risk_management_threshold", 0.90))
+	input.evaluation.risk_management.score < object.get(input.params, "risk_management_threshold", 0.90)
 }
 
 fairness_passes if {
-	common_fairness.passes_fairness_threshold(input.evaluation, object.get(input.params, "fairness_threshold", 0.85))
+	input.evaluation.fairness.score >= object.get(input.params, "fairness_threshold", 0.85)
 }
 
 content_safety_passes if {
-	content_safety.passes_content_safety_threshold(input.evaluation, object.get(input.params, "content_safety_threshold", 0.90))
+	input.evaluation.content_safety.score >= object.get(input.params, "content_safety_threshold", 0.90)
 }
 
 risk_management_passes if {
-	risk_management.passes_risk_threshold(input.evaluation, object.get(input.params, "risk_management_threshold", 0.90))
+	input.evaluation.risk_management.score >= object.get(input.params, "risk_management_threshold", 0.90)
 }
 
 # Create individual arrays based on evaluation results