Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions ax_cli/commands/alerts.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,6 +311,90 @@ def _resolve_target_from_task(client: Any, task_id: str) -> tuple[str | None, st
return None, None


# Statuses that permanently stop a reminder (see docs/reminder-lifecycle.md).
_TERMINAL_TASK_STATUSES = frozenset({"completed", "closed", "done", "cancelled", "canceled", "archived", "resolved"})


def _task_lifecycle(client: Any, task_id: str) -> dict[str, Any] | None:
    """Classify a task for reminder-lifecycle routing.

    Returns a dict with:
    - ``status`` — raw status string (lower-cased) or ``None``
    - ``is_terminal`` — reminder should STOP entirely (disable policy)
    - ``is_pending_review`` — reminder should reroute to a review owner
    - ``review_owner`` — resolved handle of the review owner (may be ``None``)
    - ``creator_name`` — resolved handle of the task creator (fallback target)
    - ``assignee_name`` — resolved handle of the assignee (default target)
    - ``snapshot`` — task snapshot compatible with ``_fetch_task_snapshot``

    Returns ``None`` on fetch failure; callers should fall through to the
    pre-lifecycle default behavior (fire to assignee, do not disable).
    """
    # Best-effort fetch: any failure (network, HTTP status, bad JSON) maps to
    # ``None`` so the caller keeps the pre-lifecycle default behavior.
    try:
        r = client._http.get(
            f"/api/v1/tasks/{task_id}",
            headers=client._with_agent(None),
        )
        r.raise_for_status()
        wrapper = client._parse_json(r)
    except Exception:
        return None

    # The API may wrap the record as {"task": {...}} or return it bare.
    # A non-dict payload yields ``task = None`` so the guard below rejects it.
    # (Previously a non-dict payload became ``{}``, which slipped past the
    # isinstance check and produced a meaningless lifecycle dict.)
    task = wrapper.get("task", wrapper) if isinstance(wrapper, dict) else None
    if not isinstance(task, dict):
        return None

    status_raw = task.get("status")
    status = str(status_raw).strip().lower() if isinstance(status_raw, str) else None

    requirements_raw = task.get("requirements")
    requirements = requirements_raw if isinstance(requirements_raw, dict) else {}
    tags_raw = task.get("tags")
    tags = tags_raw if isinstance(tags_raw, (list, tuple)) else []
    tag_set = {str(t).strip().lower() for t in tags if isinstance(t, str)}

    # Terminal: explicit terminal status, or a non-empty ``completed_at``
    # string (covers backends whose status enum is unusual).
    is_terminal = bool(status) and status in _TERMINAL_TASK_STATUSES
    completed_at = task.get("completed_at")
    if not is_terminal and isinstance(completed_at, str) and completed_at:
        is_terminal = True

    # Pending review: any one of several signals — status, tag, an explicit
    # requirements flag, or a review-owner field being present.
    pending_flag = (
        status == "pending_review"
        or "pending_review" in tag_set
        or bool(requirements.get("pending_review"))
        or bool(requirements.get("review_owner") or requirements.get("review_owner_id"))
    )

    # Review-owner resolution: an explicit string handle wins; otherwise try
    # resolving ``review_owner_id`` through the agents API.
    review_owner_name: str | None = None
    raw_owner = requirements.get("review_owner")
    if isinstance(raw_owner, str) and raw_owner:
        review_owner_name = raw_owner.strip().lstrip("@") or None
    if not review_owner_name:
        owner_id = requirements.get("review_owner_id")
        if isinstance(owner_id, str) and owner_id:
            review_owner_name = _agent_name_for(client, owner_id)

    assignee_id = task.get("assignee_id")
    assignee_name = _agent_name_for(client, str(assignee_id)) if isinstance(assignee_id, str) and assignee_id else None
    creator_id = task.get("creator_id")
    creator_name = _agent_name_for(client, str(creator_id)) if isinstance(creator_id, str) and creator_id else None

    # Snapshot mirrors ``_fetch_task_snapshot`` so callers can reuse this
    # fetch instead of issuing a second GET for the same task.
    snapshot = {k: task[k] for k in _TASK_SNAPSHOT_KEYS if task.get(k) is not None}
    if not snapshot.get("id"):
        snapshot["id"] = task_id
    if assignee_name:
        snapshot["assignee_name"] = assignee_name

    return {
        "status": status,
        "is_terminal": is_terminal,
        # Terminal wins over pending-review: a closed task never reroutes.
        "is_pending_review": pending_flag and not is_terminal,
        "review_owner": review_owner_name,
        "creator_name": creator_name,
        "assignee_name": assignee_name,
        "snapshot": snapshot,
    }


def _format_mention_content(target: str | None, reason: str, kind: str) -> str:
label = "Reminder" if kind == "reminder" else "Alert"
prefix = f"@{target} " if target else ""
Expand Down
45 changes: 41 additions & 4 deletions ax_cli/commands/reminders.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
_normalize_severity,
_resolve_target_from_task,
_strip_at,
_task_lifecycle,
_validate_timestamp,
)

Expand Down Expand Up @@ -248,15 +249,47 @@ def _fire_policy(client: Any, policy: dict[str, Any], *, now: _dt.datetime) -> d
reason = str(policy.get("reason") or "Please review this task.")
target = _strip_at(policy.get("target"))
target_resolved_from = None
if source_task and not target:
target, target_resolved_from = _resolve_target_from_task(client, source_task)

lifecycle = _task_lifecycle(client, source_task) if source_task else None

if lifecycle and lifecycle.get("is_terminal"):
policy["enabled"] = False
policy["disabled_reason"] = f"source task {source_task} is {lifecycle.get('status')}"
policy["updated_at"] = _iso(now)
return {
"policy_id": policy.get("id"),
"skipped": True,
"reason": f"source_task_terminal:{lifecycle.get('status')}",
"source_task_id": source_task,
"fired_at": None,
}

if lifecycle and lifecycle.get("is_pending_review"):
review_target = lifecycle.get("review_owner") or lifecycle.get("creator_name")
if review_target:
target = review_target
target_resolved_from = "review_owner" if lifecycle.get("review_owner") else "creator_fallback"
reason = f"[pending review] {reason}"
elif not target:
target, target_resolved_from = (lifecycle.get("assignee_name"), "assignee")
elif source_task and not target:
if lifecycle and lifecycle.get("assignee_name"):
target, target_resolved_from = lifecycle["assignee_name"], "assignee"
elif lifecycle and lifecycle.get("creator_name"):
target, target_resolved_from = lifecycle["creator_name"], "creator"
else:
target, target_resolved_from = _resolve_target_from_task(client, source_task)

try:
triggered_by = resolve_agent_name(client=client)
except Exception:
triggered_by = None

task_snapshot = _fetch_task_snapshot(client, source_task) if source_task else None
task_snapshot = (
lifecycle.get("snapshot")
if lifecycle and lifecycle.get("snapshot")
else (_fetch_task_snapshot(client, source_task) if source_task else None)
)

fired_at = _iso(now)
metadata = _build_alert_metadata(
Expand Down Expand Up @@ -380,7 +413,7 @@ def run(
}
except (httpx.ConnectError, httpx.ReadError) as exc:
result = {"policy_id": policy.get("id"), "error": str(exc)}
if not result.get("error"):
if not result.get("error") and not result.get("skipped"):
_advance_policy(policy, now=now, message_id=result.get("message_id"))
pass_results.append(result)
all_results.append(result)
Expand All @@ -403,6 +436,10 @@ def run(
for item in pass_results:
if item.get("error"):
console.print(f"[red]{item['policy_id']}[/red]: {item['error']}")
elif item.get("skipped"):
console.print(
f"[yellow]{item['policy_id']}[/yellow] skipped ({item.get('reason')}) — policy disabled"
)
else:
console.print(
f"[green]{item['policy_id']}[/green] fired "
Expand Down
85 changes: 85 additions & 0 deletions docs/reminder-lifecycle.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
# Reminder Lifecycle Contract

**Spec source:** tasks `e032bc49` (urgent) + `f00e36ac` (high), 2026-04-16.
**Owner:** `ax_cli/commands/reminders.py::_fire_policy` + `ax_cli/commands/alerts.py::_task_lifecycle`.
**Composes:** ACTIVITY-TAXONOMY-001 (reminder = Alert type), SEND-RECEIPTS-001 (delivery receipts).

## Problem

The local reminder runner (`ax reminders run`) was firing policies based only on `next_fire_at` — with no check on the underlying task's state. That produced two specific regressions dogfooded on 2026-04-16:

1. **Completed tasks kept ringing.** Closing a task did not stop reminders authored against it; they kept flooding the Activity Stream until `max_fires` was exhausted.
2. **Pending-review pings woke the worker, not the reviewer.** When a task moved to a waiting-for-review state, the runner kept mentioning the assignee ("you're late"), when the person who actually needed to act was the reviewer.

## Contract

A reminder is a *directed Alert* (per ACTIVITY-TAXONOMY-001 §4.1). Its target and whether it fires at all depend on the **lifecycle state of the source task**, not on the local policy alone.

### Lifecycle states (as seen by the runner)

| State | Signal on `task` | Runner behavior |
|-----------------------|-----------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
| **Active** | `status ∈ {open, in_progress, ...}` — not terminal, not pending-review | Fire to assignee (fall back to creator if no assignee). Default pre-2026-04-16 behavior. |
| **Pending review** | `status == "pending_review"` OR `tags` contains `pending_review` OR `requirements.pending_review` truthy OR `requirements.review_owner*` set | Fire with `[pending review]` prefix to `review_owner` (handle) → `review_owner_id` (resolved) → `creator_name`. Do **not** wake the worker/assignee. |
| **Terminal** | `status ∈ {completed, closed, done, cancelled, canceled, archived, resolved}` or `completed_at` is set | **Do not fire.** Disable the policy (`enabled = false`, `disabled_reason = "source task <id> is <status>"`). Emit a skipped-result so the caller does NOT advance `fired_count` or reschedule. |

### What "Terminal" means

- Task status enum includes: `completed`, `closed`, `done`, `cancelled`, `canceled`, `archived`, `resolved`. Any of those → terminal.
- Fallback: `completed_at` is a non-empty string → terminal even if the status string is unusual.
- Terminal is a one-way transition. The runner never re-enables a policy it disabled for lifecycle reasons; the user can re-add one against a new task if needed.

### What "Pending review" means

The backend task schema does not (yet) have a first-class `pending_review` flag, so the runner reads from any of these sources, in priority order:

1. `task.status == "pending_review"`
2. `"pending_review"` present in `task.tags[]`
3. `task.requirements.pending_review` is truthy
4. `task.requirements.review_owner` or `task.requirements.review_owner_id` is set

If any matches, the reminder reroutes. Target selection order:

1. `task.requirements.review_owner` (string handle)
2. `task.requirements.review_owner_id` → resolved via `/api/v1/agents/{id}`
3. `task.creator_id` → resolved to handle (creator-as-fallback escalation)
4. No route available → the runner keeps a manually-set `target` if the policy has one; otherwise it falls back to the default (assignee). This is a soft-fail; the reminder still fires against the worker rather than being silently dropped, but the operator should see `target_resolved_from=assignee` in the emitted metadata and treat it as a gap.

The reminder `reason` is prefixed with `[pending review]` so the receiver sees why they were pinged instead of the worker.

## Envelope changes (metadata.reminder_policy)

`_fire_policy` emits a `metadata.reminder_policy` block that now carries:

| Field | Values |
|--------------------------|------------------------------------------------------------------------------|
| `target_resolved_from` | `assignee` \| `creator` \| `review_owner` \| `creator_fallback` \| `manual` |
| `policy_id` | (unchanged) |
| `fired_count` | (unchanged — not advanced on lifecycle-skipped runs) |

Terminal-skip does NOT emit a message at all — there is no envelope. The run output surfaces it as `{"skipped": true, "reason": "source_task_terminal:<status>"}`.

## Backwards compatibility

- Policies that never had a `source_task_id` are unaffected (no lifecycle lookup runs).
- Policies where the task fetch fails (404, network) fall through to pre-lifecycle behavior: fire to the stored `target` (or `_resolve_target_from_task` fallback). Runner does **not** disable on fetch failure — that would cascade into quiet drops during backend outages.
- An existing policy whose task is already terminal will self-disable on the next pass. No manual cleanup needed.

## Test coverage

See `tests/test_reminders_commands.py`:

- `test_run_once_skips_and_disables_when_source_task_is_terminal` — completed task → no message, policy disabled, `fired_count` unchanged.
- `test_run_once_reroutes_pending_review_to_review_owner` — pending_review with `review_owner` → message to reviewer, `[pending review]` prefix, `target_resolved_from=review_owner`.
- `test_run_once_pending_review_falls_back_to_creator_when_no_owner` — pending_review flag only → routes to creator, `target_resolved_from=creator_fallback`.
- `test_run_once_without_task_snapshot_still_fires` — fetch failure fallback path unchanged.

## Non-goals (v1)

- No backend schema change. The runner reads from existing task fields / requirements dict / tags list, whichever the backend happens to expose.
- No supervisor/aX escalation beyond `creator_name` fallback. A future revision can add a configured escalation target (ENV `AX_REMINDER_ESCALATION_TARGET`) or a space-level default.
- No snooze/dismiss reply. The existing `ax alerts ack`/`snooze` commands remain the user-facing control; adding a `skip_review` auto-resolve is out of scope for this change.

## Change log

- 2026-04-16 — Initial contract (@orion). Ships with tests + `_task_lifecycle` helper in `alerts.py`. Picks up source task status on every `_fire_policy` call; one extra GET per due policy (cost acceptable for local dogfood loop).
Loading
Loading