Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 28 additions & 1 deletion packages/core/src/queues.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,17 @@
import Database from 'better-sqlite3';
import path from 'path';
import { EventEmitter } from 'events';
import { TINYCLAW_HOME } from './config';
import { TINYCLAW_HOME, getSettings } from './config';
import { log } from './logging';
import { MessageJobData, ResponseJobData } from './types';

// SQLite file backing the message/response queues, under the tinyclaw home dir.
const QUEUE_DB_PATH = path.join(TINYCLAW_HOME, 'tinyclaw.db');
// Retry cap for queue jobs — NOTE(review): consuming code is outside this chunk; confirm semantics there.
const MAX_RETRIES = 5;

// Agent loop protection defaults — configurable via settings.json protection block
// (settings.protection.max_agent_messages_per_minute overrides the default below).
const DEFAULT_MAX_AGENT_MESSAGES_PER_MINUTE = 10;
// Sliding window (ms) over which agent-originated messages are counted: 60 seconds.
const RATE_WINDOW_MS = 60_000;

// Shared database handle; starts as null and is opened on demand by getDb().
let db: Database.Database | null = null;
// Process-wide emitter for queue events; consumers subscribe elsewhere in the package.
export const queueEvents = new EventEmitter();

Expand Down Expand Up @@ -78,6 +83,28 @@ function getDb(): Database.Database {

export function enqueueMessage(data: MessageJobData): number | null {
const now = Date.now();

// Agent loop protection: rate-limit agent-to-agent messages.
// When fromAgent is set, this message was generated by an agent (not a human).
// Check how many such messages the target agent already has queued in the last
// minute. If over the limit, drop the message and log a warning instead of
// letting an agent feedback loop exhaust the API budget.
if (data.fromAgent) {
const settings = getSettings();
const maxPerMinute = settings.protection?.max_agent_messages_per_minute ?? DEFAULT_MAX_AGENT_MESSAGES_PER_MINUTE;
const targetAgent = data.agent ?? 'default';
Comment on lines +93 to +95
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

getSettings() called on every agent message enqueue

getSettings() is invoked for every agent-originated message. Depending on the implementation in ./config, this could involve a file read or JSON parse on each call. In a burst scenario (e.g., chatroom fan-out to many teammates), this gets called for every single enqueue. Consider caching or passing the settings in from the call site to avoid repeated I/O on the hot path.

const recent = getDb().prepare(
`SELECT COUNT(*) as cnt FROM messages
WHERE agent=? AND from_agent IS NOT NULL
AND created_at > ? AND status IN ('pending','processing')`
).get(targetAgent, now - RATE_WINDOW_MS) as { cnt: number };
Comment on lines +96 to +100
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Rate limiter misses already-completed messages

The status IN ('pending','processing') filter means that if an agent processes messages quickly (each LLM call finishes before the next message is checked), those completed rows are excluded from the count. In a fast chatroom loop — the primary scenario this guard is designed to catch — messages could be enqueued, processed, and completed well within the 60-second window, keeping the pending/processing count at 0 or 1 and allowing the loop to bypass the limiter entirely.

Since pruneCompletedMessages defaults to a 24-hour retention window, completed messages within the last 60 seconds are still available in the table. Removing the status filter gives an accurate picture of the actual send rate regardless of processing speed:

Suggested change
const recent = getDb().prepare(
`SELECT COUNT(*) as cnt FROM messages
WHERE agent=? AND from_agent IS NOT NULL
AND created_at > ? AND status IN ('pending','processing')`
).get(targetAgent, now - RATE_WINDOW_MS) as { cnt: number };
const recent = getDb().prepare(
`SELECT COUNT(*) as cnt FROM messages
WHERE agent=? AND from_agent IS NOT NULL
AND created_at > ?`
).get(targetAgent, now - RATE_WINDOW_MS) as { cnt: number };


if (recent.cnt >= maxPerMinute) {
log('WARN', `[LoopGuard] Dropped agent-to-agent message: @${data.fromAgent} → @${targetAgent} (${recent.cnt} messages in last 60s, limit ${maxPerMinute}). Possible agent feedback loop.`);
return null;
}
}

try {
const r = getDb().prepare(
`INSERT INTO messages (message_id,channel,sender,sender_id,message,agent,files,conversation_id,from_agent,status,created_at,updated_at)
Expand Down
7 changes: 7 additions & 0 deletions packages/core/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,13 @@ export interface Settings {
monitoring?: {
heartbeat_interval?: number;
};
protection?: {
// Max agent-to-agent messages a single agent may receive per minute before
// the loop guard drops further messages. Default: 10.
max_agent_messages_per_minute?: number;
// Max total agent exchanges in a single team conversation chain. Default: 10.
max_chain_depth?: number;
};
}

export interface MessageData {
Expand Down
4 changes: 2 additions & 2 deletions packages/teams/src/conversation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import { convertTagsToReadable, extractTeammateMentions, extractChatRoomMessages
// Active conversations — tracks in-flight team message passing
export const conversations = new Map<string, Conversation>();

export const MAX_CONVERSATION_MESSAGES = 50;
// Fallback cap on total messages in one team conversation chain, used when
// settings.protection.max_chain_depth is not set (was a hard-coded 50 before).
export const DEFAULT_MAX_CONVERSATION_MESSAGES = 10;

// Per-conversation locks to prevent race conditions — keyed by conversation id;
// the stored Promise presumably serializes handlers for that id (awaited by callers outside this view).
const conversationLocks = new Map<string, Promise<void>>();
Expand Down Expand Up @@ -245,7 +245,7 @@ export async function handleTeamResponse(params: {
responses: [],
files: new Set(),
totalMessages: 0,
maxMessages: MAX_CONVERSATION_MESSAGES,
maxMessages: getSettings().protection?.max_chain_depth ?? DEFAULT_MAX_CONVERSATION_MESSAGES,
teamContext,
startTime: Date.now(),
outgoingMentions: new Map(),
Expand Down