From 218792fdadab0dc4c3c1fdfbd9df66dd5dc142d1 Mon Sep 17 00:00:00 2001 From: Context7 Bot Date: Tue, 7 Apr 2026 13:19:52 +0000 Subject: [PATCH] docs: add auto-generated code documentation --- docs/codedocs/api-reference/debounce.md | 78 ++++++++++++ docs/codedocs/api-reference/lock.md | 131 ++++++++++++++++++++ docs/codedocs/architecture.md | 53 ++++++++ docs/codedocs/distributed-debounce.md | 94 ++++++++++++++ docs/codedocs/guides/critical-section.md | 107 ++++++++++++++++ docs/codedocs/guides/serverless-debounce.md | 98 +++++++++++++++ docs/codedocs/index.md | 109 ++++++++++++++++ docs/codedocs/lease-and-retry.md | 78 ++++++++++++ docs/codedocs/lock-lifecycle.md | 114 +++++++++++++++++ docs/codedocs/types.md | 68 ++++++++++ 10 files changed, 930 insertions(+) create mode 100644 docs/codedocs/api-reference/debounce.md create mode 100644 docs/codedocs/api-reference/lock.md create mode 100644 docs/codedocs/architecture.md create mode 100644 docs/codedocs/distributed-debounce.md create mode 100644 docs/codedocs/guides/critical-section.md create mode 100644 docs/codedocs/guides/serverless-debounce.md create mode 100644 docs/codedocs/index.md create mode 100644 docs/codedocs/lease-and-retry.md create mode 100644 docs/codedocs/lock-lifecycle.md create mode 100644 docs/codedocs/types.md diff --git a/docs/codedocs/api-reference/debounce.md b/docs/codedocs/api-reference/debounce.md new file mode 100644 index 0000000..9a51266 --- /dev/null +++ b/docs/codedocs/api-reference/debounce.md @@ -0,0 +1,78 @@ +--- +title: "Debounce" +description: "API reference for the Debounce class implemented in src/debounce.ts." +--- + +The `Debounce` class provides a distributed debounce across instances using a shared Redis counter. It is implemented in `src/debounce.ts`. In this repository, it is not re-exported from `src/index.ts`, so verify your package exports before importing from the root. 
## Constructor
```typescript
new Debounce(config: DebounceConfig)
```

| Parameter | Type | Default | Description |
| --- | --- | --- | --- |
| `id` | `string` | — | Unique Redis key for the debounce window. |
| `redis` | `Redis` | — | Upstash Redis client instance. |
| `wait` | `number` | `1000` | Window size in milliseconds. |
| `callback` | `(...args: any[]) => any` | — | Function to run after the debounce window. |

**Example**
```typescript filename="debounce-ctor.ts"
import { Debounce } from "@upstash/lock";
import { Redis } from "@upstash/redis";

const debounced = new Debounce({
  id: "events:search",
  redis: Redis.fromEnv(),
  wait: 1000,
  callback: (query: string) => {
    console.log("search", query);
  },
});
```

## Methods

### `call`
Triggers the debounce flow. The callback runs only if this call is still the most recent after `wait` milliseconds.

```typescript
call(...args: any[]): Promise<void>
```

**Example**
```typescript filename="debounce-call.ts"
await debounced.call("upstash");
await debounced.call("lock");
```

## Behavior Notes
`call()` always waits the full `wait` duration before deciding whether to execute the callback. If you need immediate execution on the first call and suppression of subsequent calls, you should wrap the callback with your own “leading edge” logic. The implementation is intentionally minimal and uses Redis `INCR` and `GET` only, which keeps the number of round-trips low but provides no visibility into how many calls were suppressed. Also note that the last invocation wins: the arguments used to execute the callback are the ones provided by the most recent call that survives the window.
+ +**Example: passing multiple arguments** +```typescript filename="debounce-multi-args.ts" +await debounced.call({ id: "evt_1" }, "webhook"); +``` + +## Usage Pattern +```typescript filename="debounce-pattern.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); +const debounced = new Debounce({ + id: "webhooks:ingest", + redis, + wait: 2000, + callback: async (payload: { id: string }) => { + await ingestWebhook(payload); + }, +}); + +export async function handleWebhook(payload: { id: string }) { + await debounced.call(payload); +} +``` + +Related pages: [Distributed Debounce](../distributed-debounce), [Types](../types). diff --git a/docs/codedocs/api-reference/lock.md b/docs/codedocs/api-reference/lock.md new file mode 100644 index 0000000..a1bfcc0 --- /dev/null +++ b/docs/codedocs/api-reference/lock.md @@ -0,0 +1,131 @@ +--- +title: "Lock" +description: "API reference for the Lock class in @upstash/lock." +--- + +The `Lock` class provides a best-effort distributed lock backed by Upstash Redis. It is defined in `src/lock.ts` and re-exported from `src/index.ts`. + +## Constructor +```typescript +new Lock(config: LockCreateConfig) +``` + +| Parameter | Type | Default | Description | +| --- | --- | --- | --- | +| `id` | `string` | — | Unique Redis key for the lock. Use a stable, namespaced identifier. | +| `redis` | `Redis` | — | Upstash Redis client instance. | +| `lease` | `number` | `10000` | Lease duration in milliseconds. | +| `retry.attempts` | `number` | `3` | Number of acquire attempts before giving up. | +| `retry.delay` | `number` | `100` | Delay between attempts in milliseconds. 
|

**Example**
```typescript filename="lock-ctor.ts"
import { Lock } from "@upstash/lock";
import { Redis } from "@upstash/redis";

const lock = new Lock({
  id: "jobs:cleanup",
  redis: Redis.fromEnv(),
  lease: 5_000,
  retry: { attempts: 2, delay: 250 },
});
```

## Methods

### `acquire`
Attempts to acquire the lock. Returns `true` on success, `false` otherwise.

```typescript
acquire(config?: LockAcquireConfig): Promise<boolean>
```

| Parameter | Type | Default | Description |
| --- | --- | --- | --- |
| `config.lease` | `number \| undefined` | — | Overrides the instance lease for this acquisition. |
| `config.retry.attempts` | `number \| undefined` | — | Overrides retry attempts for this acquisition. |
| `config.retry.delay` | `number \| undefined` | — | Overrides retry delay for this acquisition. |
| `config.uuid` | `string \| undefined` | — | UUID to use instead of `crypto.randomUUID()`. |

**Example**
```typescript filename="lock-acquire.ts"
const acquired = await lock.acquire({
  lease: 15_000,
  retry: { attempts: 5, delay: 200 },
});
```

### `release`
Safely releases the lock if the UUID matches the stored Redis value. Returns `true` if the key was deleted.

```typescript
release(): Promise<boolean>
```

**Example**
```typescript filename="lock-release.ts"
try {
  await doWork();
} finally {
  await lock.release();
}
```

### `extend`
Extends the current lease by the given amount in milliseconds. Returns `true` if the TTL was updated.

```typescript
extend(amt: number): Promise<boolean>
```

**Example**
```typescript filename="lock-extend.ts"
const ok = await lock.extend(10_000);
if (!ok) {
  throw new Error("lost lock");
}
```

### `getStatus`
Returns `"ACQUIRED"` if the lock’s UUID matches Redis, otherwise `"FREE"`.
```typescript
getStatus(): Promise<LockStatus>
```

**Example**
```typescript filename="lock-status.ts"
const status = await lock.getStatus();
if (status === "FREE") {
  console.log("not held");
}
```

### `id`
Read-only property returning the Redis key for this lock.

```typescript
const key: string = lock.id;
```

## Usage Pattern
```typescript filename="lock-pattern.ts"
import { Lock } from "@upstash/lock";
import { Redis } from "@upstash/redis";

const redis = Redis.fromEnv();

export async function runOnce() {
  const lock = new Lock({ id: "jobs:daily", redis, lease: 10_000 });

  if (!(await lock.acquire())) return;

  try {
    await doWork();
  } finally {
    await lock.release();
  }
}
```

Related pages: [Lock Lifecycle](../lock-lifecycle), [Lease and Retry](../lease-and-retry), [Types](../types).
diff --git a/docs/codedocs/architecture.md b/docs/codedocs/architecture.md
new file mode 100644
index 0000000..a24d411
--- /dev/null
+++ b/docs/codedocs/architecture.md
@@ -0,0 +1,53 @@
---
title: "Architecture"
description: "Internal structure of @upstash/lock and how locking and debouncing operations flow through Redis."
---

This library is intentionally small. The implementation lives in two core classes and a shared types module, with a single entry point that re-exports the public surface. The design prioritizes predictable Redis operations and minimal moving parts so the behavior is easy to reason about.

```mermaid
graph TD
  A[index.ts] --> B[Lock]
  A --> C[Types]
  B --> D[Redis SET NX PX]
  B --> E[Redis EVAL: release]
  B --> F[Redis EVAL: extend]
  C --> B
  G[Debounce] --> H[Redis INCR]
  G --> I[Redis GET]
```

**Key Design Decisions**
- **Single-key lock per critical section.** In `src/lock.ts`, each lock instance operates on exactly one Redis key (`config.id`). This keeps state minimal and allows `SET NX PX` to be the authoritative acquisition check. The key name becomes the coordination point for all instances.
+- **UUID ownership tracking.** The lock stores a UUID in Redis when acquired and keeps the same UUID in memory (`config.UUID`). `release()` and `extend()` use that UUID to guard against releasing or extending another instance’s lock. You can see this in the Lua scripts inside `src/lock.ts`. +- **Best-effort safety, not consensus.** The README explicitly discourages using this for correctness guarantees like leader election. The code mirrors that: no fencing tokens, no quorum, and no clock synchronization. This is a pragmatic trade-off for simplicity and serverless suitability. +- **Lua for atomic operations.** Redis does not provide a built-in “compare-and-delete” or “compare-and-extend” command. The library uses `eval` scripts to ensure those operations are atomic (`release()` and `extend()` in `src/lock.ts`). +- **Distributed debounce via a counter.** `src/debounce.ts` uses `INCR` and `GET` with a wait delay to determine the “last” invocation in a window. It is intentionally simple and does not track per-caller state. + +**How the Pieces Fit Together** +The public entry point is `src/index.ts`, which re-exports the `Lock` class and all exported types from `src/types.ts`. The `Lock` class is the primary API. `Debounce` is defined in `src/debounce.ts` and uses the same Redis client type, but it is not re-exported from `src/index.ts` in this repository. + +**Lock lifecycle data flow** +1. The caller constructs a `Lock` with a Redis client and key name. The constructor normalizes defaults (`lease`, `retry.attempts`, `retry.delay`) in `src/lock.ts`. +2. `acquire()` attempts a Redis `SET` with `NX` (only set if the key does not exist) and `PX` (set TTL in milliseconds). If it returns `OK`, the lock is acquired and the UUID is stored in memory. +3. If acquisition fails, the method waits `retry.delay` milliseconds and tries again up to `retry.attempts` times. +4. 
`release()` executes a Lua script that deletes the key only if the stored UUID matches the instance UUID, preventing accidental release by a different instance. +5. `extend()` executes a Lua script that reads the current TTL and extends it by a requested amount only if ownership still matches. + +**Debounce data flow** +1. Each call to `Debounce.call()` increments a counter at the debounce key using `INCR`. +2. The method sleeps for the configured `wait` duration. +3. It reads the current value from Redis and compares it to the value returned by `INCR` earlier. If the value changed, another call happened, so the callback is skipped. If it is unchanged, the callback runs. + +**Why this architecture works for serverless** +- No in-memory coordination; all coordination happens in Redis and survives cold starts. +- One key per lock/debounce, so the Redis footprint is predictable. +- A single `Redis` client instance (from `@upstash/redis`) is sufficient across all operations. + +If you want to see how these primitives map to user workflows, the next pages cover the lock lifecycle and debounce mechanics in detail. + + + How acquisition, release, and status checking work internally. + Configuration trade-offs that impact reliability. + How the debounce counter algorithm works. + diff --git a/docs/codedocs/distributed-debounce.md b/docs/codedocs/distributed-debounce.md new file mode 100644 index 0000000..af0c1a4 --- /dev/null +++ b/docs/codedocs/distributed-debounce.md @@ -0,0 +1,94 @@ +--- +title: "Distributed Debounce" +description: "How the shared counter algorithm collapses bursts of calls across instances." +--- + +A distributed debounce ensures that a callback runs only once after a burst of calls, even when those calls happen on different machines or serverless invocations. The implementation in `src/debounce.ts` uses a single Redis counter key to decide which call gets to run the callback. 
+ +```mermaid +sequenceDiagram + participant A as Instance A + participant B as Instance B + participant Redis as Upstash Redis + A->>Redis: INCR debounce:key (returns 41) + B->>Redis: INCR debounce:key (returns 42) + A->>A: wait 1000ms + B->>B: wait 1000ms + A->>Redis: GET debounce:key (returns 42) + A-->>A: skip callback + B->>Redis: GET debounce:key (returns 42) + B-->>B: run callback +``` + +**What the concept is** +A distributed debounce is a time-windowed gating mechanism. It does not enforce mutual exclusion; instead, it ensures only the last call in a window executes a callback. This is useful for collapsing bursts of events like webhooks, user typing, or repetitive cron triggers. + +**Why it exists** +In serverless and multi-instance environments, local debounce utilities do not coordinate across instances. This implementation uses Redis as a shared counter so that all callers can observe the same “latest” invocation. + +**How it works internally** +- `call()` increments a counter with `INCR` and stores the returned integer in a local variable (`thisTaskIncr`). +- It sleeps for the configured `wait` duration using `setTimeout`. +- After the wait, it reads the current counter with `GET`. +- If the counter value is still equal to `thisTaskIncr`, this call was the most recent, and it executes the callback. If the counter changed, a newer call happened and this call exits without running the callback. +- The code is intentionally minimal and relies on Redis atomicity for `INCR` and consistent `GET` reads. + +**How it relates to other concepts** +- The **Lock Lifecycle** concept guarantees mutual exclusion per key. Debounce does not and does not track ownership. +- The **Lease and Retry** concept is about acquiring a lock under contention. Debounce is about deferring execution until a quiet period. 
+ +**Basic usage** +```typescript filename="debounce-basic.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +const debounced = new Debounce({ + id: "events:search", + redis, + wait: 1000, + callback: (query: string) => { + console.log("search", query); + }, +}); + +await debounced.call("upstash"); +``` + +**Advanced usage: async callback and multiple arguments** +```typescript filename="debounce-advanced.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +const debounced = new Debounce({ + id: "events:ingest", + redis, + wait: 1500, + callback: async (payload: { id: string }, source: string) => { + await writeToWarehouse(payload, source); + }, +}); + +await debounced.call({ id: "evt_123" }, "webhook"); +await debounced.call({ id: "evt_456" }, "webhook"); +``` + + +In this repository, `Debounce` is defined in `src/debounce.ts` but not re-exported from `src/index.ts`. If your published package does not expose `Debounce`, you will need to import it from the build output or upgrade to a version that re-exports it. Verify your version’s exports before relying on this API. + + + +The callback always runs after a full `wait` delay, even for the last call. If you need immediate execution followed by suppression, this is not the right algorithm. + + + + +A counter-based debounce is very cheap: one `INCR` and one `GET` per call. It does not require clock synchronization or server time, which makes it a good fit for serverless runtimes. The trade-off is that it cannot tell you how much time has passed since the last call, only whether a newer call happened in the window. A timestamp-based approach can provide richer metrics but is more complex and needs additional logic to handle clock skew. + + +Using one shared key for all debounced actions is simple but will merge unrelated events into a single debounce window. 
For real applications, you should use a key that includes the logical scope, such as a user ID, job type, or tenant. This increases Redis key count but avoids accidental suppression across unrelated work. Keep keys short and predictable to make monitoring easier. + + diff --git a/docs/codedocs/guides/critical-section.md b/docs/codedocs/guides/critical-section.md new file mode 100644 index 0000000..8b127d3 --- /dev/null +++ b/docs/codedocs/guides/critical-section.md @@ -0,0 +1,107 @@ +--- +title: "Protect a Critical Section" +description: "Use a lock to ensure an expensive task runs once across multiple instances." +--- + +This guide shows how to guard a critical section so only one instance performs the work at a time. The example uses a background job that may be triggered by multiple servers, but must run at most once per minute. + +**Problem** +Multiple workers may run the same job concurrently, doubling cost and causing conflicting updates. + +**Solution** +Use a Redis-backed lock with a lease and a safe release sequence to ensure only one worker proceeds. 
+ + + +### Initialize Redis and the lock +```typescript filename="job-lock.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +export function createJobLock(jobName: string) { + return new Lock({ + id: `jobs:${jobName}`, + redis, + lease: 20_000, + retry: { attempts: 4, delay: 250 }, + }); +} +``` + + +### Acquire the lock before doing work +```typescript filename="run-job.ts" +import { createJobLock } from "./job-lock"; + +export async function runJobOnce() { + const lock = createJobLock("daily-report"); + const acquired = await lock.acquire(); + + if (!acquired) { + return { ok: false, reason: "busy" }; + } + + try { + await generateReport(); + return { ok: true }; + } finally { + await lock.release(); + } +} +``` + + +### Extend the lock for long tasks +```typescript filename="run-job-extend.ts" +import { createJobLock } from "./job-lock"; + +export async function runLongJob() { + const lock = createJobLock("daily-report"); + const acquired = await lock.acquire(); + if (!acquired) return; + + try { + const extendInterval = setInterval(async () => { + await lock.extend(10_000); + }, 5_000); + + await generateLargeReport(); + clearInterval(extendInterval); + } finally { + await lock.release(); + } +} +``` + + + +**Complete runnable example** +```typescript filename="cli.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +async function main() { + const lock = new Lock({ id: "jobs:daily-report", redis, lease: 15_000 }); + + if (!(await lock.acquire())) { + console.log("busy"); + return; + } + + try { + console.log("running"); + await new Promise((r) => setTimeout(r, 1000)); + } finally { + await lock.release(); + console.log("done"); + } +} + +main(); +``` + +This pattern makes the job resilient to concurrency without building a full queueing system. Use unique, stable lock IDs and always release in a `finally` block. 
If your tasks are long-running, use periodic `extend()` calls to keep the lease alive and reduce the chance of overlapping runs. diff --git a/docs/codedocs/guides/serverless-debounce.md b/docs/codedocs/guides/serverless-debounce.md new file mode 100644 index 0000000..cbad9a5 --- /dev/null +++ b/docs/codedocs/guides/serverless-debounce.md @@ -0,0 +1,98 @@ +--- +title: "Debounce Serverless Events" +description: "Collapse bursts of events across instances with a distributed debounce." +--- + +This guide demonstrates how to debounce repeated webhook or API calls that may arrive in bursts. The goal is to execute the expensive processing only once per quiet period, even when multiple serverless instances are involved. + +**Problem** +Webhook providers sometimes retry quickly or send multiple events in a short period. Running the same expensive processing for every event wastes resources and can trigger rate limits downstream. + +**Solution** +Use the distributed debounce utility so only the last event in a time window triggers the callback. The callback receives the last call’s arguments. 
+ + + +### Create a debounced handler +```typescript filename="debounce-handler.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +export function createDebouncedHandler() { + return new Debounce({ + id: "webhooks:ingest", + redis, + wait: 2000, + callback: async (payload: { id: string; type: string }) => { + await ingestWebhook(payload); + }, + }); +} +``` + + +### Use it in an API route +```typescript filename="route.ts" +import { createDebouncedHandler } from "./debounce-handler"; + +const debounced = createDebouncedHandler(); + +export async function POST(req: Request) { + const payload = await req.json(); + + await debounced.call(payload); + return new Response("accepted", { status: 202 }); +} +``` + + +### Tune the debounce window +```typescript filename="debounce-config.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +// Longer wait reduces duplicate work but increases latency +export const debounced = new Debounce({ + id: "webhooks:ingest", + redis, + wait: 5_000, + callback: async (payload: { id: string; type: string }) => { + await ingestWebhook(payload); + }, +}); +``` + + + +**Complete runnable example** +```typescript filename="server.ts" +import { Debounce } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; +import { serve } from "bun"; + +const redis = Redis.fromEnv(); +const debounced = new Debounce({ + id: "events:burst", + redis, + wait: 1500, + callback: async (payload: any) => { + console.log("processing", payload.id); + }, +}); + +serve({ + port: 3000, + async fetch(req) { + if (req.method !== "POST") return new Response("method not allowed", { status: 405 }); + const payload = await req.json(); + await debounced.call(payload); + return new Response("queued", { status: 202 }); + }, +}); +``` + +This approach keeps your handler fast and reduces redundant work. 
Use a key that scopes the debounce window appropriately; if you need per-user debouncing, include the user ID in the key. diff --git a/docs/codedocs/index.md b/docs/codedocs/index.md new file mode 100644 index 0000000..664b249 --- /dev/null +++ b/docs/codedocs/index.md @@ -0,0 +1,109 @@ +--- +title: "Upstash Lock" +description: "Distributed locking and debouncing utilities backed by Upstash Redis for best-effort mutual exclusion and cross-instance rate control." +--- + +@upstash/lock provides a small, focused set of primitives for distributed locking and debouncing on top of Upstash Redis. + +**The Problem** +- Multiple app instances can run the same expensive work at the same time, wasting resources. +- Coordinating a critical section across serverless or multi-region deployments is hard without a shared state. +- Local in-process locks and debouncers do not work across horizontally scaled deployments. +- You need a best-effort lock with a bounded lease, not heavy-weight consensus. + +**The Solution** +The library uses Redis atomic commands and Lua scripts to implement a best-effort lock with an expiring lease and a distributed debounce that collapses bursts across instances. 
+ +```typescript filename="app.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +export async function runJob() { + const lock = new Lock({ + id: "jobs:nightly-report", + redis, + lease: 15_000, + }); + + if (await lock.acquire()) { + try { + await generateReport(); + } finally { + await lock.release(); + } + } +} +``` + +**Installation** + + +```bash +npm install @upstash/lock @upstash/redis +``` + + +```bash +pnpm add @upstash/lock @upstash/redis +``` + + +```bash +yarn add @upstash/lock @upstash/redis +``` + + +```bash +bun add @upstash/lock @upstash/redis +``` + + + +**Quick Start** +```typescript filename="quickstart.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +async function main() { + const lock = new Lock({ id: "example:lock", redis }); + const acquired = await lock.acquire(); + + if (!acquired) { + console.log("lock-busy"); + return; + } + + try { + console.log("lock-acquired"); + // your critical section + } finally { + await lock.release(); + console.log("lock-released"); + } +} + +main(); +``` + +Expected output: +``` +lock-acquired +lock-released +``` + +**Key Features** +- Best-effort distributed lock with Redis `SET NX PX` semantics. +- Safe release and lease extension via Lua scripts. +- Retry configuration for acquisition attempts and delays. +- Distributed debounce built on a shared Redis counter. +- Minimal surface area that works in serverless and edge runtimes. + + + How modules interact and how data flows through Redis. + Understand the lock lifecycle and lease behavior. + Full details of public classes and methods. 
+ diff --git a/docs/codedocs/lease-and-retry.md b/docs/codedocs/lease-and-retry.md new file mode 100644 index 0000000..6f45e22 --- /dev/null +++ b/docs/codedocs/lease-and-retry.md @@ -0,0 +1,78 @@ +--- +title: "Lease and Retry" +description: "How lease duration and retry policy shape lock behavior and reliability." +--- + +The lock’s behavior is controlled primarily by two knobs: **lease duration** and **retry policy**. These affect how long a lock is held, how quickly others can recover from a crash, and how much load you place on Redis when contending for the same key. + +```mermaid +flowchart TD + A[Acquire called] --> B{SET NX PX success?} + B -->|Yes| C[Store UUID in memory] + B -->|No| D[Wait retry.delay] + D --> E{Attempts remaining?} + E -->|Yes| B + E -->|No| F[Return false] +``` + +**What the concept is** +A **lease** is the TTL applied to the lock key when it is acquired. A **retry policy** is the number of attempts and delay between attempts when acquisition fails. These values are configured in the `Lock` constructor and can be overridden for a specific `acquire()` call. Both are defined in `src/types.ts` and applied in `src/lock.ts`. + +**Why it exists** +Distributed locks need a safety valve: if the owner crashes, the lock should eventually become free. That is the role of a lease. Retries provide a controlled way to contend for the lock without hammering Redis or introducing tight loops. + +**How it works internally** +- The constructor sets default values for `lease`, `retry.attempts`, and `retry.delay` (`DEFAULT_LEASE_MS = 10000`, `DEFAULT_RETRY_ATTEMPTS = 3`, `DEFAULT_RETRY_DELAY_MS = 100`) in `src/lock.ts`. +- `acquire()` reads optional overrides from `LockAcquireConfig`, then mutates the instance configuration to reflect the final lease. +- Each failed attempt uses `setTimeout` to sleep for the configured delay. This is a simple backoff mechanism; it is not exponential and does not jitter. +- The lease only affects Redis. 
The local instance does not track remaining TTL except when `extend()` is called. + +**How it relates to other concepts** +- The **Lock Lifecycle** concept shows where lease and retry are applied in the overall flow. +- The **Distributed Debounce** concept also relies on a wait period, but it is tied to debounce windows rather than ownership leases. + +**Basic usage: use defaults** +```typescript filename="defaults.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const lock = new Lock({ id: "batch:cleanup", redis: Redis.fromEnv() }); +await lock.acquire(); +``` + +**Advanced usage: per-call overrides** +```typescript filename="overrides.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); +const lock = new Lock({ + id: "batch:cleanup", + redis, + lease: 5_000, + retry: { attempts: 2, delay: 250 }, +}); + +// Temporarily use a longer lease and more retries for a burst period +const acquired = await lock.acquire({ + lease: 20_000, + retry: { attempts: 8, delay: 200 }, +}); +``` + + +Very short leases can cause your lock to expire mid-task. If another instance acquires the lock after expiry, both may run concurrently. Choose a lease that safely exceeds your typical task duration or call `extend()` periodically. + + + +Retrying with very small delays can spike Redis traffic under contention. This library does not implement exponential backoff or jitter; if you need that, add it in your caller and keep `retry.attempts` low. + + + + +A short lease improves recovery when a worker crashes because the lock becomes available quickly. The downside is an increased chance of accidental overlap if your task runs longer than expected. A long lease reduces overlap but increases the window where a crashed worker blocks others. If you cannot confidently estimate task duration, consider a medium lease with a periodic `extend()` call to refresh ownership while the task is healthy. 
+ + +More retries increase the chance you will eventually acquire the lock, but they also reduce fairness for other contenders and can cause bursts of contention. Fewer retries reduce contention but may cause callers to give up too quickly and skip work. Because the retry delay is constant, synchronized callers may retry together and create waves; staggering retries in your calling code can improve fairness. For busy keys, treat the lock as a hint and accept that you might not acquire it every time. + + diff --git a/docs/codedocs/lock-lifecycle.md b/docs/codedocs/lock-lifecycle.md new file mode 100644 index 0000000..21f4d85 --- /dev/null +++ b/docs/codedocs/lock-lifecycle.md @@ -0,0 +1,114 @@ +--- +title: "Lock Lifecycle" +description: "How a lock is acquired, extended, released, and verified in @upstash/lock." +--- + +A lock in this library is a single Redis key whose value is a UUID owned by one instance. The lifecycle centers around acquiring that key with a lease, keeping the UUID in memory, and using Lua scripts to ensure safe release and extension. + +```mermaid +sequenceDiagram + participant App as App Instance + participant Redis as Upstash Redis + App->>Redis: SET key uuid NX PX lease + alt OK + Redis-->>App: OK + App->>Redis: EVAL release.lua (key, uuid) + Redis-->>App: 1 or 0 + else busy + Redis-->>App: null + App->>App: wait retry.delay + App->>Redis: SET key uuid NX PX lease + end +``` + +**What the lock is** +A `Lock` is an object created with a unique `id` (the Redis key) and a `redis` client. Internally, the instance tracks the lease duration, retry behavior, and the UUID of the current ownership. This is defined in `src/lock.ts`, using types from `src/types.ts`. + +**Why it exists** +Distributed systems often need a “best-effort” mutual exclusion mechanism to avoid double work. This library gives you that in a small, serverless-friendly package. 
It is not a consensus system; it is a coordination helper for tasks that can tolerate occasional overlap during failures. + +**How it works internally** +- `acquire()` generates or accepts a UUID and tries `SET key uuid NX PX lease`. Only the first caller wins. If `SET` does not return `OK`, it waits and retries according to `retry.attempts` and `retry.delay`. +- On success, the UUID is stored on the instance (`config.UUID`). This value is the only proof of ownership the lock has. +- `release()` runs a Lua script that checks if the stored UUID matches the Redis value and deletes the key if it does. This prevents another instance from accidentally releasing your lock. +- `extend()` runs a Lua script that checks ownership, reads the current TTL, and extends it by the provided amount. The script uses `TTL` and `EXPIRE` to apply the new lease length. +- `getStatus()` compares the stored UUID to the value in Redis. If they match, the lock is still acquired; otherwise it is considered free. + +**How it relates to other concepts** +- The **Lease and Retry** concept explains how lease duration and retry policy affect lock reliability and resource usage. +- The **Distributed Debounce** concept uses Redis but does not track ownership; it solves a different problem and does not require a UUID. 
+ +**Basic usage** +```typescript filename="basic-lock.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +export async function runOnce() { + const lock = new Lock({ id: "jobs:daily", redis, lease: 10_000 }); + + if (!(await lock.acquire())) { + return "skipped"; + } + + try { + await doWork(); + return "done"; + } finally { + await lock.release(); + } +} +``` + +**Advanced usage: custom UUID and status checks** +```typescript filename="advanced-lock.ts" +import { Lock } from "@upstash/lock"; +import { Redis } from "@upstash/redis"; + +const redis = Redis.fromEnv(); + +export async function guardedJob(jobId: string) { + const lock = new Lock({ + id: `jobs:${jobId}`, + redis, + lease: 5_000, + retry: { attempts: 5, delay: 250 }, + }); + + const acquired = await lock.acquire({ uuid: `job-${jobId}` }); + if (!acquired) { + return { ok: false, reason: "busy" }; + } + + const status = await lock.getStatus(); + if (status !== "ACQUIRED") { + await lock.release(); + return { ok: false, reason: "lost" }; + } + + try { + await doWork(); + return { ok: true }; + } finally { + await lock.release(); + } +} +``` + + +If your runtime does not provide `crypto.randomUUID()`, you must pass `uuid` in `acquire()` or the call will throw. This is common in older Node versions or constrained runtimes. Always verify your runtime has a UUID source before relying on the default. + + + +A lock can be acquired by more than one client during network partitions or replication lag. Do not use this to guarantee correctness for leader election or exactly-once processing. + + + + +This implementation favors simplicity and latency over quorum-based consensus. A single key means one round-trip to Redis and a small state surface. In return, it does not protect against split-brain scenarios, and it cannot provide the formal safety of a Redlock-style algorithm. 
If your system requires strict correctness, you should use a consensus system or a properly audited Redlock implementation. For cost-sensitive or best-effort tasks, the single-key approach is easier to operate and more predictable.


Leases prevent indefinite lock ownership during crashes, but they also mean long tasks may outlive the lease. The `extend()` method is a manual way to deal with that, yet it requires that your worker keeps running and can communicate with Redis. A short lease reduces how long a stale lock can linger after its owner crashes, but increases the chance of premature expiry; a long lease reduces churn but increases recovery time after a crash. Consider a heartbeat or periodic `extend()` if your task duration is variable.


diff --git a/docs/codedocs/types.md b/docs/codedocs/types.md
new file mode 100644
index 0000000..76bebeb
--- /dev/null
+++ b/docs/codedocs/types.md
@@ -0,0 +1,68 @@
---
title: "Types"
description: "Exported TypeScript types and what they represent in @upstash/lock."
---

This page lists the exported TypeScript types from `src/types.ts` and explains how to use them. These types describe the configuration objects and return values used by the `Lock` and `Debounce` classes.

## Type Definitions
```typescript
import type { Redis } from "@upstash/redis";

export type RetryConfig = {
  attempts: number;
  delay: number;
};

export type LockAcquireConfig = {
  lease?: number;
  retry?: RetryConfig;
  uuid?: string;
};

export type LockConfig = {
  redis: Redis;
  id: string;
  lease: number;
  UUID: string | null;
  retry: RetryConfig;
};

export type LockCreateConfig = {
  id: string;
  redis: Redis;
  lease?: number;
  retry?: RetryConfig;
};

export type LockStatus = "ACQUIRED" | "FREE";

export type DebounceConfig = {
  redis: Redis;
  id: string;
  wait: number;
  callback: (...args: any[]) => any;
};
```

## Explanations

**`RetryConfig`**
Use this to tune acquisition behavior under contention.
`attempts` is how many times `acquire()` will try before giving up. `delay` is the pause between attempts in milliseconds. These are wired directly into the retry loop in `src/lock.ts`.

**`LockAcquireConfig`**
This is an optional override for a single acquisition. It lets you change the lease length and retry policy for one call, and it optionally accepts a `uuid` if the runtime does not provide `crypto.randomUUID()`. If `uuid` is omitted and the runtime lacks UUID support, `acquire()` throws an error.

**`LockConfig`**
This is the internal normalized configuration held by the `Lock` instance. It includes the computed lease and retry values, and a `UUID` field which is `null` until a lock is acquired. You typically do not construct this type directly, but it describes the internal state a `Lock` maintains.

**`LockCreateConfig`**
This is the public constructor configuration for `Lock`. It requires a Redis client and key `id`, and optionally overrides the default lease and retry settings. This type is the one you will use most frequently in application code.

**`LockStatus`**
The status returned by `getStatus()` is a simple union of `"ACQUIRED"` and `"FREE"`. `getStatus()` reports `"ACQUIRED"` when the locally stored UUID matches the Redis key’s value at the time of the call, and `"FREE"` otherwise.

**`DebounceConfig`**
This config defines a debounce window and callback. It is used by `Debounce` to share a counter across instances. The `callback` can be synchronous or async and is called with the arguments provided to `call()`.

Related pages: [Lock](./api-reference/lock), [Debounce](./api-reference/debounce), [Lock Lifecycle](./lock-lifecycle).