Loading content...
The transactional outbox solves the atomicity problem—your business data and events are safely committed together. But there's still a critical question: How does the event get from the outbox table to the message broker?
This is the 'last mile' of the Outbox Pattern, and there are two fundamentally different approaches: polling the outbox table from application code, and streaming changes out of the database's transaction log via change data capture (CDC).
Both approaches work. Both have been used successfully in production at massive scale. But they have dramatically different characteristics in terms of latency, operational complexity, resource consumption, and failure modes. Choosing the right approach depends on your specific requirements and constraints.
By the end of this page, you will understand both polling and CDC approaches in depth, their trade-offs, when to use each, and how to implement production-grade versions of both. You'll also learn hybrid approaches that combine the best of both worlds.
The polling approach is the simpler of the two options. A publisher service runs in a loop, periodically querying the outbox table for events that haven't been published yet.
Basic Flow:
┌──────────────────────────────────────────────────────────────────┐
│ POLLING ARCHITECTURE │
│ │
│ ┌─────────────┐ ┌─────────────────────────────────────┐ │
│ │ Service │ │ Database │ │
│ │ (Orders) │─────▶│ ┌─────────┐ ┌──────────────┐ │ │
│ └─────────────┘ │ │ orders │ │ outbox │ │ │
│ │ └─────────┘ └──────────────┘ │ │
│ └───────────────────────┬─────────────┘ │
│ │ │
│ │ SELECT ... WHERE │
│ │ published_at IS │
│ │ NULL │
│ │ │
│ ┌───────────────────────▼─────────────┐ │
│ │ Outbox Publisher │ │
│ │ (polls every N milliseconds) │ │
│ └───────────────────────┬─────────────┘ │
│ │ │
│ │ publish() │
│ ▼ │
│ ┌─────────────────────────────────────┐ │
│ │ Message Broker │ │
│ │ (Kafka, RabbitMQ, SQS, etc.) │ │
│ └─────────────────────────────────────┘ │
└──────────────────────────────────────────────────────────────────┘
A production-grade polling publisher implementation:
// PRODUCTION-GRADE POLLING PUBLISHER interface PollingPublisherConfig { // Polling intervals pollIntervalMs: number; // Time between polls (when events found) emptyPollDelayMs: number; // Time to wait when no events (backoff) maxBackoffMs: number; // Maximum backoff delay // Batch processing batchSize: number; // Events per poll iteration // Reliability maxRetries: number; // Per-event retry attempts retryDelayMs: number; // Delay between retries // Monitoring publishTimeoutMs: number; // Timeout for broker publish metricsEnabled: boolean; // Enable Prometheus metrics} class PollingOutboxPublisher { private isRunning: boolean = false; private currentBackoff: number; private consecutiveEmptyPolls: number = 0; constructor( private db: Database, private messageBroker: MessageBroker, private config: PollingPublisherConfig, private metrics: MetricsRegistry ) { this.currentBackoff = config.pollIntervalMs; } async start(): Promise<void> { this.isRunning = true; console.log('Outbox publisher started'); while (this.isRunning) { const startTime = Date.now(); try { const processedCount = await this.pollAndPublish(); // Record metrics this.metrics.histogram('outbox_poll_duration_ms').observe( Date.now() - startTime ); this.metrics.counter('outbox_events_published').inc(processedCount); // Adaptive backoff if (processedCount > 0) { // Events found - reset backoff, poll immediately this.currentBackoff = this.config.pollIntervalMs; this.consecutiveEmptyPolls = 0; } else { // No events - exponential backoff this.consecutiveEmptyPolls++; this.currentBackoff = Math.min( this.config.emptyPollDelayMs * Math.pow(1.5, this.consecutiveEmptyPolls), this.config.maxBackoffMs ); } await this.sleep(this.currentBackoff); } catch (error) { console.error('Poll iteration failed:', error); this.metrics.counter('outbox_poll_errors').inc(); // Error backoff await this.sleep(this.config.maxBackoffMs); } } } private async pollAndPublish(): Promise<number> { return await this.db.transaction(async (tx) 
=> { // Set isolation level for consistent reads await tx.query('SET TRANSACTION ISOLATION LEVEL READ COMMITTED'); // Fetch batch of unpublished events with row locking const events = await tx.query<OutboxEvent[]>(` SELECT id, sequence_number, aggregate_type, aggregate_id, event_type, event_version, payload, metadata, correlation_id, created_at FROM outbox_events WHERE published_at IS NULL AND ( publish_attempts < $1 OR last_attempt_at < NOW() - INTERVAL '5 minutes' ) ORDER BY sequence_number ASC LIMIT $2 FOR UPDATE SKIP LOCKED `, [this.config.maxRetries, this.config.batchSize]); if (events.length === 0) { return 0; } this.metrics.gauge('outbox_batch_size').set(events.length); const publishedIds: string[] = []; const failedEvents: Map<string, Error> = new Map(); // Publish each event for (const event of events) { try { await this.publishEvent(event); publishedIds.push(event.id); } catch (error) { failedEvents.set(event.id, error as Error); } } // Mark successfully published events if (publishedIds.length > 0) { await tx.query(` UPDATE outbox_events SET published_at = NOW() WHERE id = ANY($1) `, [publishedIds]); } // Update retry info for failed events for (const [eventId, error] of failedEvents) { await tx.query(` UPDATE outbox_events SET publish_attempts = publish_attempts + 1, last_attempt_at = NOW(), last_error = $1 WHERE id = $2 `, [error.message, eventId]); } return publishedIds.length; }); } private async publishEvent(event: OutboxEvent): Promise<void> { const topic = this.buildTopicName(event); const message = { key: event.aggregateId, value: { eventId: event.id, eventType: event.eventType, eventVersion: event.eventVersion, aggregateType: event.aggregateType, aggregateId: event.aggregateId, payload: JSON.parse(event.payload), metadata: JSON.parse(event.metadata), correlationId: event.correlationId, timestamp: event.createdAt.toISOString() }, headers: { 'x-event-id': event.id, 'x-event-type': event.eventType, 'x-correlation-id': event.correlationId, 
'x-sequence-number': String(event.sequenceNumber) } }; // Publish with timeout and required acks await Promise.race([ this.messageBroker.publish(topic, message, { acks: 'all' }), this.timeout(this.config.publishTimeoutMs) ]); } private buildTopicName(event: OutboxEvent): string { return `${event.aggregateType.toLowerCase()}.` + `${this.toKebabCase(event.eventType)}`; } private toKebabCase(str: string): string { return str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase(); } private timeout(ms: number): Promise<never> { return new Promise((_, reject) => setTimeout(() => reject(new Error('Publish timeout')), ms) ); } private sleep(ms: number): Promise<void> { return new Promise(resolve => setTimeout(resolve, ms)); } async stop(): Promise<void> { this.isRunning = false; console.log('Outbox publisher stopping...'); }} // CONFIGURATION RECOMMENDATIONSconst productionConfig: PollingPublisherConfig = { // Balance latency vs database load pollIntervalMs: 100, // 100ms - responsive emptyPollDelayMs: 500, // 500ms - save resources when idle maxBackoffMs: 5000, // 5s max - don't wait too long // Efficient batch processing batchSize: 100, // Process 100 events per poll // Reliability maxRetries: 5, // Retry failed events 5 times retryDelayMs: 1000, // 1s between retries publishTimeoutMs: 10000, // 10s timeout per publish metricsEnabled: true};Change Data Capture (CDC) takes a fundamentally different approach: instead of polling the outbox table, it streams changes from the database's transaction log (WAL in PostgreSQL, binlog in MySQL). This enables near-real-time event publishing with minimal latency.
How CDC Works:
┌──────────────────────────────────────────────────────────────────┐
│ CDC ARCHITECTURE │
│ │
│ ┌─────────────┐ ┌─────────────────────────────────────┐ │
│ │ Service │ │ Database │ │
│ │ (Orders) │─────▶│ ┌─────────┐ ┌──────────────┐ │ │
│ └─────────────┘ │ │ orders │ │ outbox │ │ │
│ │ └─────────┘ └──────────────┘ │ │
│ │ │ │ │
│ │ Transaction Log (WAL) │ │
│ │ │ │ │
│ └─────────────────────────┼────────────┘ │
│ │ │
│ Logical Replication │
│ │ │
│ ┌─────────────────────────▼────────────┐ │
│ │ CDC Connector │ │
│ │ (Debezium, PostgreSQL CDC, etc.) │ │
│ └─────────────────────────┬────────────┘ │
│ │ │
│ ┌─────────────────────────▼────────────┐ │
│ │ Message Broker │ │
│ │ (Kafka, RabbitMQ, SQS, etc.) │ │
│ └─────────────────────────────────────┘ │
└──────────────────────────────────────────────────────────────────┘
Key Insight: The transaction log IS the source of truth for what changed. CDC tools decode this log and emit events, often within milliseconds of the original commit.
Debezium connector configuration for an outbox CDC pipeline:
{
  "name": "orders-outbox-connector",
  "config": {
    // Debezium PostgreSQL connector
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",

    // Connection settings
    "database.hostname": "postgres.example.com",
    "database.port": "5432",
    "database.user": "debezium",
    "database.password": "${file:/secrets/password}",
    "database.dbname": "orders_db",

    // Replication slot (crucial for exactly-once)
    "slot.name": "outbox_slot",
    "publication.name": "outbox_publication",
    "plugin.name": "pgoutput",

    // Capture only outbox table
    "table.include.list": "public.outbox_events",

    // =========================================
    // OUTBOX EVENT ROUTER (Debezium Transform)
    // =========================================
    // This transforms outbox rows into proper events
    "transforms": "outbox",
    "transforms.outbox.type": "io.debezium.transforms.outbox.EventRouter",

    // Which column contains the event routing key
    "transforms.outbox.route.by.field": "aggregate_type",

    // Topic naming: {prefix}.{aggregate_type_column_value}
    "transforms.outbox.route.topic.replacement": "events.${routedByValue}",

    // Map outbox columns to event fields
    "transforms.outbox.table.field.event.id": "id",
    "transforms.outbox.table.field.event.key": "aggregate_id",
    "transforms.outbox.table.field.event.type": "event_type",
    "transforms.outbox.table.field.event.payload": "payload",
    "transforms.outbox.table.field.event.timestamp": "created_at",

    // Additional fields to include
    "transforms.outbox.table.fields.additional.placement": "event_version:envelope:eventVersion,correlation_id:header:correlationId",

    // Expand the JSON payload column into a structured payload
    // (this key controls JSON expansion, not outbox-row deletion)
    "transforms.outbox.table.expand.json.payload": "true",

    // =========================================
    // Exactly-once semantics
    // =========================================
    // Transaction metadata for ordering
    "provide.transaction.metadata": "true",

    // Snapshot mode (crucial for initial sync)
    "snapshot.mode": "initial",

    // Heartbeat for slot progression
    "heartbeat.interval.ms": "10000",

    // =========================================
    // Performance tuning
    // =========================================
    "max.batch.size": "2048",
    "max.queue.size": "8192",
    "poll.interval.ms": "100"
  }
}
The Debezium Outbox Event Router
Debezium provides a built-in transformation specifically designed for the Outbox Pattern called the EventRouter. This transformation reads each captured outbox row, routes it to a topic derived from the configured routing column (here, aggregate_type), and maps the row's columns into a proper event envelope (id, key, type, payload, timestamp).
This means the events arriving in Kafka are properly formatted business events, not raw database change events.
PostgreSQL logical replication setup for CDC:
-- POSTGRESQL LOGICAL REPLICATION SETUP FOR CDC

-- 1. Enable logical replication in postgresql.conf
-- wal_level = logical
-- max_replication_slots = 4
-- max_wal_senders = 4

-- 2. Create replication user with appropriate permissions
CREATE ROLE debezium_user WITH REPLICATION LOGIN PASSWORD 'secure_password';
GRANT CONNECT ON DATABASE orders_db TO debezium_user;
GRANT USAGE ON SCHEMA public TO debezium_user;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO debezium_user;

-- 3. Create publication for outbox table only
CREATE PUBLICATION outbox_publication FOR TABLE outbox_events;

-- 4. Create replication slot (Debezium can do this automatically)
SELECT pg_create_logical_replication_slot('outbox_slot', 'pgoutput');

-- 5. Verify setup
SELECT * FROM pg_replication_slots WHERE slot_name = 'outbox_slot';
SELECT * FROM pg_publication_tables WHERE pubname = 'outbox_publication';

-- IMPORTANT: Monitor slot lag to prevent WAL bloat
-- If connector is offline, WAL accumulates!
SELECT
  slot_name,
  pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS slot_lag,
  active
FROM pg_replication_slots
WHERE slot_name = 'outbox_slot';

-- If lag grows too large (connector offline), you may need to drop and recreate
-- SELECT pg_drop_replication_slot('outbox_slot');
| Dimension | Polling | CDC |
|---|---|---|
| Latency | 50-500ms (poll interval dependent) | 1-50ms (near real-time) |
| Database Load | Continuous queries; increases with frequency | Single replication connection; minimal load |
| Infrastructure | None beyond app code | CDC platform (Debezium, Kafka Connect) |
| Complexity | Simple; 100-200 lines of code | Moderate; connector configs, monitoring |
| Ordering Guarantees | Easy per-aggregate ordering | Built-in WAL ordering |
| Exactly-Once | Requires careful implementation | Native support in some configurations |
| Debugging | Easy; standard SQL and application logs | Harder; connector logs, Kafka offsets |
| Scalability | SKIP LOCKED for multiple publishers | Single consumer group; Kafka scales consumers |
| Recovery | Mark events as unpublished; retry | Reset connector offset; replay from WAL |
| Database Dependency | Standard SQL; any relational DB | Specific to DB version and replication protocol |
| Operational Risk | Low; app failure doesn't affect writes | WAL bloat if CDC falls behind |
| Best For | Most applications; simplicity priority | Real-time requirements; existing Kafka infrastructure |
Latency Deep Dive
The latency difference is the primary factor in choosing between polling and CDC:
Time from COMMIT to EVENT DELIVERED
POLLING (100ms interval):
─────────────────────────────────────────────────────────────────►
│←────── 0-100ms poll wait ──────►│←─ publish ─►│
COMMIT POLL DELIVERED
(discovers event)
CDC:
─────────────────────────────────────────────────────────────────►
│←─ 1-10ms ─►│←─ publish ─►│
COMMIT STREAM DELIVERED
(pushed immediately)
When Latency Matters: use cases with a concrete sub-100ms delivery requirement — real-time user-facing updates, fraud detection, trading confirmations.
When Latency Doesn't Matter: background workflows such as email notifications, reporting, analytics, and batch synchronization, where a few hundred milliseconds of delay is invisible.
For 80% of applications, polling is the right choice. It's simpler, easier to operate, and the latency is acceptable. Only reach for CDC when you have a concrete requirement for sub-100ms event delivery AND you have the operational capacity to run CDC infrastructure.
Some architectures benefit from combining polling and CDC elements, or using database-specific features to reduce polling latency without full CDC complexity.
A hybrid publisher using PostgreSQL LISTEN/NOTIFY with a polling fallback:
// HYBRID APPROACH: PostgreSQL LISTEN/NOTIFY + Polling Fallback// // Use LISTEN/NOTIFY for near-instant notification that events exist,// then poll to actually fetch and process them.// // Benefits:// - Near-real-time latency (as fast as CDC)// - Much simpler than Debezium// - Fallback to polling if notifications missed import { Client, Notification } from 'pg'; class HybridOutboxPublisher { private notificationClient: Client; private isRunning: boolean = false; private pendingNotification: boolean = false; constructor( private db: Database, private messageBroker: MessageBroker, private config: HybridPublisherConfig ) { this.notificationClient = new Client(config.connectionString); } async start(): Promise<void> { // Set up notification listener await this.notificationClient.connect(); await this.notificationClient.query('LISTEN outbox_events_channel'); this.notificationClient.on('notification', (msg: Notification) => { console.log('Received notification:', msg.payload); this.pendingNotification = true; }); this.isRunning = true; // Main processing loop while (this.isRunning) { try { // Process if we have a pending notification OR periodically if (this.pendingNotification) { this.pendingNotification = false; await this.pollAndPublish(); } else { // Fallback poll every few seconds (in case notifications missed) await this.sleep(this.config.fallbackPollIntervalMs); await this.pollAndPublish(); } } catch (error) { console.error('Processing error:', error); await this.sleep(this.config.errorDelayMs); } } } private async pollAndPublish(): Promise<number> { // Same polling logic as before return await this.db.transaction(async (tx) => { const events = await tx.query<OutboxEvent[]>(` SELECT * FROM outbox_events WHERE published_at IS NULL ORDER BY sequence_number LIMIT $1 FOR UPDATE SKIP LOCKED `, [this.config.batchSize]); for (const event of events) { await this.publishEvent(event); await tx.query( 'UPDATE outbox_events SET published_at = NOW() WHERE id = $1', [event.id] 
); } // If we got a full batch, immediately poll again if (events.length >= this.config.batchSize) { this.pendingNotification = true; } return events.length; }); } private async publishEvent(event: OutboxEvent): Promise<void> { // ... publish logic ... } private sleep(ms: number): Promise<void> { return new Promise(resolve => setTimeout(resolve, ms)); } async stop(): Promise<void> { this.isRunning = false; await this.notificationClient.end(); }} // DATABASE TRIGGER: Send notification on outbox insert// Run this SQL:/*CREATE OR REPLACE FUNCTION notify_outbox_insert()RETURNS TRIGGER AS $$BEGIN PERFORM pg_notify( 'outbox_events_channel', json_build_object( 'id', NEW.id, 'aggregate_type', NEW.aggregate_type, 'event_type', NEW.event_type )::text ); RETURN NEW;END;$$ LANGUAGE plpgsql; CREATE TRIGGER outbox_insert_trigger AFTER INSERT ON outbox_events FOR EACH ROW EXECUTE FUNCTION notify_outbox_insert();*/1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495
// PUSH-THEN-POLL: Attempt immediate publish, fall back to outbox// // For latency-sensitive events: try synchronous publish first,// use outbox only as fallback when publish fails.// // Trade-off: Slightly more complexity for lower latency in happy path class PushThenPollOutboxService { constructor( private db: Database, private messageBroker: MessageBroker, private config: PushThenPollConfig ) {} async createOrderWithPushOptimization( command: CreateOrderCommand ): Promise<Order> { const orderId = uuidv4(); const eventId = uuidv4(); // Prepare the event const event = this.buildOrderCreatedEvent(orderId, eventId, command); // Transaction: save order and outbox event const order = await this.db.transaction(async (tx) => { const order = await tx.orders.create({ id: orderId, ...command, status: 'CREATED' }); // Always write to outbox first (guarantees durability) await tx.outbox.create({ id: eventId, aggregateType: 'Order', aggregateId: orderId, eventType: 'OrderCreated', payload: JSON.stringify(event), publishedAt: null // Will update if push succeeds }); return order; }); // After commit: attempt immediate push (non-blocking) this.attemptImmediatePublish(event, eventId).catch(error => { // Log but don't throw - outbox publisher will handle it console.warn('Immediate publish failed, relying on outbox:', error.message); }); return order; } private async attemptImmediatePublish( event: OrderCreatedEvent, eventId: string ): Promise<void> { // Quick timeout - don't wait long const publishPromise = this.messageBroker.publish( 'order.created', event, { acks: 'all' } ); const timeoutPromise = new Promise<never>((_, reject) => setTimeout(() => reject(new Error('Timeout')), 500) ); await Promise.race([publishPromise, timeoutPromise]); // If we get here, publish succeeded - mark in outbox await this.db.outbox.update({ where: { id: eventId }, data: { publishedAt: new Date() } }); console.log(`Event ${eventId} published immediately`); }} // TRADE-OFFS:// // PROS:// - Lowest 
latency in success case (event published before response returns)// - Outbox still provides guaranteed delivery on failure// - User gets fastest possible confirmation//// CONS:// - Slight risk of duplicate publish (outbox also publishes before marking)// - More complex code path// - Need idempotent consumers regardless//// RECOMMENDATION:// Only use this if sub-100ms latency is business-critical// (e.g., real-time trading confirmations)| Approach | Latency | Complexity | Best For |
|---|---|---|---|
| Pure Polling | 50-500ms | Low | Most applications |
| LISTEN/NOTIFY + Polling | 1-50ms | Low-Medium | PostgreSQL; need low latency without CDC overhead |
| Push-then-Poll | <10ms success, outbox fallback | Medium | Latency-critical happy path |
| Full CDC (Debezium) | 1-50ms | High | Enterprise; existing Kafka infrastructure |
Both polling and CDC require careful operational attention. Different failure modes require different responses.
Essential monitoring queries and metrics for the outbox:
// ESSENTIAL OUTBOX MONITORING // 1. OUTBOX LAG - Most critical metric// How many seconds old is the oldest unpublished event? const outboxLagQuery = ` SELECT EXTRACT(EPOCH FROM (NOW() - MIN(created_at))) AS lag_seconds, COUNT(*) AS pending_count FROM outbox_events WHERE published_at IS NULL`; // Alert if lag > 60 seconds (adjust based on SLA)// This catches:// - Publisher process died// - Message broker unavailable// - Database connection issues // 2. PUBLICATION RATE - Events per secondconst publicationRateQuery = ` SELECT date_trunc('minute', published_at) AS minute, COUNT(*) AS events_published FROM outbox_events WHERE published_at > NOW() - INTERVAL '1 hour' GROUP BY 1 ORDER BY 1 DESC`; // 3. FAILED EVENTS - Events that couldn't be publishedconst failedEventsQuery = ` SELECT aggregate_type, event_type, COUNT(*) AS failed_count, MAX(publish_attempts) AS max_attempts, MAX(last_error) AS sample_error FROM outbox_events WHERE published_at IS NULL AND publish_attempts >= 3 GROUP BY aggregate_type, event_type`; // 4. 
OUTBOX TABLE SIZE - Growth monitoringconst outboxSizeQuery = ` SELECT pg_size_pretty(pg_total_relation_size('outbox_events')) AS total_size, COUNT(*) FILTER (WHERE published_at IS NULL) AS pending, COUNT(*) FILTER (WHERE published_at IS NOT NULL) AS published FROM outbox_events`; // Prometheus metrics exampleclass OutboxMetrics { private lagGauge = new Gauge({ name: 'outbox_lag_seconds', help: 'Age of oldest unpublished event in seconds' }); private pendingGauge = new Gauge({ name: 'outbox_pending_events', help: 'Number of events pending publication' }); private publishedCounter = new Counter({ name: 'outbox_events_published_total', help: 'Total events published', labelNames: ['aggregate_type', 'event_type'] }); private errorCounter = new Counter({ name: 'outbox_publish_errors_total', help: 'Total publish errors', labelNames: ['error_type'] }); async collectMetrics(db: Database): Promise<void> { const { lag_seconds, pending_count } = await db.queryOne(outboxLagQuery); this.lagGauge.set(lag_seconds || 0); this.pendingGauge.set(pending_count); }} // CDC-Specific monitoring: Replication slot lagconst replicationSlotLagQuery = ` SELECT slot_name, active, pg_size_pretty( pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) ) AS lag, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS lag_bytes FROM pg_replication_slots WHERE slot_name = 'outbox_slot'`; // CRITICAL: Alert if lag_bytes > 1GB// Replication slots prevent WAL cleanup// If CDC falls behind, WAL grows unbounded → disk full123456789101112131415161718192021222324252627282930313233343536373839
-- OUTBOX CLEANUP: Remove old published events
-- Run as scheduled job (cron, pg_cron, etc.)

-- Option 1: Simple DELETE (works for small volumes)
DELETE FROM outbox_events
WHERE published_at < NOW() - INTERVAL '7 days';

-- Option 2: Batch DELETE (for high-volume tables)
-- Avoids long locks and replication lag
-- NOTE(review): COMMIT inside a DO block requires PostgreSQL 11+ and must
-- not be run inside an explicit transaction block — verify before scheduling.
DO $$
DECLARE
  deleted_count INTEGER;
  total_deleted INTEGER := 0;
BEGIN
  LOOP
    DELETE FROM outbox_events
    WHERE id IN (
      SELECT id FROM outbox_events
      WHERE published_at < NOW() - INTERVAL '7 days'
      LIMIT 1000
    );

    GET DIAGNOSTICS deleted_count = ROW_COUNT;
    total_deleted := total_deleted + deleted_count;

    EXIT WHEN deleted_count = 0;

    COMMIT;               -- Release locks between batches
    PERFORM pg_sleep(0.1); -- Brief pause
  END LOOP;

  RAISE NOTICE 'Deleted % old outbox events', total_deleted;
END $$;

-- Option 3: Partitioned table (best for high volume)
-- Drop old partitions instead of DELETE (instantaneous)
-- See page 2 for partitioning setup

-- Keep at least 7 days for debugging and replay capability
-- Adjust retention based on your recovery requirements
You now understand the two primary delivery strategies for the Outbox Pattern—polling and CDC—along with their trade-offs and hybrid approaches. Next, we'll explore the core problem the Outbox Pattern solves: avoiding dual writes and the various failure modes they create.