From 9e9493ec3ae600a5b9da3a91bf58d6c6aa1fe9f2 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:41:20 +0900 Subject: [PATCH 01/14] =?UTF-8?q?feat:=20ApplicationEvent=20=EA=B8=B0?= =?UTF-8?q?=EB=B0=98=20=EC=9D=B4=EB=B2=A4=ED=8A=B8=20=EB=B6=84=EB=A6=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Event record 6개 생성 (Like/Order/ProductViewed) - EventOutbox Entity + Transactional Outbox 패턴 - LikeFacade: incrementLikeCount 제거 → 이벤트 발행 + Outbox 저장 - LikeCountEventListener: AFTER_COMMIT + async executor + TransactionTemplate - CacheEvictionEventListener: 이벤트 수신 → 캐시 무효화 - AsyncConfig: eventExecutor (core=2, max=4, CallerRunsPolicy) - LikeController: 인라인 캐시 무효화 제거 --- .../loopers/application/like/LikeFacade.java | 36 +++++++++++- .../com/loopers/domain/event/EventOutbox.java | 49 ++++++++++++++++ .../domain/event/EventOutboxRepository.java | 5 ++ .../domain/event/LikeCreatedEvent.java | 4 ++ .../domain/event/LikeRemovedEvent.java | 4 ++ .../domain/event/OrderCancelledEvent.java | 6 ++ .../domain/event/OrderCreatedEvent.java | 6 ++ .../domain/event/OrderItemSnapshot.java | 4 ++ .../domain/event/ProductViewedEvent.java | 4 ++ .../event/EventOutboxJpaRepository.java | 8 +++ .../infrastructure/kafka/AsyncConfig.java | 26 +++++++++ .../interfaces/api/like/LikeController.java | 6 -- .../listener/CacheEvictionEventListener.java | 38 ++++++++++++ .../listener/LikeCountEventListener.java | 58 +++++++++++++++++++ 14 files changed, 246 insertions(+), 8 deletions(-) create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutbox.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutboxRepository.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/LikeCreatedEvent.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/LikeRemovedEvent.java create mode 100644 
apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCancelledEvent.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCreatedEvent.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/OrderItemSnapshot.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/ProductViewedEvent.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventOutboxJpaRepository.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/AsyncConfig.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/interfaces/listener/CacheEvictionEventListener.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/interfaces/listener/LikeCountEventListener.java diff --git a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java index e34ec3391..e848ab866 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java @@ -1,15 +1,23 @@ package com.loopers.application.like; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; import com.loopers.domain.like.Like; import com.loopers.domain.like.LikeRepository; import com.loopers.domain.product.ProductRepository; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; +import org.springframework.context.ApplicationEventPublisher; import org.springframework.stereotype.Service; import 
org.springframework.transaction.annotation.Transactional; import java.util.List; +import java.util.Map; import java.util.Optional; @Service @@ -19,6 +27,9 @@ public class LikeFacade { private final LikeRepository likeRepository; private final ProductRepository productRepository; + private final EventOutboxRepository eventOutboxRepository; + private final ApplicationEventPublisher applicationEventPublisher; + private final ObjectMapper objectMapper; @Transactional public void addLike(Long memberId, Long productId) { @@ -30,7 +41,12 @@ public void addLike(Long memberId, Long productId) { } likeRepository.save(new Like(memberId, productId)); - productRepository.incrementLikeCount(productId); + + EventOutbox outbox = EventOutbox.create("catalog", String.valueOf(productId), + "LIKE_CREATED", buildPayload(productId, memberId)); + eventOutboxRepository.save(outbox); + + applicationEventPublisher.publishEvent(new LikeCreatedEvent(productId, memberId)); } @Transactional @@ -41,10 +57,26 @@ public void removeLike(Long memberId, Long productId) { } likeRepository.delete(likeOpt.get()); - productRepository.decrementLikeCount(productId); + + EventOutbox outbox = EventOutbox.create("catalog", String.valueOf(productId), + "LIKE_REMOVED", buildPayload(productId, memberId)); + eventOutboxRepository.save(outbox); + + applicationEventPublisher.publishEvent(new LikeRemovedEvent(productId, memberId)); } public List getLikesByMemberId(Long memberId) { return likeRepository.findAllByMemberId(memberId); } + + private String buildPayload(Long productId, Long memberId) { + try { + return objectMapper.writeValueAsString(Map.of( + "productId", productId, + "memberId", memberId + )); + } catch (JsonProcessingException e) { + throw new RuntimeException("이벤트 페이로드 직렬화 실패", e); + } + } } diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutbox.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutbox.java new file mode 100644 index 000000000..e730b82d5 
--- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutbox.java @@ -0,0 +1,49 @@ +package com.loopers.domain.event; + +import jakarta.persistence.*; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.NoArgsConstructor; + +import java.time.ZonedDateTime; + +@Entity +@Table(name = "event_outbox") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class EventOutbox { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "aggregate_type", nullable = false, length = 50) + private String aggregateType; + + @Column(name = "aggregate_id", nullable = false, length = 100) + private String aggregateId; + + @Column(name = "event_type", nullable = false, length = 50) + private String eventType; + + @Column(name = "payload", columnDefinition = "TEXT", nullable = false) + private String payload; + + @Column(name = "created_at", nullable = false, updatable = false) + private ZonedDateTime createdAt; + + @PrePersist + private void prePersist() { + this.createdAt = ZonedDateTime.now(); + } + + public static EventOutbox create(String aggregateType, String aggregateId, + String eventType, String payload) { + EventOutbox outbox = new EventOutbox(); + outbox.aggregateType = aggregateType; + outbox.aggregateId = aggregateId; + outbox.eventType = eventType; + outbox.payload = payload; + return outbox; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutboxRepository.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutboxRepository.java new file mode 100644 index 000000000..8e122173c --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/EventOutboxRepository.java @@ -0,0 +1,5 @@ +package com.loopers.domain.event; + +public interface EventOutboxRepository { + EventOutbox save(EventOutbox outbox); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeCreatedEvent.java 
b/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeCreatedEvent.java new file mode 100644 index 000000000..8a7928235 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeCreatedEvent.java @@ -0,0 +1,4 @@ +package com.loopers.domain.event; + +public record LikeCreatedEvent(long productId, long memberId) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeRemovedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeRemovedEvent.java new file mode 100644 index 000000000..56d75adef --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/LikeRemovedEvent.java @@ -0,0 +1,4 @@ +package com.loopers.domain.event; + +public record LikeRemovedEvent(long productId, long memberId) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCancelledEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCancelledEvent.java new file mode 100644 index 000000000..84438dd32 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCancelledEvent.java @@ -0,0 +1,6 @@ +package com.loopers.domain.event; + +import java.util.List; + +public record OrderCancelledEvent(long orderId, long memberId, List items) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCreatedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCreatedEvent.java new file mode 100644 index 000000000..12e5f7b00 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderCreatedEvent.java @@ -0,0 +1,6 @@ +package com.loopers.domain.event; + +import java.util.List; + +public record OrderCreatedEvent(long orderId, long memberId, List items) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderItemSnapshot.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderItemSnapshot.java new file mode 100644 index 000000000..9fc2207f1 --- /dev/null 
+++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/OrderItemSnapshot.java @@ -0,0 +1,4 @@ +package com.loopers.domain.event; + +public record OrderItemSnapshot(long productId, int quantity, int price) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/ProductViewedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/ProductViewedEvent.java new file mode 100644 index 000000000..279670bb7 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/ProductViewedEvent.java @@ -0,0 +1,4 @@ +package com.loopers.domain.event; + +public record ProductViewedEvent(long productId, long memberId) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventOutboxJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventOutboxJpaRepository.java new file mode 100644 index 000000000..513a1ae3a --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventOutboxJpaRepository.java @@ -0,0 +1,8 @@ +package com.loopers.infrastructure.event; + +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import org.springframework.data.jpa.repository.JpaRepository; + +public interface EventOutboxJpaRepository extends JpaRepository, EventOutboxRepository { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/AsyncConfig.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/AsyncConfig.java new file mode 100644 index 000000000..6b61b126d --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/AsyncConfig.java @@ -0,0 +1,26 @@ +package com.loopers.infrastructure.kafka; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.scheduling.annotation.EnableAsync; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; 
+ +import java.util.concurrent.Executor; +import java.util.concurrent.ThreadPoolExecutor; + +@EnableAsync +@Configuration +public class AsyncConfig { + + @Bean("eventExecutor") + public Executor eventExecutor() { + ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + executor.setCorePoolSize(2); + executor.setMaxPoolSize(4); + executor.setQueueCapacity(100); + executor.setThreadNamePrefix("event-async-"); + executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy()); + executor.initialize(); + return executor; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/like/LikeController.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/like/LikeController.java index 075eefda2..a261247f0 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/like/LikeController.java +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/like/LikeController.java @@ -1,7 +1,6 @@ package com.loopers.interfaces.api.like; import com.loopers.application.like.LikeFacade; -import com.loopers.application.product.ProductCachePort; import com.loopers.domain.like.Like; import com.loopers.domain.member.Member; import com.loopers.interfaces.api.ApiResponse; @@ -18,21 +17,16 @@ public class LikeController { private final LikeFacade likeFacade; - private final ProductCachePort productCachePort; @PostMapping("/api/v1/products/{productId}/likes") public ApiResponse addLike(@AuthMember Member member, @PathVariable Long productId) { likeFacade.addLike(member.getId(), productId); - productCachePort.evictProductDetail(productId); - productCachePort.evictProductList(); return ApiResponse.success(null); } @DeleteMapping("/api/v1/products/{productId}/likes") public ApiResponse removeLike(@AuthMember Member member, @PathVariable Long productId) { likeFacade.removeLike(member.getId(), productId); - productCachePort.evictProductDetail(productId); - productCachePort.evictProductList(); return 
ApiResponse.success(null); } diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/CacheEvictionEventListener.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/CacheEvictionEventListener.java new file mode 100644 index 000000000..6d3046f75 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/CacheEvictionEventListener.java @@ -0,0 +1,38 @@ +package com.loopers.interfaces.listener; + +import com.loopers.application.product.ProductCachePort; +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.transaction.event.TransactionPhase; +import org.springframework.transaction.event.TransactionalEventListener; + +@Slf4j +@Component +@RequiredArgsConstructor +public class CacheEvictionEventListener { + + private final ProductCachePort productCachePort; + + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) + public void handleLikeCreated(LikeCreatedEvent event) { + try { + productCachePort.evictProductDetail(event.productId()); + productCachePort.evictProductList(); + } catch (Exception e) { + log.warn("캐시 무효화 실패 — best-effort", e); + } + } + + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) + public void handleLikeRemoved(LikeRemovedEvent event) { + try { + productCachePort.evictProductDetail(event.productId()); + productCachePort.evictProductList(); + } catch (Exception e) { + log.warn("캐시 무효화 실패 — best-effort", e); + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/LikeCountEventListener.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/LikeCountEventListener.java new file mode 100644 index 000000000..4b6dd2d79 --- /dev/null +++ 
b/apps/commerce-api/src/main/java/com/loopers/interfaces/listener/LikeCountEventListener.java @@ -0,0 +1,58 @@ +package com.loopers.interfaces.listener; + +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; +import com.loopers.domain.product.ProductRepository; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; +import org.springframework.transaction.PlatformTransactionManager; +import org.springframework.transaction.event.TransactionPhase; +import org.springframework.transaction.event.TransactionalEventListener; +import org.springframework.transaction.support.TransactionTemplate; + +import java.util.concurrent.Executor; + +@Slf4j +@Component +public class LikeCountEventListener { + + private final ProductRepository productRepository; + private final Executor eventExecutor; + private final TransactionTemplate transactionTemplate; + + public LikeCountEventListener( + ProductRepository productRepository, + @Qualifier("eventExecutor") Executor eventExecutor, + PlatformTransactionManager transactionManager) { + this.productRepository = productRepository; + this.eventExecutor = eventExecutor; + this.transactionTemplate = new TransactionTemplate(transactionManager); + } + + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) + public void handleLikeCreated(LikeCreatedEvent event) { + eventExecutor.execute(() -> { + try { + transactionTemplate.executeWithoutResult(status -> + productRepository.incrementLikeCount(event.productId()) + ); + } catch (Exception e) { + log.warn("incrementLikeCount 실패 — best-effort", e); + } + }); + } + + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) + public void handleLikeRemoved(LikeRemovedEvent event) { + eventExecutor.execute(() -> { + try { + transactionTemplate.executeWithoutResult(status -> + productRepository.decrementLikeCount(event.productId()) + ); + } 
catch (Exception e) { + log.warn("decrementLikeCount 실패 — best-effort", e); + } + }); + } +} From 61ac85a47182b93ba16c75c29bc8cc4b12f11815 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:41:39 +0900 Subject: [PATCH 02/14] =?UTF-8?q?feat:=20Kafka=20=EC=9D=B8=ED=94=84?= =?UTF-8?q?=EB=9D=BC=20=EC=84=A4=EC=A0=95=20=EB=B3=B4=EC=99=84=20+=20Debez?= =?UTF-8?q?ium=20CDC=20=EA=B5=AC=EC=84=B1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - kafka.yml: acks=all, idempotence, lz4 압축, isolation.level=read_committed - KafkaConfig: SINGLE_LISTENER ContainerFactory + DeadLetterPublishingRecoverer - KafkaTopicConfig: catalog-events, order-events, coupon-issue-requests 토픽 - infra-compose.yml: MySQL binlog 활성화 + kafka-connect(Debezium) 서비스 - register-debezium-connector.sh: Debezium MySQL Connector 등록 스크립트 - RedisConfig: commandTimeout 500ms 설정 - commerce-api: kafka 모듈 의존성 추가 --- apps/commerce-api/build.gradle.kts | 1 + .../kafka/KafkaTopicConfig.java | 34 ++++++++++++++ docker/infra-compose.yml | 38 +++++++++++++++- docker/register-debezium-connector.sh | 44 +++++++++++++++++++ .../com/loopers/confg/kafka/KafkaConfig.java | 33 ++++++++++++++ modules/kafka/src/main/resources/kafka.yml | 12 ++++- .../com/loopers/config/redis/RedisConfig.java | 4 +- 7 files changed, 162 insertions(+), 4 deletions(-) create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/KafkaTopicConfig.java create mode 100755 docker/register-debezium-connector.sh diff --git a/apps/commerce-api/build.gradle.kts b/apps/commerce-api/build.gradle.kts index 5700ca70a..ed4688011 100644 --- a/apps/commerce-api/build.gradle.kts +++ b/apps/commerce-api/build.gradle.kts @@ -2,6 +2,7 @@ dependencies { // add-ons implementation(project(":modules:jpa")) implementation(project(":modules:redis")) + implementation(project(":modules:kafka")) implementation(project(":supports:jackson")) 
implementation(project(":supports:logging")) implementation(project(":supports:monitoring")) diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/KafkaTopicConfig.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/KafkaTopicConfig.java new file mode 100644 index 000000000..aeb32a390 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/kafka/KafkaTopicConfig.java @@ -0,0 +1,34 @@ +package com.loopers.infrastructure.kafka; + +import org.apache.kafka.clients.admin.NewTopic; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.config.TopicBuilder; + +@Configuration +public class KafkaTopicConfig { + + @Bean + public NewTopic catalogEventsTopic() { + return TopicBuilder.name("catalog-events") + .partitions(3) + .replicas(1) + .build(); + } + + @Bean + public NewTopic orderEventsTopic() { + return TopicBuilder.name("order-events") + .partitions(3) + .replicas(1) + .build(); + } + + @Bean + public NewTopic couponIssueRequestsTopic() { + return TopicBuilder.name("coupon-issue-requests") + .partitions(3) + .replicas(1) + .build(); + } +} diff --git a/docker/infra-compose.yml b/docker/infra-compose.yml index 18e5fcf5f..c29f40501 100644 --- a/docker/infra-compose.yml +++ b/docker/infra-compose.yml @@ -2,6 +2,11 @@ version: '3' services: mysql: image: mysql:8.0 + command: + - --log-bin=mysql-bin + - --binlog-format=ROW + - --binlog-row-image=FULL + - --server-id=1 ports: - "3306:3306" environment: @@ -88,6 +93,35 @@ services: timeout: 5s retries: 10 + kafka-connect: + image: debezium/connect:2.5 + container_name: kafka-connect + depends_on: + kafka: + condition: service_healthy + mysql: + condition: service_started + ports: + - "8083:8083" + environment: + GROUP_ID: 1 + BOOTSTRAP_SERVERS: kafka:9092 + CONFIG_STORAGE_TOPIC: _connect_configs + OFFSET_STORAGE_TOPIC: _connect_offsets + STATUS_STORAGE_TOPIC: 
_connect_status + CONFIG_STORAGE_REPLICATION_FACTOR: 1 + OFFSET_STORAGE_REPLICATION_FACTOR: 1 + STATUS_STORAGE_REPLICATION_FACTOR: 1 + KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + KEY_CONVERTER_SCHEMAS_ENABLE: "false" + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8083/connectors"] + interval: 10s + timeout: 5s + retries: 10 + kafka-ui: image: provectuslabs/kafka-ui:latest container_name: kafka-ui @@ -99,6 +133,8 @@ services: environment: KAFKA_CLUSTERS_0_NAME: local # kafka-ui 에서 보이는 클러스터명 KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 # kafka-ui 가 연겷할 브로커 주소 + KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: debezium + KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect:8083 volumes: mysql-8-data: @@ -108,4 +144,4 @@ volumes: networks: default: - driver: bridge \ No newline at end of file + driver: bridge diff --git a/docker/register-debezium-connector.sh b/docker/register-debezium-connector.sh new file mode 100755 index 000000000..788e750ac --- /dev/null +++ b/docker/register-debezium-connector.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -e + +echo "Waiting for Kafka Connect to be ready..." +until curl -s http://localhost:8083/connectors > /dev/null 2>&1; do + sleep 2 +done + +echo "Registering Debezium MySQL Connector..." 
+curl -X POST http://localhost:8083/connectors \ + -H "Content-Type: application/json" \ + -d @- << 'EOF' +{ + "name": "loopers-outbox-connector", + "config": { + "connector.class": "io.debezium.connector.mysql.MySqlConnector", + "tasks.max": "1", + "database.hostname": "mysql", + "database.port": "3306", + "database.user": "root", + "database.password": "root", + "database.server.id": "184054", + "topic.prefix": "loopers", + "database.include.list": "loopers", + "table.include.list": "loopers.event_outbox", + "schema.history.internal.kafka.bootstrap.servers": "kafka:9092", + "schema.history.internal.kafka.topic": "_schema_history", + "transforms": "outbox", + "transforms.outbox.type": "io.debezium.transforms.outbox.EventRouter", + "transforms.outbox.table.field.event.id": "id", + "transforms.outbox.table.field.event.key": "aggregate_id", + "transforms.outbox.table.field.event.type": "event_type", + "transforms.outbox.table.field.event.payload": "payload", + "transforms.outbox.route.by.field": "aggregate_type", + "transforms.outbox.route.topic.replacement": "${routedByValue}-events", + "transforms.outbox.table.fields.additional.placement": "event_type:header:eventType", + "tombstones.on.delete": "false" + } +} +EOF + +echo "" +echo "Connector registered successfully!" 
+curl -s http://localhost:8083/connectors/loopers-outbox-connector/status | python3 -m json.tool diff --git a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java index a73842775..4aae07ac4 100644 --- a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java +++ b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java @@ -10,8 +10,11 @@ import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory; import org.springframework.kafka.core.*; import org.springframework.kafka.listener.ContainerProperties; +import org.springframework.kafka.listener.DeadLetterPublishingRecoverer; +import org.springframework.kafka.listener.DefaultErrorHandler; import org.springframework.kafka.support.converter.BatchMessagingMessageConverter; import org.springframework.kafka.support.converter.ByteArrayJsonMessageConverter; +import org.springframework.util.backoff.FixedBackOff; import java.util.HashMap; import java.util.Map; @@ -21,6 +24,7 @@ @EnableConfigurationProperties(KafkaProperties.class) public class KafkaConfig { public static final String BATCH_LISTENER = "BATCH_LISTENER_DEFAULT"; + public static final String SINGLE_LISTENER = "SINGLE_LISTENER_DEFAULT"; public static final int MAX_POLLING_SIZE = 3000; // read 3000 msg public static final int FETCH_MIN_BYTES = (1024 * 1024); // 1mb @@ -28,6 +32,7 @@ public class KafkaConfig { public static final int SESSION_TIMEOUT_MS = 60 * 1000; // session timeout = 1m public static final int HEARTBEAT_INTERVAL_MS = 20 * 1000; // heartbeat interval = 20s ( 1/3 of session_timeout ) public static final int MAX_POLL_INTERVAL_MS = 2 * 60 * 1000; // max poll interval = 2m + public static final int SINGLE_MAX_POLL_INTERVAL_MS = 10 * 60 * 1000; // max poll interval = 10m @Bean public ProducerFactory producerFactory(KafkaProperties kafkaProperties) { @@ -51,6 +56,12 @@ public ByteArrayJsonMessageConverter 
jsonMessageConverter(ObjectMapper objectMap return new ByteArrayJsonMessageConverter(objectMapper); } + @Bean + public DefaultErrorHandler kafkaErrorHandler(KafkaTemplate kafkaTemplate) { + DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(kafkaTemplate); + return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 3)); + } + @Bean(name = BATCH_LISTENER) public ConcurrentKafkaListenerContainerFactory defaultBatchListenerContainerFactory( KafkaProperties kafkaProperties, @@ -72,4 +83,26 @@ public ConcurrentKafkaListenerContainerFactory defaultBatchListe factory.setBatchListener(true); return factory; } + + @Bean(name = SINGLE_LISTENER) + public ConcurrentKafkaListenerContainerFactory singleListenerContainerFactory( + KafkaProperties kafkaProperties, + ByteArrayJsonMessageConverter converter, + DefaultErrorHandler kafkaErrorHandler + ) { + Map consumerConfig = new HashMap<>(kafkaProperties.buildConsumerProperties()); + consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1); + consumerConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, SESSION_TIMEOUT_MS); + consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, HEARTBEAT_INTERVAL_MS); + consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, SINGLE_MAX_POLL_INTERVAL_MS); + + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig)); + factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL); + factory.setRecordMessageConverter(converter); + factory.setConcurrency(1); + factory.setBatchListener(false); + factory.setCommonErrorHandler(kafkaErrorHandler); + return factory; + } } diff --git a/modules/kafka/src/main/resources/kafka.yml b/modules/kafka/src/main/resources/kafka.yml index 9609dbf85..0790bfb89 100644 --- a/modules/kafka/src/main/resources/kafka.yml +++ b/modules/kafka/src/main/resources/kafka.yml @@ -14,13 
+14,21 @@ spring: producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.springframework.kafka.support.serializer.JsonSerializer - retries: 3 + acks: all + properties: + enable.idempotence: true + delivery.timeout.ms: 120000 + linger.ms: 50 + batch.size: 32768 + compression.type: lz4 consumer: group-id: loopers-default-consumer key-deserializer: org.apache.kafka.common.serialization.StringDeserializer - value-serializer: org.apache.kafka.common.serialization.ByteArrayDeserializer + value-deserializer: org.apache.kafka.common.serialization.ByteArrayDeserializer + auto-offset-reset: earliest properties: enable-auto-commit: false + isolation.level: read_committed listener: ack-mode: manual diff --git a/modules/redis/src/main/java/com/loopers/config/redis/RedisConfig.java b/modules/redis/src/main/java/com/loopers/config/redis/RedisConfig.java index 0a2b614ca..a4ff8e076 100644 --- a/modules/redis/src/main/java/com/loopers/config/redis/RedisConfig.java +++ b/modules/redis/src/main/java/com/loopers/config/redis/RedisConfig.java @@ -13,6 +13,7 @@ import org.springframework.data.redis.core.RedisTemplate; import org.springframework.data.redis.serializer.StringRedisSerializer; +import java.time.Duration; import java.util.List; import java.util.function.Consumer; @@ -75,7 +76,8 @@ private LettuceConnectionFactory lettuceConnectionFactory( List replicas, Consumer customizer ){ - LettuceClientConfiguration.LettuceClientConfigurationBuilder builder = LettuceClientConfiguration.builder(); + LettuceClientConfiguration.LettuceClientConfigurationBuilder builder = LettuceClientConfiguration.builder() + .commandTimeout(Duration.ofMillis(500)); if(customizer != null) customizer.accept(builder); LettuceClientConfiguration clientConfig = builder.build(); RedisStaticMasterReplicaConfiguration masterReplicaConfig = new RedisStaticMasterReplicaConfiguration(master.host(), master.port()); From df5fb13dd78b58c8bc1251dfaa5c5fffbc32c9f4 Mon Sep 17 
00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:41:55 +0900 Subject: [PATCH 03/14] =?UTF-8?q?feat:=20Metrics=20Consumer=20+=20?= =?UTF-8?q?=EC=A1=B0=ED=9A=8C=EC=88=98/=EC=A3=BC=EB=AC=B8=20=EC=9D=B4?= =?UTF-8?q?=EB=B2=A4=ED=8A=B8=20=EB=B0=9C=ED=96=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - ProductMetrics Entity: 좋아요·조회수·판매량 집계 테이블 - EventHandled Entity: 멱등 소비 보장용 처리 이력 - MetricsConsumer: catalog-events BATCH 수신 → UPSERT product_metrics - ProductViewKafkaPublisher: @Async AFTER_COMMIT → catalog-events 발행 - ProductFacade: 상품 조회 시 ProductViewedEvent 발행 - OrderFacade: 주문 생성/취소 시 이벤트 발행 + Outbox 저장 --- .../event/ProductViewKafkaPublisher.java | 40 +++++ .../application/order/OrderFacade.java | 45 ++++++ .../application/product/ProductFacade.java | 5 + .../loopers/domain/event/EventHandled.java | 40 +++++ .../domain/metrics/ProductMetrics.java | 40 +++++ .../interfaces/consumer/MetricsConsumer.java | 143 ++++++++++++++++++ .../src/main/resources/application.yml | 3 - 7 files changed, 313 insertions(+), 3 deletions(-) create mode 100644 apps/commerce-api/src/main/java/com/loopers/application/event/ProductViewKafkaPublisher.java create mode 100644 apps/commerce-streamer/src/main/java/com/loopers/domain/event/EventHandled.java create mode 100644 apps/commerce-streamer/src/main/java/com/loopers/domain/metrics/ProductMetrics.java create mode 100644 apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/MetricsConsumer.java diff --git a/apps/commerce-api/src/main/java/com/loopers/application/event/ProductViewKafkaPublisher.java b/apps/commerce-api/src/main/java/com/loopers/application/event/ProductViewKafkaPublisher.java new file mode 100644 index 000000000..ca9e830c3 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/application/event/ProductViewKafkaPublisher.java @@ -0,0 +1,40 @@ +package com.loopers.application.event; + +import 
com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.ProductViewedEvent; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.scheduling.annotation.Async; +import org.springframework.stereotype.Component; +import org.springframework.transaction.event.TransactionPhase; +import org.springframework.transaction.event.TransactionalEventListener; + +import java.util.Map; +import java.util.UUID; + +@Slf4j +@Component +@RequiredArgsConstructor +public class ProductViewKafkaPublisher { + + private final KafkaTemplate kafkaTemplate; + private final ObjectMapper objectMapper; + + @Async("eventExecutor") + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT, fallbackExecution = true) + public void handle(ProductViewedEvent event) { + try { + String payload = objectMapper.writeValueAsString(Map.of( + "eventId", UUID.randomUUID().toString(), + "eventType", "PRODUCT_VIEWED", + "productId", event.productId(), + "memberId", event.memberId() + )); + kafkaTemplate.send("catalog-events", String.valueOf(event.productId()), payload); + } catch (JsonProcessingException e) { + log.warn("조회수 이벤트 Kafka 발행 실패 — productId={}", event.productId(), e); + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java index 651704727..767d276a8 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java @@ -1,9 +1,16 @@ package com.loopers.application.order; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import com.loopers.application.coupon.CouponApplyResult; import 
com.loopers.application.coupon.CouponFacade; import com.loopers.domain.brand.Brand; import com.loopers.domain.brand.BrandRepository; +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.OrderCancelledEvent; +import com.loopers.domain.event.OrderCreatedEvent; +import com.loopers.domain.event.OrderItemSnapshot; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderItem; import com.loopers.domain.order.OrderRepository; @@ -12,6 +19,7 @@ import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; +import org.springframework.context.ApplicationEventPublisher; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @@ -32,6 +40,9 @@ public class OrderFacade { private final ProductRepository productRepository; private final BrandRepository brandRepository; private final CouponFacade couponFacade; + private final EventOutboxRepository eventOutboxRepository; + private final ApplicationEventPublisher applicationEventPublisher; + private final ObjectMapper objectMapper; @Transactional public Order createOrder(Long memberId, List itemRequests) { @@ -109,6 +120,17 @@ public Order createOrder(Long memberId, List itemRequests, Lon couponFacade.linkCouponToOrder(resolvedCouponIssueId, order.getId()); } + // 8. 
Outbox INSERT + 이벤트 발행 + List eventItems = order.getItems().stream() + .map(item -> new OrderItemSnapshot(item.getProductId(), item.getQuantity(), item.getProductPrice())) + .toList(); + + EventOutbox outbox = EventOutbox.create("order", String.valueOf(order.getId()), + "ORDER_CREATED", buildOrderPayload(order.getId(), memberId, eventItems)); + eventOutboxRepository.save(outbox); + + applicationEventPublisher.publishEvent(new OrderCreatedEvent(order.getId(), memberId, eventItems)); + return order; } @@ -152,6 +174,17 @@ public void cancelOrder(Long orderId, Long memberId) { if (order.getCouponIssueId() != null) { couponFacade.restoreCoupon(order.getCouponIssueId()); } + + // Outbox INSERT + 이벤트 발행 + List eventItems = order.getItems().stream() + .map(item -> new OrderItemSnapshot(item.getProductId(), item.getQuantity(), item.getProductPrice())) + .toList(); + + EventOutbox outbox = EventOutbox.create("order", String.valueOf(orderId), + "ORDER_CANCELLED", buildOrderPayload(orderId, memberId, eventItems)); + eventOutboxRepository.save(outbox); + + applicationEventPublisher.publishEvent(new OrderCancelledEvent(orderId, memberId, eventItems)); } public List getOrdersByMemberId(Long memberId, ZonedDateTime startAt, ZonedDateTime endAt) { @@ -166,4 +199,16 @@ public List getAllOrders() { } public record OrderItemRequest(Long productId, int quantity) {} + + private String buildOrderPayload(Long orderId, Long memberId, List items) { + try { + return objectMapper.writeValueAsString(Map.of( + "orderId", orderId, + "memberId", memberId, + "items", items + )); + } catch (JsonProcessingException e) { + throw new RuntimeException("주문 이벤트 페이로드 직렬화 실패", e); + } + } } diff --git a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java index 796d5437b..7a58a76fa 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java +++ 
b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java @@ -1,5 +1,6 @@ package com.loopers.application.product; +import com.loopers.domain.event.ProductViewedEvent; import com.loopers.domain.like.LikeRepository; import com.loopers.domain.product.Product; import com.loopers.domain.product.ProductRepository; @@ -12,6 +13,7 @@ import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; +import org.springframework.context.ApplicationEventPublisher; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Pageable; @@ -31,6 +33,7 @@ public class ProductFacade { private final BrandRepository brandRepository; private final LikeRepository likeRepository; private final ProductCachePort productCachePort; + private final ApplicationEventPublisher applicationEventPublisher; // ── 상품 상세 (캐시 적용) ── @@ -45,12 +48,14 @@ public ProductWithBrand getProductDetail(Long productId) { public ProductDto.ProductResponse getProductDetailCached(Long productId) { ProductDto.ProductResponse cached = productCachePort.getProductDetail(productId); if (cached != null) { + applicationEventPublisher.publishEvent(new ProductViewedEvent(productId, 0L)); return cached; } ProductWithBrand info = getProductDetail(productId); ProductDto.ProductResponse response = ProductDto.ProductResponse.from(info); productCachePort.putProductDetail(productId, response); + applicationEventPublisher.publishEvent(new ProductViewedEvent(productId, 0L)); return response; } diff --git a/apps/commerce-streamer/src/main/java/com/loopers/domain/event/EventHandled.java b/apps/commerce-streamer/src/main/java/com/loopers/domain/event/EventHandled.java new file mode 100644 index 000000000..8c8ad6e87 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/domain/event/EventHandled.java @@ -0,0 +1,40 @@ +package com.loopers.domain.event; + 
+import jakarta.persistence.*; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.NoArgsConstructor; + +import java.time.ZonedDateTime; + +@Entity +@Table(name = "event_handled", uniqueConstraints = { + @UniqueConstraint(name = "uk_event_handled_event_id", columnNames = "event_id") +}) +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class EventHandled { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "event_id", nullable = false, length = 100) + private String eventId; + + @Column(name = "event_type", nullable = false, length = 50) + private String eventType; + + @Column(name = "created_at", nullable = false, updatable = false) + private ZonedDateTime createdAt; + + @PrePersist + private void prePersist() { + this.createdAt = ZonedDateTime.now(); + } + + public EventHandled(String eventId, String eventType) { + this.eventId = eventId; + this.eventType = eventType; + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/domain/metrics/ProductMetrics.java b/apps/commerce-streamer/src/main/java/com/loopers/domain/metrics/ProductMetrics.java new file mode 100644 index 000000000..3e7dfa916 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/domain/metrics/ProductMetrics.java @@ -0,0 +1,40 @@ +package com.loopers.domain.metrics; + +import jakarta.persistence.*; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.NoArgsConstructor; + +import java.time.ZonedDateTime; + +@Entity +@Table(name = "product_metrics") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class ProductMetrics { + + @Id + @Column(name = "product_id") + private Long productId; + + @Column(name = "like_count", nullable = false) + private long likeCount; + + @Column(name = "view_count", nullable = false) + private long viewCount; + + @Column(name = "sales_count", nullable = false) + private long salesCount; + + @Column(name = "sales_amount", nullable = 
false) + private long salesAmount; + + @Column(name = "updated_at", nullable = false) + private ZonedDateTime updatedAt; + + @PrePersist + @PreUpdate + private void onPersist() { + this.updatedAt = ZonedDateTime.now(); + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/MetricsConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/MetricsConsumer.java new file mode 100644 index 000000000..8dd65f07f --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/MetricsConsumer.java @@ -0,0 +1,143 @@ +package com.loopers.interfaces.consumer; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.confg.kafka.KafkaConfig; +import jakarta.persistence.EntityManager; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.stereotype.Component; +import org.springframework.transaction.PlatformTransactionManager; +import org.springframework.transaction.support.TransactionTemplate; + +import java.util.List; + +@Slf4j +@Component +@RequiredArgsConstructor +public class MetricsConsumer { + + private final EntityManager entityManager; + private final PlatformTransactionManager transactionManager; + private final ObjectMapper objectMapper; + + @KafkaListener( + topics = {"catalog-events", "order-events"}, + groupId = "metrics-collector", + containerFactory = KafkaConfig.BATCH_LISTENER + ) + public void consume(List> records, Acknowledgment ack) { + TransactionTemplate tx = new TransactionTemplate(transactionManager); + + for (ConsumerRecord record : records) { + try { + tx.executeWithoutResult(status -> processRecord(record)); + } catch (Exception e) { + log.error("MetricsConsumer 처리 실패 — topic={}, 
offset={}", record.topic(), record.offset(), e); + } + } + + ack.acknowledge(); + } + + private void processRecord(ConsumerRecord record) { + try { + JsonNode payload = objectMapper.readTree(record.value()); + String eventId = extractEventId(record, payload); + String eventType = extractEventType(record, payload); + + // INSERT-first 멱등 패턴: event_handled에 먼저 삽입 시도 + int inserted = entityManager.createNativeQuery( + "INSERT IGNORE INTO event_handled (event_id, event_type, created_at) VALUES (:eventId, :eventType, NOW(6))" + ).setParameter("eventId", eventId) + .setParameter("eventType", eventType) + .executeUpdate(); + + if (inserted == 0) { + log.debug("이미 처리된 이벤트 — eventId={}", eventId); + return; + } + + switch (eventType) { + case "LIKE_CREATED" -> upsertLikeCount(payload, 1); + case "LIKE_REMOVED" -> upsertLikeCount(payload, -1); + case "PRODUCT_VIEWED" -> upsertViewCount(payload); + case "ORDER_CREATED" -> upsertSalesMetrics(payload, 1); + case "ORDER_CANCELLED" -> upsertSalesMetrics(payload, -1); + default -> log.warn("알 수 없는 이벤트 타입: {}", eventType); + } + } catch (Exception e) { + throw new RuntimeException("이벤트 처리 실패", e); + } + } + + private void upsertLikeCount(JsonNode payload, int delta) { + long productId = payload.get("productId").asLong(); + entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, view_count, sales_count, sales_amount, updated_at) " + + "VALUES (:productId, :delta, 0, 0, 0, NOW(6)) " + + "ON DUPLICATE KEY UPDATE like_count = like_count + :delta, updated_at = NOW(6)" + ).setParameter("productId", productId) + .setParameter("delta", delta) + .executeUpdate(); + } + + private void upsertViewCount(JsonNode payload) { + long productId = payload.get("productId").asLong(); + entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, view_count, sales_count, sales_amount, updated_at) " + + "VALUES (:productId, 0, 1, 0, 0, NOW(6)) " + + "ON DUPLICATE KEY UPDATE view_count = 
view_count + 1, updated_at = NOW(6)" + ).setParameter("productId", productId) + .executeUpdate(); + } + + private void upsertSalesMetrics(JsonNode payload, int direction) { + JsonNode items = payload.get("items"); + if (items == null || !items.isArray()) return; + + for (JsonNode item : items) { + long productId = item.get("productId").asLong(); + int quantity = item.get("quantity").asInt(); + int price = item.get("price").asInt(); + long amount = (long) quantity * price; + + entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, view_count, sales_count, sales_amount, updated_at) " + + "VALUES (:productId, 0, 0, :salesCount, :salesAmount, NOW(6)) " + + "ON DUPLICATE KEY UPDATE " + + "sales_count = sales_count + :salesCount, " + + "sales_amount = sales_amount + :salesAmount, " + + "updated_at = NOW(6)" + ).setParameter("productId", productId) + .setParameter("salesCount", quantity * direction) + .setParameter("salesAmount", amount * direction) + .executeUpdate(); + } + } + + private String extractEventId(ConsumerRecord record, JsonNode payload) { + // Debezium Outbox: header에 id가 포함됨, 직접 발행: payload에 eventId + if (payload.has("eventId")) { + return payload.get("eventId").asText(); + } + // fallback: topic + partition + offset 조합 + return record.topic() + "-" + record.partition() + "-" + record.offset(); + } + + private String extractEventType(ConsumerRecord record, JsonNode payload) { + if (payload.has("eventType")) { + return payload.get("eventType").asText(); + } + // Debezium header에서 eventType 추출 시도 + var headers = record.headers(); + var eventTypeHeader = headers.lastHeader("eventType"); + if (eventTypeHeader != null) { + return new String(eventTypeHeader.value()); + } + return "UNKNOWN"; + } +} diff --git a/apps/commerce-streamer/src/main/resources/application.yml b/apps/commerce-streamer/src/main/resources/application.yml index 0651bc2bd..2f6275748 100644 --- a/apps/commerce-streamer/src/main/resources/application.yml +++ 
b/apps/commerce-streamer/src/main/resources/application.yml @@ -25,9 +25,6 @@ spring: - logging.yml - monitoring.yml -demo-kafka: - test: - topic-name: demo.internal.topic-v1 --- spring: From 196bce91b4ab27f0c2f5505c62b5d956c95f17b3 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:42:13 +0900 Subject: [PATCH 04/14] =?UTF-8?q?feat:=20=EC=84=A0=EC=B0=A9=EC=88=9C=20?= =?UTF-8?q?=EC=BF=A0=ED=8F=B0=20=EB=B9=84=EB=8F=99=EA=B8=B0=20=EB=B0=9C?= =?UTF-8?q?=EA=B8=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - CouponIssueRequest Entity: 발급 요청 상태 추적 (PENDING → COMPLETED/REJECTED) - CouponFacade: 요청 저장 → Kafka 발행 (coupon-issue-requests) - CouponController: POST /issue-request, GET /issue-requests/{id} 엔드포인트 - Coupon: maxIssuanceCount/issuedCount 필드 추가 - CouponIssue: (coupon_id, member_id) UNIQUE 제약 추가 - CouponIssueConsumer: CAS UPDATE 기반 동시성 안전 발급 처리 --- .../application/coupon/CouponFacade.java | 41 ++++++ .../com/loopers/domain/coupon/Coupon.java | 6 + .../loopers/domain/coupon/CouponIssue.java | 2 + .../domain/coupon/CouponIssueRequest.java | 51 ++++++++ .../coupon/CouponIssueRequestRepository.java | 8 ++ .../coupon/CouponIssueRequestStatus.java | 7 ++ .../CouponIssueRequestJpaRepository.java | 8 ++ .../api/coupon/CouponController.java | 20 +++ .../interfaces/api/coupon/CouponDto.java | 22 ++++ .../consumer/CouponIssueConsumer.java | 118 ++++++++++++++++++ 10 files changed, 283 insertions(+) create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequest.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestRepository.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestStatus.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java create mode 100644 
apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java diff --git a/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java index 75155e340..dc7af5471 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java @@ -1,15 +1,19 @@ package com.loopers.application.coupon; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import com.loopers.domain.coupon.*; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; +import org.springframework.kafka.core.KafkaTemplate; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Map; @Service @RequiredArgsConstructor @@ -18,6 +22,9 @@ public class CouponFacade { private final CouponRepository couponRepository; private final CouponIssueRepository couponIssueRepository; + private final CouponIssueRequestRepository couponIssueRequestRepository; + private final KafkaTemplate kafkaTemplate; + private final ObjectMapper objectMapper; private final Clock clock; // ── Admin: 쿠폰 템플릿 CRUD ── @@ -130,6 +137,40 @@ public void restoreCoupon(Long couponIssueId) { couponIssue.cancelUse(ZonedDateTime.now(clock)); } + // ── 대고객: 선착순 쿠폰 발급 요청 ── + + @Transactional + public CouponIssueRequest requestCouponIssue(Long couponId, Long memberId) { + Coupon coupon = couponRepository.findById(couponId) + .orElseThrow(() -> new CoreException(ErrorType.NOT_FOUND, "쿠폰을 찾을 수 없습니다.")); + + ZonedDateTime now = ZonedDateTime.now(clock); + if (now.isAfter(coupon.getExpiredAt())) { + throw new 
CoreException(ErrorType.BAD_REQUEST, "만료된 쿠폰은 발급 요청할 수 없습니다."); + } + + CouponIssueRequest request = couponIssueRequestRepository.save( + CouponIssueRequest.create(couponId, memberId)); + + try { + String payload = objectMapper.writeValueAsString(Map.of( + "requestId", request.getId(), + "couponId", couponId, + "memberId", memberId + )); + kafkaTemplate.send("coupon-issue-requests", String.valueOf(couponId), payload); + } catch (JsonProcessingException e) { + throw new RuntimeException("쿠폰 발급 요청 페이로드 직렬화 실패", e); + } + + return request; + } + + public CouponIssueRequest getIssueRequest(Long requestId) { + return couponIssueRequestRepository.findById(requestId) + .orElseThrow(() -> new CoreException(ErrorType.NOT_FOUND, "쿠폰 발급 요청을 찾을 수 없습니다.")); + } + public ZonedDateTime now() { return ZonedDateTime.now(clock); } diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/Coupon.java b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/Coupon.java index 04bc6ef62..b290f88ee 100644 --- a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/Coupon.java +++ b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/Coupon.java @@ -32,6 +32,12 @@ public class Coupon extends BaseEntity { @Column(name = "expired_at", nullable = false) private ZonedDateTime expiredAt; + @Column(name = "max_issuance_count") + private Integer maxIssuanceCount; + + @Column(name = "issued_count", nullable = false) + private int issuedCount; + public Coupon(String name, DiscountType discountType, int discountValue, int minOrderAmount, ZonedDateTime expiredAt) { validateDiscountValue(discountType, discountValue); this.name = name; diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssue.java b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssue.java index b1ce45529..0d9ede34a 100644 --- a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssue.java +++ 
b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssue.java @@ -13,6 +13,8 @@ @Table(name = "coupon_issue", indexes = { @Index(name = "idx_coupon_issue_member_id", columnList = "member_id"), @Index(name = "idx_coupon_issue_coupon_id", columnList = "coupon_id") +}, uniqueConstraints = { + @UniqueConstraint(name = "uk_coupon_issue_coupon_member", columnNames = {"coupon_id", "member_id"}) }) @Getter @NoArgsConstructor(access = AccessLevel.PROTECTED) diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequest.java b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequest.java new file mode 100644 index 000000000..04bb55d28 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequest.java @@ -0,0 +1,51 @@ +package com.loopers.domain.coupon; + +import jakarta.persistence.*; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.NoArgsConstructor; + +import java.time.ZonedDateTime; + +@Entity +@Table(name = "coupon_issue_request") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class CouponIssueRequest { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "coupon_id", nullable = false) + private Long couponId; + + @Column(name = "member_id", nullable = false) + private Long memberId; + + @Enumerated(EnumType.STRING) + @Column(nullable = false) + private CouponIssueRequestStatus status; + + @Column(name = "reject_reason") + private String rejectReason; + + @Column(name = "created_at", nullable = false, updatable = false) + private ZonedDateTime createdAt; + + @Column(name = "completed_at") + private ZonedDateTime completedAt; + + @PrePersist + private void prePersist() { + this.createdAt = ZonedDateTime.now(); + } + + public static CouponIssueRequest create(Long couponId, Long memberId) { + CouponIssueRequest request = new CouponIssueRequest(); + request.couponId = couponId; + 
request.memberId = memberId; + request.status = CouponIssueRequestStatus.PENDING; + return request; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestRepository.java b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestRepository.java new file mode 100644 index 000000000..bf23ee81a --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestRepository.java @@ -0,0 +1,8 @@ +package com.loopers.domain.coupon; + +import java.util.Optional; + +public interface CouponIssueRequestRepository { + CouponIssueRequest save(CouponIssueRequest request); + Optional findById(Long id); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestStatus.java b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestStatus.java new file mode 100644 index 000000000..9e7e56480 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/coupon/CouponIssueRequestStatus.java @@ -0,0 +1,7 @@ +package com.loopers.domain.coupon; + +public enum CouponIssueRequestStatus { + PENDING, + COMPLETED, + REJECTED +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java new file mode 100644 index 000000000..58269ced3 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java @@ -0,0 +1,8 @@ +package com.loopers.infrastructure.coupon; + +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; +import org.springframework.data.jpa.repository.JpaRepository; + +public interface CouponIssueRequestJpaRepository extends JpaRepository, CouponIssueRequestRepository { +} diff --git 
a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java index 878d148bb..71d765a75 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java @@ -2,6 +2,7 @@ import com.loopers.application.coupon.CouponFacade; import com.loopers.domain.coupon.CouponIssue; +import com.loopers.domain.coupon.CouponIssueRequest; import com.loopers.domain.member.Member; import com.loopers.interfaces.api.ApiResponse; import com.loopers.support.auth.AuthMember; @@ -29,6 +30,25 @@ public ApiResponse issueCoupon( return ApiResponse.success(CouponDto.CouponIssueResponse.from(couponIssue, now)); } + @PostMapping("/api/v1/coupons/{couponId}/issue-request") + @ResponseStatus(HttpStatus.CREATED) + public ApiResponse requestCouponIssue( + @AuthMember Member member, + @PathVariable Long couponId + ) { + CouponIssueRequest request = couponFacade.requestCouponIssue(couponId, member.getId()); + return ApiResponse.success(CouponDto.CouponIssueRequestResponse.from(request)); + } + + @GetMapping("/api/v1/coupons/issue-requests/{requestId}") + public ApiResponse getIssueRequest( + @AuthMember Member member, + @PathVariable Long requestId + ) { + CouponIssueRequest request = couponFacade.getIssueRequest(requestId); + return ApiResponse.success(CouponDto.CouponIssueRequestResponse.from(request)); + } + @GetMapping("/api/v1/users/me/coupons") public ApiResponse> getMyCoupons( @AuthMember Member member diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponDto.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponDto.java index f6fc6b74b..69c148da4 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponDto.java +++ 
b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponDto.java @@ -45,6 +45,28 @@ public static CouponResponse from(Coupon coupon) { } } + public record CouponIssueRequestResponse( + Long requestId, + Long couponId, + Long memberId, + String status, + String rejectReason, + ZonedDateTime createdAt, + ZonedDateTime completedAt + ) { + public static CouponIssueRequestResponse from(CouponIssueRequest request) { + return new CouponIssueRequestResponse( + request.getId(), + request.getCouponId(), + request.getMemberId(), + request.getStatus().name(), + request.getRejectReason(), + request.getCreatedAt(), + request.getCompletedAt() + ); + } + } + public record CouponIssueResponse( Long id, Long couponId, diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java new file mode 100644 index 000000000..0fa2b7e6a --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java @@ -0,0 +1,118 @@ +package com.loopers.interfaces.consumer; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.confg.kafka.KafkaConfig; +import jakarta.persistence.EntityManager; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.stereotype.Component; +import org.springframework.transaction.PlatformTransactionManager; +import org.springframework.transaction.support.TransactionTemplate; + +@Slf4j +@Component +@RequiredArgsConstructor +public class CouponIssueConsumer { + + private final EntityManager entityManager; + private final PlatformTransactionManager transactionManager; + private final 
ObjectMapper objectMapper; + + @KafkaListener( + topics = "coupon-issue-requests", + groupId = "coupon-issuer", + containerFactory = KafkaConfig.SINGLE_LISTENER + ) + public void consume(ConsumerRecord record, Acknowledgment ack) { + TransactionTemplate tx = new TransactionTemplate(transactionManager); + + try { + tx.executeWithoutResult(status -> processRecord(record)); + } catch (Exception e) { + log.error("CouponIssueConsumer 처리 실패 — offset={}", record.offset(), e); + } + + ack.acknowledge(); + } + + private void processRecord(ConsumerRecord record) { + try { + JsonNode payload = objectMapper.readTree(record.value()); + long requestId = payload.get("requestId").asLong(); + long couponId = payload.get("couponId").asLong(); + long memberId = payload.get("memberId").asLong(); + + String eventId = "coupon-issue-" + requestId; + + // INSERT-first 멱등 패턴 + int inserted = entityManager.createNativeQuery( + "INSERT IGNORE INTO event_handled (event_id, event_type, created_at) VALUES (:eventId, 'COUPON_ISSUE', NOW(6))" + ).setParameter("eventId", eventId) + .executeUpdate(); + + if (inserted == 0) { + log.debug("이미 처리된 쿠폰 발급 요청 — requestId={}", requestId); + return; + } + + // CAS UPDATE: issued_count 증가 (수량 확인) + int casResult = entityManager.createNativeQuery( + "UPDATE coupon SET issued_count = issued_count + 1 " + + "WHERE id = :couponId " + + "AND (max_issuance_count IS NULL OR issued_count < max_issuance_count) " + + "AND deleted_at IS NULL" + ).setParameter("couponId", couponId) + .executeUpdate(); + + if (casResult == 0) { + rejectRequest(requestId, "수량 소진"); + return; + } + + // coupon_issue INSERT (UNIQUE 제약으로 중복 방지) + try { + entityManager.createNativeQuery( + "INSERT INTO coupon_issue (coupon_id, member_id, status, expired_at, created_at) " + + "SELECT :couponId, :memberId, 'AVAILABLE', c.expired_at, NOW(6) " + + "FROM coupon c WHERE c.id = :couponId" + ).setParameter("couponId", couponId) + .setParameter("memberId", memberId) + .executeUpdate(); + } catch 
(Exception e) { + // UNIQUE 제약 위반 → 중복 발급 시도 + entityManager.createNativeQuery( + "UPDATE coupon SET issued_count = issued_count - 1 WHERE id = :couponId" + ).setParameter("couponId", couponId) + .executeUpdate(); + rejectRequest(requestId, "이미 발급된 쿠폰"); + return; + } + + // 성공 상태 업데이트 + entityManager.createNativeQuery( + "UPDATE coupon_issue_request SET status = 'COMPLETED', completed_at = NOW(6) " + + "WHERE id = :requestId" + ).setParameter("requestId", requestId) + .executeUpdate(); + + log.info("쿠폰 발급 완료 — requestId={}, couponId={}, memberId={}", requestId, couponId, memberId); + + } catch (Exception e) { + throw new RuntimeException("쿠폰 발급 처리 실패", e); + } + } + + private void rejectRequest(long requestId, String reason) { + entityManager.createNativeQuery( + "UPDATE coupon_issue_request SET status = 'REJECTED', reject_reason = :reason, completed_at = NOW(6) " + + "WHERE id = :requestId" + ).setParameter("requestId", requestId) + .setParameter("reason", reason) + .executeUpdate(); + log.info("쿠폰 발급 거절 — requestId={}, reason={}", requestId, reason); + } +} From dafedc138f93219102ca16ac06727147cc1ad7e1 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:43:00 +0900 Subject: [PATCH 05/14] =?UTF-8?q?feat:=20=EB=B0=B0=EC=B9=98=20=EC=A7=84?= =?UTF-8?q?=ED=99=94=20=E2=80=94=20MetricsReconcile=20+=20Outbox/EventHand?= =?UTF-8?q?led=20Cleanup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - MetricsReconcileJobConfig: like_count + sales 정합성 보정 (LikeCountSync 대체) - OutboxCleanupJobConfig: 7일 이상 event_outbox 레코드 정리 - EventHandledCleanupJobConfig: 7일 이상 event_handled 레코드 정리 - LikeCountSyncJobConfig/Tasklet 삭제 (MetricsReconcile로 통합) --- .../EventHandledCleanupJobConfig.java | 49 +++++++++++++++++ .../step/EventHandledCleanupTasklet.java | 28 ++++++++++ .../step/LikeCountSyncTasklet.java | 38 ------------- .../MetricsReconcileJobConfig.java | 49 +++++++++++++++++ 
.../step/MetricsReconcileTasklet.java | 55 +++++++++++++++++++ .../OutboxCleanupJobConfig.java} | 26 ++++----- .../step/OutboxCleanupTasklet.java | 28 ++++++++++ 7 files changed, 222 insertions(+), 51 deletions(-) create mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/EventHandledCleanupJobConfig.java create mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/step/EventHandledCleanupTasklet.java delete mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/step/LikeCountSyncTasklet.java create mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/MetricsReconcileJobConfig.java create mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/step/MetricsReconcileTasklet.java rename apps/commerce-batch/src/main/java/com/loopers/batch/job/{likecountsync/LikeCountSyncJobConfig.java => outboxcleanup/OutboxCleanupJobConfig.java} (68%) create mode 100644 apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/step/OutboxCleanupTasklet.java diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/EventHandledCleanupJobConfig.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/EventHandledCleanupJobConfig.java new file mode 100644 index 000000000..a0a4b5b80 --- /dev/null +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/EventHandledCleanupJobConfig.java @@ -0,0 +1,49 @@ +package com.loopers.batch.job.eventhandledcleanup; + +import com.loopers.batch.job.eventhandledcleanup.step.EventHandledCleanupTasklet; +import com.loopers.batch.listener.JobListener; +import com.loopers.batch.listener.StepMonitorListener; +import lombok.RequiredArgsConstructor; +import org.springframework.batch.core.Job; +import org.springframework.batch.core.Step; +import org.springframework.batch.core.configuration.annotation.JobScope; 
+import org.springframework.batch.core.job.builder.JobBuilder; +import org.springframework.batch.core.launch.support.RunIdIncrementer; +import org.springframework.batch.core.repository.JobRepository; +import org.springframework.batch.core.step.builder.StepBuilder; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.transaction.PlatformTransactionManager; + +@ConditionalOnProperty(name = "spring.batch.job.name", havingValue = EventHandledCleanupJobConfig.JOB_NAME) +@RequiredArgsConstructor +@Configuration +public class EventHandledCleanupJobConfig { + public static final String JOB_NAME = "eventHandledCleanupJob"; + private static final String STEP_NAME = "eventHandledCleanupStep"; + + private final JobRepository jobRepository; + private final JobListener jobListener; + private final StepMonitorListener stepMonitorListener; + private final EventHandledCleanupTasklet eventHandledCleanupTasklet; + private final PlatformTransactionManager transactionManager; + + @Bean(JOB_NAME) + public Job eventHandledCleanupJob() { + return new JobBuilder(JOB_NAME, jobRepository) + .incrementer(new RunIdIncrementer()) + .start(eventHandledCleanupStep()) + .listener(jobListener) + .build(); + } + + @JobScope + @Bean(STEP_NAME) + public Step eventHandledCleanupStep() { + return new StepBuilder(STEP_NAME, jobRepository) + .tasklet(eventHandledCleanupTasklet, transactionManager) + .listener(stepMonitorListener) + .build(); + } +} diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/step/EventHandledCleanupTasklet.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/step/EventHandledCleanupTasklet.java new file mode 100644 index 000000000..2296e782c --- /dev/null +++ 
b/apps/commerce-batch/src/main/java/com/loopers/batch/job/eventhandledcleanup/step/EventHandledCleanupTasklet.java @@ -0,0 +1,28 @@ +package com.loopers.batch.job.eventhandledcleanup.step; + +import jakarta.persistence.EntityManager; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.StepContribution; +import org.springframework.batch.core.scope.context.ChunkContext; +import org.springframework.batch.core.step.tasklet.Tasklet; +import org.springframework.batch.repeat.RepeatStatus; +import org.springframework.stereotype.Component; + +@Slf4j +@RequiredArgsConstructor +@Component +public class EventHandledCleanupTasklet implements Tasklet { + + private final EntityManager entityManager; + + @Override + public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { + log.info("[EventHandledCleanup] event_handled 7일 이전 데이터 삭제 시작"); + int deleted = entityManager.createNativeQuery( + "DELETE FROM event_handled WHERE created_at < DATE_SUB(NOW(), INTERVAL 7 DAY)" + ).executeUpdate(); + log.info("[EventHandledCleanup] 삭제 완료 — 삭제 행 수: {}", deleted); + return RepeatStatus.FINISHED; + } +} diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/step/LikeCountSyncTasklet.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/step/LikeCountSyncTasklet.java deleted file mode 100644 index 611b361a0..000000000 --- a/apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/step/LikeCountSyncTasklet.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.loopers.batch.job.likecountsync.step; - -import jakarta.persistence.EntityManager; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.StepContribution; -import org.springframework.batch.core.scope.context.ChunkContext; -import org.springframework.batch.core.step.tasklet.Tasklet; -import org.springframework.batch.repeat.RepeatStatus; 
-import org.springframework.stereotype.Component; - -@Slf4j -@RequiredArgsConstructor -@Component -public class LikeCountSyncTasklet implements Tasklet { - - private final EntityManager entityManager; - - @Override - public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { - log.info("[LikeCountSync] 1단계: likes 테이블 → product_like_stats 동기화 시작"); - int synced = entityManager.createNativeQuery( - "REPLACE INTO product_like_stats (product_id, like_count, synced_at) " - + "SELECT l.product_id, COUNT(*), NOW() FROM likes l GROUP BY l.product_id" - ).executeUpdate(); - log.info("[LikeCountSync] 1단계 완료 — 동기화 행 수: {}", synced); - - log.info("[LikeCountSync] 2단계: product.like_count 드리프트 보정 시작"); - int corrected = entityManager.createNativeQuery( - "UPDATE product p JOIN product_like_stats pls ON p.id = pls.product_id " - + "SET p.like_count = pls.like_count " - + "WHERE p.like_count != pls.like_count AND p.deleted_at IS NULL" - ).executeUpdate(); - log.info("[LikeCountSync] 2단계 완료 — 보정된 상품 수: {}", corrected); - - return RepeatStatus.FINISHED; - } -} diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/MetricsReconcileJobConfig.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/MetricsReconcileJobConfig.java new file mode 100644 index 000000000..5d2af2c2e --- /dev/null +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/MetricsReconcileJobConfig.java @@ -0,0 +1,49 @@ +package com.loopers.batch.job.metricsreconcile; + +import com.loopers.batch.job.metricsreconcile.step.MetricsReconcileTasklet; +import com.loopers.batch.listener.JobListener; +import com.loopers.batch.listener.StepMonitorListener; +import lombok.RequiredArgsConstructor; +import org.springframework.batch.core.Job; +import org.springframework.batch.core.Step; +import org.springframework.batch.core.configuration.annotation.JobScope; +import org.springframework.batch.core.job.builder.JobBuilder; 
+import org.springframework.batch.core.launch.support.RunIdIncrementer; +import org.springframework.batch.core.repository.JobRepository; +import org.springframework.batch.core.step.builder.StepBuilder; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.transaction.PlatformTransactionManager; + +@ConditionalOnProperty(name = "spring.batch.job.name", havingValue = MetricsReconcileJobConfig.JOB_NAME) +@RequiredArgsConstructor +@Configuration +public class MetricsReconcileJobConfig { + public static final String JOB_NAME = "metricsReconcileJob"; + private static final String STEP_NAME = "metricsReconcileStep"; + + private final JobRepository jobRepository; + private final JobListener jobListener; + private final StepMonitorListener stepMonitorListener; + private final MetricsReconcileTasklet metricsReconcileTasklet; + private final PlatformTransactionManager transactionManager; + + @Bean(JOB_NAME) + public Job metricsReconcileJob() { + return new JobBuilder(JOB_NAME, jobRepository) + .incrementer(new RunIdIncrementer()) + .start(metricsReconcileStep()) + .listener(jobListener) + .build(); + } + + @JobScope + @Bean(STEP_NAME) + public Step metricsReconcileStep() { + return new StepBuilder(STEP_NAME, jobRepository) + .tasklet(metricsReconcileTasklet, transactionManager) + .listener(stepMonitorListener) + .build(); + } +} diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/step/MetricsReconcileTasklet.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/step/MetricsReconcileTasklet.java new file mode 100644 index 000000000..2336f3899 --- /dev/null +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/metricsreconcile/step/MetricsReconcileTasklet.java @@ -0,0 +1,55 @@ +package com.loopers.batch.job.metricsreconcile.step; + +import 
jakarta.persistence.EntityManager; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.StepContribution; +import org.springframework.batch.core.scope.context.ChunkContext; +import org.springframework.batch.core.step.tasklet.Tasklet; +import org.springframework.batch.repeat.RepeatStatus; +import org.springframework.stereotype.Component; + +@Slf4j +@RequiredArgsConstructor +@Component +public class MetricsReconcileTasklet implements Tasklet { + + private final EntityManager entityManager; + + @Override + public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { + // 1단계: likes 테이블 기준 → product_metrics.like_count 보정 + log.info("[MetricsReconcile] 1단계: like_count 대사 시작"); + int likeCorrected = entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, view_count, sales_count, sales_amount, updated_at) " + + "SELECT l.product_id, COUNT(*), 0, 0, 0, NOW(6) FROM likes l GROUP BY l.product_id " + + "ON DUPLICATE KEY UPDATE like_count = VALUES(like_count), updated_at = NOW(6)" + ).executeUpdate(); + log.info("[MetricsReconcile] 1단계 완료 — 대사 행 수: {}", likeCorrected); + + // 2단계: product_metrics.like_count → Product.like_count 비정규화 보정 + log.info("[MetricsReconcile] 2단계: Product.like_count 드리프트 보정 시작"); + int productCorrected = entityManager.createNativeQuery( + "UPDATE product p JOIN product_metrics pm ON p.id = pm.product_id " + + "SET p.like_count = pm.like_count " + + "WHERE p.like_count != pm.like_count AND p.deleted_at IS NULL" + ).executeUpdate(); + log.info("[MetricsReconcile] 2단계 완료 — 보정된 상품 수: {}", productCorrected); + + // 3단계: order_items 기준 → product_metrics.sales_count/sales_amount 보정 + log.info("[MetricsReconcile] 3단계: sales_count/sales_amount 대사 시작"); + int salesCorrected = entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, view_count, sales_count, sales_amount, updated_at) " + + "SELECT oi.product_id, 0, 
0, SUM(oi.quantity), SUM(oi.product_price * oi.quantity), NOW(6) " + + "FROM order_item oi JOIN orders o ON oi.order_id = o.id " + + "WHERE o.status != 'CANCELLED' AND o.deleted_at IS NULL " + + "GROUP BY oi.product_id " + + "ON DUPLICATE KEY UPDATE " + + "sales_count = VALUES(sales_count), sales_amount = VALUES(sales_amount), " + + "updated_at = NOW(6)" + ).executeUpdate(); + log.info("[MetricsReconcile] 3단계 완료 — 대사 행 수: {}", salesCorrected); + + return RepeatStatus.FINISHED; + } +} diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/LikeCountSyncJobConfig.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/OutboxCleanupJobConfig.java similarity index 68% rename from apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/LikeCountSyncJobConfig.java rename to apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/OutboxCleanupJobConfig.java index 3e5164367..fcdd0bb6e 100644 --- a/apps/commerce-batch/src/main/java/com/loopers/batch/job/likecountsync/LikeCountSyncJobConfig.java +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/OutboxCleanupJobConfig.java @@ -1,6 +1,6 @@ -package com.loopers.batch.job.likecountsync; +package com.loopers.batch.job.outboxcleanup; -import com.loopers.batch.job.likecountsync.step.LikeCountSyncTasklet; +import com.loopers.batch.job.outboxcleanup.step.OutboxCleanupTasklet; import com.loopers.batch.listener.JobListener; import com.loopers.batch.listener.StepMonitorListener; import lombok.RequiredArgsConstructor; @@ -16,33 +16,33 @@ import org.springframework.context.annotation.Configuration; import org.springframework.transaction.PlatformTransactionManager; -@ConditionalOnProperty(name = "spring.batch.job.name", havingValue = LikeCountSyncJobConfig.JOB_NAME) +@ConditionalOnProperty(name = "spring.batch.job.name", havingValue = OutboxCleanupJobConfig.JOB_NAME) @RequiredArgsConstructor @Configuration -public class 
LikeCountSyncJobConfig { - public static final String JOB_NAME = "likeCountSyncJob"; - private static final String STEP_SYNC_NAME = "likeCountSyncStep"; +public class OutboxCleanupJobConfig { + public static final String JOB_NAME = "outboxCleanupJob"; + private static final String STEP_NAME = "outboxCleanupStep"; private final JobRepository jobRepository; private final JobListener jobListener; private final StepMonitorListener stepMonitorListener; - private final LikeCountSyncTasklet likeCountSyncTasklet; + private final OutboxCleanupTasklet outboxCleanupTasklet; private final PlatformTransactionManager transactionManager; @Bean(JOB_NAME) - public Job likeCountSyncJob() { + public Job outboxCleanupJob() { return new JobBuilder(JOB_NAME, jobRepository) .incrementer(new RunIdIncrementer()) - .start(likeCountSyncStep()) + .start(outboxCleanupStep()) .listener(jobListener) .build(); } @JobScope - @Bean(STEP_SYNC_NAME) - public Step likeCountSyncStep() { - return new StepBuilder(STEP_SYNC_NAME, jobRepository) - .tasklet(likeCountSyncTasklet, transactionManager) + @Bean(STEP_NAME) + public Step outboxCleanupStep() { + return new StepBuilder(STEP_NAME, jobRepository) + .tasklet(outboxCleanupTasklet, transactionManager) .listener(stepMonitorListener) .build(); } diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/step/OutboxCleanupTasklet.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/step/OutboxCleanupTasklet.java new file mode 100644 index 000000000..d2e92e4f2 --- /dev/null +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/outboxcleanup/step/OutboxCleanupTasklet.java @@ -0,0 +1,28 @@ +package com.loopers.batch.job.outboxcleanup.step; + +import jakarta.persistence.EntityManager; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.StepContribution; +import org.springframework.batch.core.scope.context.ChunkContext; +import 
org.springframework.batch.core.step.tasklet.Tasklet; +import org.springframework.batch.repeat.RepeatStatus; +import org.springframework.stereotype.Component; + +@Slf4j +@RequiredArgsConstructor +@Component +public class OutboxCleanupTasklet implements Tasklet { + + private final EntityManager entityManager; + + @Override + public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { + log.info("[OutboxCleanup] event_outbox 1시간 이전 데이터 삭제 시작"); + int deleted = entityManager.createNativeQuery( + "DELETE FROM event_outbox WHERE created_at < DATE_SUB(NOW(), INTERVAL 1 HOUR)" + ).executeUpdate(); + log.info("[OutboxCleanup] 삭제 완료 — 삭제 행 수: {}", deleted); + return RepeatStatus.FINISHED; + } +} From 10c99ed99678fa334605136c0bcd3eb743ba351d Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:43:34 +0900 Subject: [PATCH 06/14] =?UTF-8?q?refactor:=20=EB=AF=B8=EC=82=AC=EC=9A=A9?= =?UTF-8?q?=20=EC=BD=94=EB=93=9C=20=EC=A0=9C=EA=B1=B0=20+=20=ED=85=8C?= =?UTF-8?q?=EC=8A=A4=ED=8A=B8=20=EC=9E=91=EC=84=B1=20=EB=B0=8F=20=EC=88=98?= =?UTF-8?q?=EC=A0=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 미사용 코드 제거: - ProductLikeStats/Repository: 이벤트 기반으로 대체됨 - DemoKafkaConsumer: MetricsConsumer/CouponIssueConsumer로 대체 신규 테스트: - LikeCountEventListenerTest, CacheEvictionEventListenerTest - ProductViewKafkaPublisherTest - CouponIssueConcurrencyTest: CAS UPDATE 동시성 검증 기존 테스트 수정: - Facade 테스트: 이벤트 발행 의존성 반영 - LikeConcurrencyTest: async listener 대기 로직 추가 - SlidingWindowRateLimiterTest: 타이밍 마진 확대 (flaky 방지) - PaymentE2ETest: 테스트 데이터 사전 생성 - CommerceBatchApplicationTest: job.enabled=false - Batch E2E 테스트: schema-batch-test.sql 추가 --- .../domain/product/ProductLikeStats.java | 39 ----- .../product/ProductLikeStatsRepository.java | 11 -- .../ProductLikeStatsJpaRepository.java | 19 --- .../ProductLikeStatsRepositoryImpl.java | 40 ----- 
.../application/coupon/CouponFacadeTest.java | 98 ++++++++++- .../event/ProductViewKafkaPublisherTest.java | 48 ++++++ .../application/like/LikeFacadeTest.java | 101 +++++++++++- .../application/order/OrderFacadeTest.java | 16 +- .../product/ProductFacadeTest.java | 2 +- .../CouponIssueConcurrencyTest.java | 152 ++++++++++++++++++ .../concurrency/LikeConcurrencyTest.java | 33 +++- .../FakeCouponIssueRequestRepository.java | 39 +++++ .../SlidingWindowRateLimiterTest.java | 6 +- .../api/payment/PaymentE2ETest.java | 41 ++++- .../CacheEvictionEventListenerTest.java | 91 +++++++++++ .../listener/LikeCountEventListenerTest.java | 105 ++++++++++++ .../loopers/CommerceBatchApplicationTest.java | 2 + .../CouponReconciliationJobE2ETest.java | 2 + .../payment/PaymentRecoveryJobE2ETest.java | 2 + .../src/test/resources/schema-batch-test.sql | 100 ++++++++++++ .../consumer/DemoKafkaConsumer.java | 24 --- 21 files changed, 812 insertions(+), 159 deletions(-) delete mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStats.java delete mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStatsRepository.java delete mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsJpaRepository.java delete mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsRepositoryImpl.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/application/event/ProductViewKafkaPublisherTest.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/concurrency/CouponIssueConcurrencyTest.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/fake/FakeCouponIssueRequestRepository.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/interfaces/listener/CacheEvictionEventListenerTest.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/interfaces/listener/LikeCountEventListenerTest.java create mode 100644 
apps/commerce-batch/src/test/resources/schema-batch-test.sql delete mode 100644 apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStats.java b/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStats.java deleted file mode 100644 index 1f6922f07..000000000 --- a/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStats.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.loopers.domain.product; - -import jakarta.persistence.Column; -import jakarta.persistence.Entity; -import jakarta.persistence.Id; -import jakarta.persistence.Table; -import lombok.AccessLevel; -import lombok.Getter; -import lombok.NoArgsConstructor; - -import java.time.ZonedDateTime; - -@Entity -@Table(name = "product_like_stats") -@Getter -@NoArgsConstructor(access = AccessLevel.PROTECTED) -public class ProductLikeStats { - - @Id - @Column(name = "product_id") - private Long productId; - - @Column(name = "like_count", nullable = false) - private int likeCount; - - @Column(name = "synced_at", nullable = false) - private ZonedDateTime syncedAt; - - public ProductLikeStats(Long productId, int likeCount) { - this.productId = productId; - this.likeCount = likeCount; - this.syncedAt = ZonedDateTime.now(); - } - - public void updateCount(int likeCount) { - this.likeCount = likeCount; - this.syncedAt = ZonedDateTime.now(); - } -} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStatsRepository.java b/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStatsRepository.java deleted file mode 100644 index 1e47288d7..000000000 --- a/apps/commerce-api/src/main/java/com/loopers/domain/product/ProductLikeStatsRepository.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.loopers.domain.product; - -import java.util.List; - -public interface ProductLikeStatsRepository { - ProductLikeStats save(ProductLikeStats 
stats); - List saveAll(List statsList); - List findAll(); - void syncAllFromLikes(); - int correctProductLikeCounts(); -} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsJpaRepository.java deleted file mode 100644 index c9fa84136..000000000 --- a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsJpaRepository.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.loopers.infrastructure.product; - -import com.loopers.domain.product.ProductLikeStats; -import org.springframework.data.jpa.repository.JpaRepository; -import org.springframework.data.jpa.repository.Modifying; -import org.springframework.data.jpa.repository.Query; - -public interface ProductLikeStatsJpaRepository extends JpaRepository { - - @Modifying - @Query(value = "REPLACE INTO product_like_stats (product_id, like_count, synced_at) " - + "SELECT l.product_id, COUNT(*), NOW() FROM likes l GROUP BY l.product_id", nativeQuery = true) - void syncAllFromLikes(); - - @Modifying - @Query(value = "UPDATE product p JOIN product_like_stats pls ON p.id = pls.product_id " - + "SET p.like_count = pls.like_count WHERE p.like_count != pls.like_count AND p.deleted_at IS NULL", nativeQuery = true) - int correctProductLikeCounts(); -} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsRepositoryImpl.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsRepositoryImpl.java deleted file mode 100644 index 557bf9ef4..000000000 --- a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductLikeStatsRepositoryImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.loopers.infrastructure.product; - -import com.loopers.domain.product.ProductLikeStats; -import com.loopers.domain.product.ProductLikeStatsRepository; -import lombok.RequiredArgsConstructor; 
-import org.springframework.stereotype.Repository; - -import java.util.List; - -@Repository -@RequiredArgsConstructor -public class ProductLikeStatsRepositoryImpl implements ProductLikeStatsRepository { - - private final ProductLikeStatsJpaRepository productLikeStatsJpaRepository; - - @Override - public ProductLikeStats save(ProductLikeStats stats) { - return productLikeStatsJpaRepository.save(stats); - } - - @Override - public List saveAll(List statsList) { - return productLikeStatsJpaRepository.saveAll(statsList); - } - - @Override - public List findAll() { - return productLikeStatsJpaRepository.findAll(); - } - - @Override - public void syncAllFromLikes() { - productLikeStatsJpaRepository.syncAllFromLikes(); - } - - @Override - public int correctProductLikeCounts() { - return productLikeStatsJpaRepository.correctProductLikeCounts(); - } -} diff --git a/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeTest.java index beeaa6768..ab589b9c8 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeTest.java @@ -1,7 +1,9 @@ package com.loopers.application.coupon; +import com.fasterxml.jackson.databind.ObjectMapper; import com.loopers.domain.coupon.*; import com.loopers.fake.FakeCouponIssueRepository; +import com.loopers.fake.FakeCouponIssueRequestRepository; import com.loopers.fake.FakeCouponRepository; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; @@ -9,6 +11,7 @@ import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.time.Clock; import java.time.ZonedDateTime; @@ -16,19 +19,29 @@ import static org.assertj.core.api.Assertions.assertThat; import static 
org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; class CouponFacadeTest { private CouponFacade couponFacade; private FakeCouponRepository couponRepository; private FakeCouponIssueRepository couponIssueRepository; + private FakeCouponIssueRequestRepository issueRequestRepository; + private KafkaTemplate kafkaTemplate; private final Clock clock = Clock.systemDefaultZone(); + @SuppressWarnings("unchecked") @BeforeEach void setUp() { couponRepository = new FakeCouponRepository(); couponIssueRepository = new FakeCouponIssueRepository(); - couponFacade = new CouponFacade(couponRepository, couponIssueRepository, clock); + issueRequestRepository = new FakeCouponIssueRequestRepository(); + kafkaTemplate = mock(KafkaTemplate.class); + couponFacade = new CouponFacade(couponRepository, couponIssueRepository, + issueRequestRepository, kafkaTemplate, new ObjectMapper(), clock); } @Nested @@ -257,4 +270,87 @@ void applyCouponToOrder_withOtherMember_throwsException() { .isEqualTo(ErrorType.FORBIDDEN); } } + + @Nested + @DisplayName("선착순 쿠폰 발급 요청") + class RequestCouponIssue { + + @DisplayName("유효한 쿠폰에 발급 요청하면 PENDING 상태의 CouponIssueRequest가 생성된다") + @Test + void requestCouponIssue_createsPendingRequest() { + Coupon coupon = couponFacade.createCoupon( + "선착순 할인", DiscountType.FIXED, 5000, 0, + ZonedDateTime.now().plusDays(30)); + + CouponIssueRequest result = couponFacade.requestCouponIssue(coupon.getId(), 1L); + + assertThat(result.getId()).isNotNull(); + assertThat(result.getCouponId()).isEqualTo(coupon.getId()); + assertThat(result.getMemberId()).isEqualTo(1L); + assertThat(result.getStatus()).isEqualTo(CouponIssueRequestStatus.PENDING); + } + + @DisplayName("발급 요청 시 Kafka에 메시지가 발행된다") + @Test + void requestCouponIssue_sendsKafkaMessage() { + Coupon coupon = couponFacade.createCoupon( + "선착순 
할인", DiscountType.FIXED, 5000, 0, + ZonedDateTime.now().plusDays(30)); + + couponFacade.requestCouponIssue(coupon.getId(), 1L); + + verify(kafkaTemplate).send(eq("coupon-issue-requests"), + eq(String.valueOf(coupon.getId())), anyString()); + } + + @DisplayName("만료된 쿠폰은 발급 요청할 수 없다") + @Test + void requestCouponIssue_whenExpired_throwsException() { + Coupon coupon = couponFacade.createCoupon( + "할인", DiscountType.FIXED, 5000, 0, + ZonedDateTime.now().minusDays(1)); + + assertThatThrownBy(() -> couponFacade.requestCouponIssue(coupon.getId(), 1L)) + .isInstanceOf(CoreException.class) + .extracting(e -> ((CoreException) e).getErrorType()) + .isEqualTo(ErrorType.BAD_REQUEST); + } + + @DisplayName("존재하지 않는 쿠폰은 발급 요청할 수 없다") + @Test + void requestCouponIssue_whenNotExists_throwsException() { + assertThatThrownBy(() -> couponFacade.requestCouponIssue(999L, 1L)) + .isInstanceOf(CoreException.class) + .extracting(e -> ((CoreException) e).getErrorType()) + .isEqualTo(ErrorType.NOT_FOUND); + } + } + + @Nested + @DisplayName("발급 요청 상태 조회") + class GetIssueRequest { + + @DisplayName("저장된 발급 요청을 조회하면 반환된다") + @Test + void getIssueRequest_whenExists_returnsRequest() { + Coupon coupon = couponFacade.createCoupon( + "선착순 할인", DiscountType.FIXED, 5000, 0, + ZonedDateTime.now().plusDays(30)); + CouponIssueRequest saved = couponFacade.requestCouponIssue(coupon.getId(), 1L); + + CouponIssueRequest result = couponFacade.getIssueRequest(saved.getId()); + + assertThat(result.getId()).isEqualTo(saved.getId()); + assertThat(result.getStatus()).isEqualTo(CouponIssueRequestStatus.PENDING); + } + + @DisplayName("존재하지 않는 발급 요청을 조회하면 예외가 발생한다") + @Test + void getIssueRequest_whenNotExists_throwsException() { + assertThatThrownBy(() -> couponFacade.getIssueRequest(999L)) + .isInstanceOf(CoreException.class) + .extracting(e -> ((CoreException) e).getErrorType()) + .isEqualTo(ErrorType.NOT_FOUND); + } + } } diff --git 
a/apps/commerce-api/src/test/java/com/loopers/application/event/ProductViewKafkaPublisherTest.java b/apps/commerce-api/src/test/java/com/loopers/application/event/ProductViewKafkaPublisherTest.java new file mode 100644 index 000000000..8356c07b0 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/application/event/ProductViewKafkaPublisherTest.java @@ -0,0 +1,48 @@ +package com.loopers.application.event; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.ProductViewedEvent; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +class ProductViewKafkaPublisherTest { + + private ProductViewKafkaPublisher publisher; + private KafkaTemplate kafkaTemplate; + + @SuppressWarnings("unchecked") + @BeforeEach + void setUp() { + kafkaTemplate = mock(KafkaTemplate.class); + publisher = new ProductViewKafkaPublisher(kafkaTemplate, new ObjectMapper()); + } + + @Nested + @DisplayName("ProductViewedEvent 처리") + class HandleProductViewed { + + @DisplayName("catalog-events 토픽으로 Kafka 메시지를 발행한다") + @Test + void sendsKafkaMessage() { + publisher.handle(new ProductViewedEvent(100L, 1L)); + + verify(kafkaTemplate).send(eq("catalog-events"), eq("100"), anyString()); + } + + @DisplayName("productId를 Kafka 메시지 키로 사용한다") + @Test + void usesProductIdAsKey() { + publisher.handle(new ProductViewedEvent(42L, 5L)); + + verify(kafkaTemplate).send(eq("catalog-events"), eq("42"), anyString()); + } + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java index 7cb743953..69095d1f4 
100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java @@ -1,5 +1,10 @@ package com.loopers.application.like; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; import com.loopers.domain.like.Like; import com.loopers.domain.product.Product; import com.loopers.domain.product.vo.Price; @@ -12,7 +17,9 @@ import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; +import org.springframework.context.ApplicationEventPublisher; +import java.util.ArrayList; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; @@ -23,12 +30,22 @@ class LikeFacadeTest { private LikeFacade likeFacade; private FakeLikeRepository likeRepository; private FakeProductRepository productRepository; + private List savedOutboxes; + private List publishedEvents; @BeforeEach void setUp() { likeRepository = new FakeLikeRepository(); productRepository = new FakeProductRepository(); - likeFacade = new LikeFacade(likeRepository, productRepository); + savedOutboxes = new ArrayList<>(); + publishedEvents = new ArrayList<>(); + EventOutboxRepository eventOutboxRepository = outbox -> { + savedOutboxes.add(outbox); + return outbox; + }; + ApplicationEventPublisher eventPublisher = publishedEvents::add; + likeFacade = new LikeFacade(likeRepository, productRepository, + eventOutboxRepository, eventPublisher, new ObjectMapper()); } @Nested @@ -46,7 +63,7 @@ void addLike_savesLikeRecord_andIncrementsLikeCount() { assertThat(likeRepository.existsByMemberIdAndProductId(memberId, product.getId())).isTrue(); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(1); - 
assertThat(product.getLikeCount()).isEqualTo(1); + // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("이미 좋아요한 상품에 다시 좋아요하면 멱등하게 처리된다 (likeCount 불변)") @@ -61,7 +78,7 @@ void addLike_whenAlreadyLiked_isIdempotent() { assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(1); assertThat(likeRepository.findAllByMemberId(memberId)).hasSize(1); - assertThat(product.getLikeCount()).isEqualTo(1); + // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("존재하지 않는 상품에 좋아요하면 예외가 발생한다") @@ -84,7 +101,7 @@ void addLike_byMultipleMembers_accumulatesCount() { likeFacade.addLike(3L, product.getId()); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(3); - assertThat(product.getLikeCount()).isEqualTo(3); + // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } } @@ -104,7 +121,7 @@ void removeLike_deletesLikeRecord_andDecrementsLikeCount() { assertThat(likeRepository.existsByMemberIdAndProductId(memberId, product.getId())).isFalse(); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(0); - assertThat(product.getLikeCount()).isEqualTo(0); + // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("좋아요하지 않은 상품의 좋아요를 취소해도 예외 없이 멱등하게 처리된다") @@ -116,7 +133,79 @@ void removeLike_whenNotLiked_isIdempotent() { likeFacade.removeLike(1L, product.getId()); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(0); - assertThat(product.getLikeCount()).isEqualTo(0); + // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) + } + } + + @Nested + @DisplayName("Outbox + 이벤트 발행 검증") + class OutboxAndEvent { + + @DisplayName("좋아요 추가 시 EventOutbox가 저장되고 LikeCreatedEvent가 발행된다") + @Test + void addLike_savesOutboxAndPublishesEvent() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + + likeFacade.addLike(1L, product.getId()); + + assertThat(savedOutboxes).hasSize(1); + EventOutbox outbox = savedOutboxes.get(0); + assertThat(outbox.getAggregateType()).isEqualTo("catalog"); + 
assertThat(outbox.getAggregateId()).isEqualTo(String.valueOf(product.getId())); + assertThat(outbox.getEventType()).isEqualTo("LIKE_CREATED"); + + assertThat(publishedEvents).hasSize(1); + assertThat(publishedEvents.get(0)).isInstanceOf(LikeCreatedEvent.class); + LikeCreatedEvent event = (LikeCreatedEvent) publishedEvents.get(0); + assertThat(event.productId()).isEqualTo(product.getId()); + assertThat(event.memberId()).isEqualTo(1L); + } + + @DisplayName("좋아요 취소 시 EventOutbox가 저장되고 LikeRemovedEvent가 발행된다") + @Test + void removeLike_savesOutboxAndPublishesEvent() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + likeFacade.addLike(1L, product.getId()); + savedOutboxes.clear(); + publishedEvents.clear(); + + likeFacade.removeLike(1L, product.getId()); + + assertThat(savedOutboxes).hasSize(1); + EventOutbox outbox = savedOutboxes.get(0); + assertThat(outbox.getEventType()).isEqualTo("LIKE_REMOVED"); + + assertThat(publishedEvents).hasSize(1); + assertThat(publishedEvents.get(0)).isInstanceOf(LikeRemovedEvent.class); + } + + @DisplayName("이미 좋아요한 상품에 다시 좋아요하면 Outbox와 이벤트가 발행되지 않는다") + @Test + void addLike_whenIdempotent_noOutboxOrEvent() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + likeFacade.addLike(1L, product.getId()); + savedOutboxes.clear(); + publishedEvents.clear(); + + likeFacade.addLike(1L, product.getId()); + + assertThat(savedOutboxes).isEmpty(); + assertThat(publishedEvents).isEmpty(); + } + + @DisplayName("좋아요하지 않은 상품을 취소하면 Outbox와 이벤트가 발행되지 않는다") + @Test + void removeLike_whenNotLiked_noOutboxOrEvent() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + + likeFacade.removeLike(1L, product.getId()); + + assertThat(savedOutboxes).isEmpty(); + assertThat(publishedEvents).isEmpty(); } } diff --git 
a/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java index ee6839f6b..54fc96fcd 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java @@ -1,8 +1,10 @@ package com.loopers.application.order; +import com.fasterxml.jackson.databind.ObjectMapper; import com.loopers.application.coupon.CouponFacade; import com.loopers.domain.brand.Brand; import com.loopers.domain.coupon.*; +import com.loopers.domain.event.EventOutboxRepository; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderItem; import com.loopers.domain.order.OrderStatus; @@ -16,13 +18,16 @@ import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.mock; class OrderFacadeTest { @@ -34,6 +39,7 @@ class OrderFacadeTest { private FakeCouponIssueRepository couponIssueRepository; private CouponFacade couponFacade; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { orderRepository = new FakeOrderRepository(); @@ -41,10 +47,16 @@ void setUp() { brandRepository = new FakeBrandRepository(); couponRepository = new FakeCouponRepository(); couponIssueRepository = new FakeCouponIssueRepository(); + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + KafkaTemplate 
kafkaTemplate = mock(KafkaTemplate.class); couponFacade = new CouponFacade(couponRepository, couponIssueRepository, - Clock.systemDefaultZone()); + issueRequestRepository, kafkaTemplate, new ObjectMapper(), Clock.systemDefaultZone()); + EventOutboxRepository eventOutboxRepository = outbox -> outbox; orderFacade = new OrderFacade(orderRepository, productRepository, brandRepository, - couponFacade); + couponFacade, eventOutboxRepository, event -> {}, new ObjectMapper()); } @Nested diff --git a/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java index 591ea7d2d..1d0d57c77 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java @@ -38,7 +38,7 @@ void setUp() { brandRepository = new FakeBrandRepository(); likeRepository = new FakeLikeRepository(); productRepository.setBrandRepository(brandRepository); - productFacade = new ProductFacade(productRepository, brandRepository, likeRepository, new FakeProductCachePort()); + productFacade = new ProductFacade(productRepository, brandRepository, likeRepository, new FakeProductCachePort(), event -> {}); } @Nested diff --git a/apps/commerce-api/src/test/java/com/loopers/concurrency/CouponIssueConcurrencyTest.java b/apps/commerce-api/src/test/java/com/loopers/concurrency/CouponIssueConcurrencyTest.java new file mode 100644 index 000000000..3f8cf5234 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/concurrency/CouponIssueConcurrencyTest.java @@ -0,0 +1,152 @@ +package com.loopers.concurrency; + +import com.loopers.domain.coupon.Coupon; +import com.loopers.domain.coupon.CouponRepository; +import com.loopers.domain.coupon.DiscountType; +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; 
+import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.jdbc.core.JdbcTemplate; + +import java.time.ZonedDateTime; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +@SpringBootTest +class CouponIssueConcurrencyTest { + + @Autowired + private CouponRepository couponRepository; + + @Autowired + private JdbcTemplate jdbcTemplate; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @DisplayName("선착순 쿠폰: maxIssuanceCount보다 많은 동시 요청이 와도 수량 초과 발급이 발생하지 않는다") + @Test + void concurrentCouponIssue_doesNotExceedMaxIssuanceCount() throws InterruptedException { + // arrange + int maxIssuance = 100; + int threadCount = 200; + + Coupon coupon = couponRepository.save( + new Coupon("선착순 할인", DiscountType.FIXED, 5000, 0, ZonedDateTime.now().plusDays(30))); + Long couponId = coupon.getId(); + + // maxIssuanceCount 설정 (Entity에 setter 없으므로 native SQL) + jdbcTemplate.update("UPDATE coupon SET max_issuance_count = ? WHERE id = ?", + maxIssuance, couponId); + + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + CountDownLatch latch = new CountDownLatch(threadCount); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger failCount = new AtomicInteger(0); + + // act: CouponIssueConsumer의 CAS UPDATE를 동시에 실행 + for (int i = 0; i < threadCount; i++) { + long memberId = i + 1; + executor.submit(() -> { + try { + // CouponIssueConsumer와 동일한 CAS UPDATE + int casResult = jdbcTemplate.update( + "UPDATE coupon SET issued_count = issued_count + 1 " + + "WHERE id = ? 
" + + "AND (max_issuance_count IS NULL OR issued_count < max_issuance_count) " + + "AND deleted_at IS NULL", + couponId + ); + + if (casResult > 0) { + jdbcTemplate.update( + "INSERT INTO coupon_issue (coupon_id, member_id, status, expired_at, created_at) " + + "SELECT ?, ?, 'AVAILABLE', c.expired_at, NOW(6) " + + "FROM coupon c WHERE c.id = ?", + couponId, memberId, couponId + ); + successCount.incrementAndGet(); + } else { + failCount.incrementAndGet(); + } + } catch (Exception e) { + failCount.incrementAndGet(); + } finally { + latch.countDown(); + } + }); + } + latch.await(); + executor.shutdown(); + + // assert + Integer issuedCount = jdbcTemplate.queryForObject( + "SELECT issued_count FROM coupon WHERE id = ?", Integer.class, couponId); + Integer couponIssueCount = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM coupon_issue WHERE coupon_id = ?", Integer.class, couponId); + + assertThat(successCount.get()).isEqualTo(maxIssuance); + assertThat(failCount.get()).isEqualTo(threadCount - maxIssuance); + assertThat(issuedCount).isEqualTo(maxIssuance); + assertThat(couponIssueCount).isEqualTo(maxIssuance); + } + + @DisplayName("선착순 쿠폰: maxIssuanceCount가 없으면 제한 없이 발급된다") + @Test + void concurrentCouponIssue_withoutLimit_allSucceed() throws InterruptedException { + // arrange + int threadCount = 100; + + Coupon coupon = couponRepository.save( + new Coupon("무제한 할인", DiscountType.FIXED, 5000, 0, ZonedDateTime.now().plusDays(30))); + Long couponId = coupon.getId(); + // maxIssuanceCount는 null (기본값) + + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + CountDownLatch latch = new CountDownLatch(threadCount); + AtomicInteger successCount = new AtomicInteger(0); + + // act + for (int i = 0; i < threadCount; i++) { + long memberId = i + 1; + executor.submit(() -> { + try { + int casResult = jdbcTemplate.update( + "UPDATE coupon SET issued_count = issued_count + 1 " + + "WHERE id = ? 
" + + "AND (max_issuance_count IS NULL OR issued_count < max_issuance_count) " + + "AND deleted_at IS NULL", + couponId + ); + if (casResult > 0) { + successCount.incrementAndGet(); + } + } catch (Exception ignored) { + } finally { + latch.countDown(); + } + }); + } + latch.await(); + executor.shutdown(); + + // assert: 제한 없으므로 모두 성공 + Integer issuedCount = jdbcTemplate.queryForObject( + "SELECT issued_count FROM coupon WHERE id = ?", Integer.class, couponId); + + assertThat(successCount.get()).isEqualTo(threadCount); + assertThat(issuedCount).isEqualTo(threadCount); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/concurrency/LikeConcurrencyTest.java b/apps/commerce-api/src/test/java/com/loopers/concurrency/LikeConcurrencyTest.java index 751ce4aaa..8cb97ef55 100644 --- a/apps/commerce-api/src/test/java/com/loopers/concurrency/LikeConcurrencyTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/concurrency/LikeConcurrencyTest.java @@ -76,12 +76,13 @@ void concurrentLikes_allSucceed_andCountIsCorrect() throws InterruptedException latch.await(); executor.shutdown(); - // assert — Like 레코드 수와 Product.likeCount가 일치해야 한다 + // assert — Like 레코드는 즉시 확인, likeCount는 비동기 리스너 완료 대기 long actualLikeRecords = likeRepository.countByProductId(productId); - Product updatedProduct = productRepository.findById(productId).orElseThrow(); assertThat(successCount.get()).isEqualTo(threadCount); assertThat(actualLikeRecords).isEqualTo(threadCount); - assertThat(updatedProduct.getLikeCount()).isEqualTo(threadCount); + + // likeCount는 @Async AFTER_COMMIT 리스너에서 갱신 → 폴링으로 대기 + waitForLikeCount(productId, threadCount); } @DisplayName("동일 상품에 여러 명이 좋아요 후 일부가 취소하면 Like 레코드 수와 Product.likeCount가 일치한다") @@ -112,6 +113,9 @@ void concurrentLikeAndUnlike_countsCorrectly() throws InterruptedException { latch1.await(); executor1.shutdown(); + // addLike 비동기 리스너 완료 대기 + waitForLikeCount(productId, likeCount); + // 5명이 동시에 좋아요 취소 int unlikeCount = 5; ExecutorService executor2 = 
Executors.newFixedThreadPool(unlikeCount); @@ -133,8 +137,27 @@ void concurrentLikeAndUnlike_countsCorrectly() throws InterruptedException { // assert — Like 레코드 수와 Product.likeCount가 일치해야 한다 long actualLikeRecords = likeRepository.countByProductId(productId); - Product updatedProduct = productRepository.findById(productId).orElseThrow(); assertThat(actualLikeRecords).isEqualTo(likeCount - unlikeCount); - assertThat(updatedProduct.getLikeCount()).isEqualTo((int) actualLikeRecords); + + waitForLikeCount(productId, (int) actualLikeRecords); + } + + /** + * @Async AFTER_COMMIT 리스너의 likeCount 갱신 완료를 폴링으로 대기한다. + * 최대 10초 (100ms × 100회) 대기. + */ + private void waitForLikeCount(Long productId, int expected) throws InterruptedException { + for (int attempt = 0; attempt < 100; attempt++) { + Product p = productRepository.findById(productId).orElseThrow(); + if (p.getLikeCount() == expected) { + return; + } + Thread.sleep(100); + } + // 최종 assert (실패 시 명확한 메시지) + Product p = productRepository.findById(productId).orElseThrow(); + assertThat(p.getLikeCount()) + .as("likeCount가 %d이어야 하지만 비동기 리스너가 시간 내 완료되지 않음", expected) + .isEqualTo(expected); } } diff --git a/apps/commerce-api/src/test/java/com/loopers/fake/FakeCouponIssueRequestRepository.java b/apps/commerce-api/src/test/java/com/loopers/fake/FakeCouponIssueRequestRepository.java new file mode 100644 index 000000000..0d1ea161a --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/fake/FakeCouponIssueRequestRepository.java @@ -0,0 +1,39 @@ +package com.loopers.fake; + +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; + +import java.lang.reflect.Field; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; + +public class FakeCouponIssueRequestRepository implements CouponIssueRequestRepository { + + private final Map store = new ConcurrentHashMap<>(); + private long sequence = 1L; + + @Override + public 
CouponIssueRequest save(CouponIssueRequest request) { + if (request.getId() == null) { + setId(request, sequence++); + } + store.put(request.getId(), request); + return request; + } + + @Override + public Optional findById(Long id) { + return Optional.ofNullable(store.get(id)); + } + + private void setId(CouponIssueRequest request, long id) { + try { + Field idField = CouponIssueRequest.class.getDeclaredField("id"); + idField.setAccessible(true); + idField.set(request, id); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/resilience/SlidingWindowRateLimiterTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/resilience/SlidingWindowRateLimiterTest.java index f295430ae..3b9d87b6c 100644 --- a/apps/commerce-api/src/test/java/com/loopers/infrastructure/resilience/SlidingWindowRateLimiterTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/resilience/SlidingWindowRateLimiterTest.java @@ -42,7 +42,7 @@ void exceedLimit_rejected() { @DisplayName("U2-3: 윈도우 경계에서 이전 윈도우 가중치가 적용된다 (Boundary Burst 방지)") @Test void windowBoundary_prevWindowWeightApplied() throws InterruptedException { - var rateLimiter = new SlidingWindowRateLimiter(10, 200); + var rateLimiter = new SlidingWindowRateLimiter(10, 1000); // 현재 윈도우에서 10건 소진 for (int i = 0; i < 10; i++) { @@ -50,8 +50,8 @@ void windowBoundary_prevWindowWeightApplied() throws InterruptedException { } assertThat(rateLimiter.tryAcquire()).isFalse(); - // 윈도우 경계를 넘어감 (새 윈도우 시작 직후) - Thread.sleep(220); + // 윈도우 경계를 넘어감 (새 윈도우 시작 직후, 충분한 마진 확보) + Thread.sleep(1100); // Sliding Window: 이전 윈도우 10건이 가중치로 반영되어 // Fixed Window와 달리 10건 전부 허용되지 않는다 (Boundary Burst 방지) diff --git a/apps/commerce-api/src/test/java/com/loopers/interfaces/api/payment/PaymentE2ETest.java b/apps/commerce-api/src/test/java/com/loopers/interfaces/api/payment/PaymentE2ETest.java index 946bad01f..8b9132dbf 100644 --- 
a/apps/commerce-api/src/test/java/com/loopers/interfaces/api/payment/PaymentE2ETest.java +++ b/apps/commerce-api/src/test/java/com/loopers/interfaces/api/payment/PaymentE2ETest.java @@ -1,7 +1,14 @@ package com.loopers.interfaces.api.payment; import com.github.tomakehurst.wiremock.WireMockServer; -import com.github.tomakehurst.wiremock.client.WireMock; +import com.loopers.domain.brand.Brand; +import com.loopers.domain.brand.BrandRepository; +import com.loopers.domain.order.Order; +import com.loopers.domain.order.OrderRepository; +import com.loopers.domain.product.Product; +import com.loopers.domain.product.ProductRepository; +import com.loopers.domain.product.vo.Price; +import com.loopers.domain.product.vo.Stock; import com.loopers.interfaces.api.ApiResponse; import com.loopers.utils.DatabaseCleanUp; import org.junit.jupiter.api.*; @@ -13,6 +20,7 @@ import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; +import java.util.List; import java.util.Map; import static com.github.tomakehurst.wiremock.client.WireMock.*; @@ -38,6 +46,15 @@ class PaymentE2ETest { @Autowired private DatabaseCleanUp databaseCleanUp; + @Autowired + private BrandRepository brandRepository; + + @Autowired + private ProductRepository productRepository; + + @Autowired + private OrderRepository orderRepository; + @DynamicPropertySource static void pgProperties(DynamicPropertyRegistry registry) { registry.add("pg.simulator.url", pgSimulator::baseUrl); @@ -71,9 +88,19 @@ private HttpEntity> jsonRequest(Map body) { } /** - * 테스트 전: DB에 주문 데이터를 미리 삽입해야 함. - * 이 E2E 테스트는 전체 인프라(MySQL + Redis)가 필요합니다. + * 테스트용 주문 데이터를 생성한다. + * Brand → Product → Order(CREATED) 순서로 저장. 
*/ + private Order createTestOrder(int amount) { + Brand brand = brandRepository.save(new Brand("테스트브랜드", "E2E 테스트")); + Product product = productRepository.save( + new Product(brand.getId(), "테스트상품", new Price(amount), new Stock(100))); + Order order = Order.create(1L, List.of( + new Order.ItemSnapshot(product.getId(), product.getName(), + amount, brand.getName(), 1) + )); + return orderRepository.save(order); + } @Nested @DisplayName("결제 요청") @@ -94,11 +121,11 @@ void requestPayment_success_returnsPending() { .willReturn(aResponse() .withStatus(500))); // 기록 없음 - // TODO: DB에 주문 데이터 삽입 필요 (Order, Product, etc.) - // 이 테스트는 Docker + Testcontainers 환경에서 실행해야 합니다. + // DB에 주문 데이터 삽입 + Order order = createTestOrder(5000); Map paymentRequest = Map.of( - "orderId", 1L, + "orderId", order.getId(), "cardType", "SAMSUNG", "cardNo", "1234-5678-9012-3456", "amount", 5000 @@ -168,8 +195,6 @@ class ManualConfirm { @DisplayName("E7-3: POST /{id}/confirm → PG 조회 → 상태 갱신") @Test void manualConfirm_pgQuery_statusUpdated() { - // TODO: 사전에 PENDING Payment를 DB에 삽입 필요 - ResponseEntity> response = testRestTemplate.exchange( "/api/v1/payments/1/confirm", HttpMethod.POST, diff --git a/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/CacheEvictionEventListenerTest.java b/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/CacheEvictionEventListenerTest.java new file mode 100644 index 000000000..a51972402 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/CacheEvictionEventListenerTest.java @@ -0,0 +1,91 @@ +package com.loopers.interfaces.listener; + +import com.loopers.application.product.ProductCachePort; +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; +import com.loopers.interfaces.api.product.ProductDto; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; + +import 
java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +class CacheEvictionEventListenerTest { + + private CacheEvictionEventListener listener; + private SpyProductCachePort cachePort; + + @BeforeEach + void setUp() { + cachePort = new SpyProductCachePort(); + listener = new CacheEvictionEventListener(cachePort); + } + + @Nested + @DisplayName("LikeCreatedEvent 처리") + class HandleLikeCreated { + + @DisplayName("상품 상세 캐시와 목록 캐시가 무효화된다") + @Test + void evictsProductDetailAndList() { + listener.handleLikeCreated(new LikeCreatedEvent(100L, 1L)); + + assertThat(cachePort.evictedProductDetailIds).containsExactly(100L); + assertThat(cachePort.evictProductListCount).isEqualTo(1); + } + } + + @Nested + @DisplayName("LikeRemovedEvent 처리") + class HandleLikeRemoved { + + @DisplayName("상품 상세 캐시와 목록 캐시가 무효화된다") + @Test + void evictsProductDetailAndList() { + listener.handleLikeRemoved(new LikeRemovedEvent(200L, 1L)); + + assertThat(cachePort.evictedProductDetailIds).containsExactly(200L); + assertThat(cachePort.evictProductListCount).isEqualTo(1); + } + } + + @Nested + @DisplayName("캐시 무효화 실패") + class CacheEvictionFailure { + + @DisplayName("캐시 무효화 중 예외가 발생해도 best-effort로 처리된다") + @Test + void doesNotPropagateException() { + CacheEvictionEventListener failingListener = new CacheEvictionEventListener( + new FailingProductCachePort()); + + failingListener.handleLikeCreated(new LikeCreatedEvent(100L, 1L)); + failingListener.handleLikeRemoved(new LikeRemovedEvent(200L, 1L)); + } + } + + static class SpyProductCachePort implements ProductCachePort { + final List evictedProductDetailIds = new ArrayList<>(); + int evictProductListCount = 0; + + @Override public ProductDto.ProductResponse getProductDetail(Long productId) { return null; } + @Override public void putProductDetail(Long productId, ProductDto.ProductResponse response) {} + @Override public void evictProductDetail(Long productId) { evictedProductDetailIds.add(productId); 
} + @Override public ProductDto.PagedProductResponse getProductList(Long brandId, String sort, int page, int size) { return null; } + @Override public void putProductList(Long brandId, String sort, int page, int size, ProductDto.PagedProductResponse response) {} + @Override public void evictProductList() { evictProductListCount++; } + } + + static class FailingProductCachePort implements ProductCachePort { + @Override public ProductDto.ProductResponse getProductDetail(Long productId) { return null; } + @Override public void putProductDetail(Long productId, ProductDto.ProductResponse response) {} + @Override public void evictProductDetail(Long productId) { throw new RuntimeException("Redis down"); } + @Override public ProductDto.PagedProductResponse getProductList(Long brandId, String sort, int page, int size) { return null; } + @Override public void putProductList(Long brandId, String sort, int page, int size, ProductDto.PagedProductResponse response) {} + @Override public void evictProductList() { throw new RuntimeException("Redis down"); } + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/LikeCountEventListenerTest.java b/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/LikeCountEventListenerTest.java new file mode 100644 index 000000000..83ed53246 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/interfaces/listener/LikeCountEventListenerTest.java @@ -0,0 +1,105 @@ +package com.loopers.interfaces.listener; + +import com.loopers.domain.event.LikeCreatedEvent; +import com.loopers.domain.event.LikeRemovedEvent; +import com.loopers.domain.product.Product; +import com.loopers.domain.product.vo.Price; +import com.loopers.domain.product.vo.Stock; +import com.loopers.fake.FakeProductRepository; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.springframework.transaction.TransactionStatus; 
+ +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class LikeCountEventListenerTest { + + private LikeCountEventListener listener; + private FakeProductRepository productRepository; + + @BeforeEach + void setUp() { + productRepository = new FakeProductRepository(); + + // 동기 실행 Executor + 스텁 TransactionManager + var txManager = mock(org.springframework.transaction.PlatformTransactionManager.class); + when(txManager.getTransaction(any())).thenReturn(mock(TransactionStatus.class)); + + listener = new LikeCountEventListener(productRepository, Runnable::run, txManager); + } + + @Nested + @DisplayName("LikeCreatedEvent 처리") + class HandleLikeCreated { + + @DisplayName("LikeCreatedEvent 수신 시 상품의 likeCount가 1 증가한다") + @Test + void incrementsLikeCount() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + + listener.handleLikeCreated(new LikeCreatedEvent(product.getId(), 1L)); + + assertThat(productRepository.findById(product.getId()).get().getLikeCount()).isEqualTo(1); + } + + @DisplayName("여러 번 수신하면 likeCount가 누적된다") + @Test + void accumulatesLikeCount() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + + listener.handleLikeCreated(new LikeCreatedEvent(product.getId(), 1L)); + listener.handleLikeCreated(new LikeCreatedEvent(product.getId(), 2L)); + listener.handleLikeCreated(new LikeCreatedEvent(product.getId(), 3L)); + + assertThat(productRepository.findById(product.getId()).get().getLikeCount()).isEqualTo(3); + } + + @DisplayName("존재하지 않는 상품이면 예외 없이 무시된다 (best-effort)") + @Test + void whenProductNotExists_doesNotThrow() { + listener.handleLikeCreated(new LikeCreatedEvent(999L, 1L)); + } + } + + @Nested + @DisplayName("LikeRemovedEvent 처리") + class HandleLikeRemoved { + + @DisplayName("LikeRemovedEvent 수신 시 상품의 
likeCount가 1 감소한다") + @Test + void decrementsLikeCount() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + productRepository.incrementLikeCount(product.getId()); + productRepository.incrementLikeCount(product.getId()); + + listener.handleLikeRemoved(new LikeRemovedEvent(product.getId(), 1L)); + + assertThat(productRepository.findById(product.getId()).get().getLikeCount()).isEqualTo(1); + } + + @DisplayName("likeCount가 0이면 음수가 되지 않는다") + @Test + void doesNotGoBelowZero() { + Product product = productRepository.save( + new Product(1L, "에어맥스", new Price(150000), new Stock(10))); + + listener.handleLikeRemoved(new LikeRemovedEvent(product.getId(), 1L)); + + assertThat(productRepository.findById(product.getId()).get().getLikeCount()).isEqualTo(0); + } + + @DisplayName("존재하지 않는 상품이면 예외 없이 무시된다 (best-effort)") + @Test + void whenProductNotExists_doesNotThrow() { + listener.handleLikeRemoved(new LikeRemovedEvent(999L, 1L)); + } + } +} diff --git a/apps/commerce-batch/src/test/java/com/loopers/CommerceBatchApplicationTest.java b/apps/commerce-batch/src/test/java/com/loopers/CommerceBatchApplicationTest.java index c5e3bc7a3..71a907186 100644 --- a/apps/commerce-batch/src/test/java/com/loopers/CommerceBatchApplicationTest.java +++ b/apps/commerce-batch/src/test/java/com/loopers/CommerceBatchApplicationTest.java @@ -2,8 +2,10 @@ import org.junit.jupiter.api.Test; import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.TestPropertySource; @SpringBootTest +@TestPropertySource(properties = "spring.batch.job.enabled=false") public class CommerceBatchApplicationTest { @Test void contextLoads() {} diff --git a/apps/commerce-batch/src/test/java/com/loopers/job/payment/CouponReconciliationJobE2ETest.java b/apps/commerce-batch/src/test/java/com/loopers/job/payment/CouponReconciliationJobE2ETest.java index fb0099131..b76ef02b8 100644 --- 
a/apps/commerce-batch/src/test/java/com/loopers/job/payment/CouponReconciliationJobE2ETest.java +++ b/apps/commerce-batch/src/test/java/com/loopers/job/payment/CouponReconciliationJobE2ETest.java @@ -13,6 +13,7 @@ import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.jdbc.Sql; import java.time.LocalDate; @@ -30,6 +31,7 @@ @SpringBootTest @SpringBatchTest @TestPropertySource(properties = "spring.batch.job.name=" + PaymentCouponReconciliationJobConfig.JOB_NAME) +@Sql(scripts = "/schema-batch-test.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_CLASS) class CouponReconciliationJobE2ETest { @Autowired diff --git a/apps/commerce-batch/src/test/java/com/loopers/job/payment/PaymentRecoveryJobE2ETest.java b/apps/commerce-batch/src/test/java/com/loopers/job/payment/PaymentRecoveryJobE2ETest.java index 3bf6852ef..cc339c6c7 100644 --- a/apps/commerce-batch/src/test/java/com/loopers/job/payment/PaymentRecoveryJobE2ETest.java +++ b/apps/commerce-batch/src/test/java/com/loopers/job/payment/PaymentRecoveryJobE2ETest.java @@ -13,6 +13,7 @@ import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.TestPropertySource; +import org.springframework.test.context.jdbc.Sql; import java.time.LocalDate; @@ -30,6 +31,7 @@ @SpringBootTest @SpringBatchTest @TestPropertySource(properties = "spring.batch.job.name=" + PaymentRecoveryJobConfig.JOB_NAME) +@Sql(scripts = "/schema-batch-test.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_CLASS) class PaymentRecoveryJobE2ETest { @Autowired diff --git a/apps/commerce-batch/src/test/resources/schema-batch-test.sql b/apps/commerce-batch/src/test/resources/schema-batch-test.sql new file mode 100644 index 000000000..4b83fd751 --- /dev/null +++ 
b/apps/commerce-batch/src/test/resources/schema-batch-test.sql @@ -0,0 +1,100 @@ +-- Batch E2E 테스트용 도메인 테이블 DDL +-- commerce-batch는 도메인 Entity가 없으므로 Hibernate ddl-auto로 생성되지 않는다. +-- Tasklet이 참조하는 테이블만 최소한으로 정의한다. + +CREATE TABLE IF NOT EXISTS brand ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) NOT NULL, + description VARCHAR(255), + created_at DATETIME(6) NOT NULL, + updated_at DATETIME(6) NOT NULL, + deleted_at DATETIME(6) +); + +CREATE TABLE IF NOT EXISTS product ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + brand_id BIGINT NOT NULL, + name VARCHAR(255) NOT NULL, + price INT NOT NULL, + stock_quantity INT NOT NULL, + like_count INT NOT NULL DEFAULT 0, + created_at DATETIME(6) NOT NULL, + updated_at DATETIME(6) NOT NULL, + deleted_at DATETIME(6) +); + +CREATE TABLE IF NOT EXISTS orders ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + member_id BIGINT NOT NULL, + status VARCHAR(50) NOT NULL, + total_price INT NOT NULL, + original_total_price INT NOT NULL, + discount_amount INT NOT NULL DEFAULT 0, + coupon_issue_id BIGINT, + version BIGINT, + created_at DATETIME(6) NOT NULL, + updated_at DATETIME(6) NOT NULL, + deleted_at DATETIME(6) +); + +CREATE TABLE IF NOT EXISTS order_item ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + order_id BIGINT, + product_id BIGINT NOT NULL, + product_name VARCHAR(255) NOT NULL, + product_price INT NOT NULL, + brand_name VARCHAR(255), + quantity INT NOT NULL +); + +CREATE TABLE IF NOT EXISTS coupon ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) NOT NULL, + discount_type VARCHAR(50) NOT NULL, + discount_value INT NOT NULL, + min_order_amount INT NOT NULL, + expired_at DATETIME(6) NOT NULL, + max_issuance_count INT, + issued_count INT NOT NULL DEFAULT 0, + created_at DATETIME(6) NOT NULL, + updated_at DATETIME(6) NOT NULL, + deleted_at DATETIME(6) +); + +CREATE TABLE IF NOT EXISTS coupon_issue ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + coupon_id BIGINT NOT NULL, + member_id BIGINT NOT NULL, + used_order_id BIGINT, + 
status VARCHAR(50) NOT NULL, + expired_at DATETIME(6) NOT NULL, + created_at DATETIME(6) NOT NULL +); + +CREATE TABLE IF NOT EXISTS payments ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + order_id BIGINT NOT NULL, + status VARCHAR(50) NOT NULL, + amount INT NOT NULL, + card_type VARCHAR(255), + card_no VARCHAR(255), + pg_provider VARCHAR(255), + transaction_key VARCHAR(255), + failure_reason VARCHAR(255), + created_at DATETIME(6) NOT NULL, + updated_at DATETIME(6) NOT NULL, + deleted_at DATETIME(6) +); + +CREATE TABLE IF NOT EXISTS reconciliation_mismatch ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + type VARCHAR(50), + payment_id BIGINT, + our_status VARCHAR(50), + external_status VARCHAR(50), + detected_at DATETIME(6), + resolution VARCHAR(50), + created_at DATETIME(6), + updated_at DATETIME(6), + note TEXT +); diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java deleted file mode 100644 index ba862cec6..000000000 --- a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.loopers.interfaces.consumer; - -import com.loopers.confg.kafka.KafkaConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.stereotype.Component; - -import java.util.List; - -@Component -public class DemoKafkaConsumer { - @KafkaListener( - topics = {"${demo-kafka.test.topic-name}"}, - containerFactory = KafkaConfig.BATCH_LISTENER - ) - public void demoListener( - List> messages, - Acknowledgment acknowledgment - ){ - System.out.println(messages); - acknowledgment.acknowledge(); - } -} From 539bc0230677ce18de5673cefcc2529d6155466f Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: 
Fri, 27 Mar 2026 14:43:52 +0900 Subject: [PATCH 07/14] =?UTF-8?q?docs:=20=EC=9D=B4=EB=B2=A4=ED=8A=B8=20?= =?UTF-8?q?=ED=8C=8C=EC=9D=B4=ED=94=84=EB=9D=BC=EC=9D=B8=20=EC=84=A4?= =?UTF-8?q?=EA=B3=84=20=EB=AC=B8=EC=84=9C=20=EC=B6=94=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 08-event-pipeline.md: Outbox + Debezium CDC + Kafka 이벤트 파이프라인 설계 - 09-event-review.md: 이벤트 파이프라인 구현 리뷰 기록 - 05-payment-resilience.md: 결제 복구 배치 관련 설계 업데이트 - blog/week7-event-pipeline-notes.md: 주간 학습 기록 --- CLAUDE.md | 9 + blog/week7-event-pipeline-notes.md | 216 ++++ docs/design/05-payment-resilience.md | 2 + docs/design/08-event-pipeline.md | 1739 ++++++++++++++++++++++++++ docs/design/09-event-review.md | 858 +++++++++++++ 5 files changed, 2824 insertions(+) create mode 100644 blog/week7-event-pipeline-notes.md create mode 100644 docs/design/08-event-pipeline.md create mode 100644 docs/design/09-event-review.md diff --git a/CLAUDE.md b/CLAUDE.md index e6a4663f8..6414ba251 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -22,6 +22,15 @@ --- +## 트래픽 규모 전제 + +- 이 프로젝트는 **쿠팡, 무신사급 대규모 트래픽 이커머스**를 위한 설계를 적용하는 프로젝트이다 +- 모든 설계 결정(배치 주기, 테이블 정리 전략, 스레드 풀, 커넥션 풀 등)은 대규모 트래픽 기준으로 검토한다 +- "현재 단일 인스턴스니까 괜찮다"가 아니라, **스케일아웃 시에도 안전한 구조**를 기본으로 설계한다 +- 산술 근거 제시 시 피크 트래픽 기준으로 계산한다 + +--- + ## 도메인 & 객체 설계 전략 ### Entity / VO / Domain Service 구분 diff --git a/blog/week7-event-pipeline-notes.md b/blog/week7-event-pipeline-notes.md new file mode 100644 index 000000000..5b1876462 --- /dev/null +++ b/blog/week7-event-pipeline-notes.md @@ -0,0 +1,216 @@ +7주차 이벤트 파이프라인 — 테크니컬 라이팅 소재 노트 + +> 이 파일은 블로그 글의 "소재 창고"다. 설계 과정에서의 고민, 트레이드오프 분석, 결정과 근거를 그때그때 기록한다. + +--- + +## 소재 1: 핵심 vs 부가 로직 판단 — "이것이 실패하면 사용자 요청이 실패해야 하는가?" + +**고민**: 좋아요의 incrementLikeCount는 핵심인가 부가인가? + +좋아요를 누른 사용자에게 "좋아요 등록 완료"라고 응답했는데, 집계가 실패해서 목록의 좋아요 수가 반영 안 된다면? 사용자 입장에서는 좋아요를 눌렀는데 숫자가 안 올라간 것처럼 보인다. +`` +처음엔 "집계 실패해도 좋아요 자체는 성공"이라고 판단했다. 
하지만 즉시 반영 UX를 위해 incrementLikeCount를 AFTER_COMMIT에서 best-effort로 실행하기로 했다. + +**결정**: 핵심은 아니지만 UX를 위해 best-effort 즉시 반영 + Kafka 집계 + 배치 대사로 3중 안전망 구축. + +**트레이드오프**: 즉시 반영(UX) vs 트랜잭션 분리(안정성). 둘 다 잡되, 실패 시 최종 정합성은 Kafka+배치가 보장. + +**라이팅 포인트**: "핵심/부가 판단은 기술적 판단이 아니라 비즈니스 판단이다. 같은 연산이라도 UX 관점에서는 다른 답이 나올 수 있다." + +--- + +## 소재 2: Outbox Poller 중복 처리 — 놓치는 것 vs 중복 처리 + +**고민**: 다중 인스턴스에서 Outbox Poller가 같은 행을 두 번 처리하면? + +첫 반응: SELECT FOR UPDATE SKIP LOCKED로 행 잠금 → 중복 방지. + +**반론 (사용자 피드백)**: DB 커넥션을 잠금으로 점유하는 게 대량 트래픽에서 더 치명적이지 않나? 놓치는 것보다 중복 처리가 낫지 않나? + +**분석**: +- SKIP LOCKED: Kafka 장애 시 잠긴 행을 아무도 재시도 못함. 커넥션 점유 → API 응답 지연 +- 중복 허용: Consumer의 event_handled PK lookup으로 중복 걸러냄. 비용 = PK lookup 1회 (~0.1ms) + +놓치는 것 >> 중복 처리. Consumer가 멱등하면 중복은 무해하다. + +**결정**: 잠금 없는 단순 SELECT + Consumer 멱등 처리. At Least Once. + +**라이팅 포인트**: "동시성 제어의 관점을 바꾸자. Producer 쪽에서 중복을 막으려고 DB 잠금을 쓰면, 정작 막아야 할 것(이벤트 유실)이 발생한다. 중복 제어는 Consumer에게 맡기자." + +--- + +## 소재 3: Debezium vs Poller — 망치로 호두 까기? + +**고민**: Outbox → Kafka 발행에 CDC(Debezium)를 쓸 것인가, 단순 Poller를 쓸 것인가? + +**Poller**: @Scheduled 1개면 끝. 5초 주기. 인프라 추가 없음. +**Debezium**: Kafka Connect 클러스터 + MySQL binlog 설정 + Connector 관리. + +처음 분석: "event_outbox 하나를 5초마다 폴링하는 데 Kafka Connect 클러스터를 추가하는 건 망치로 호두 까기" + +**전환 이유**: 실무 적용 전 설정 경험 확보에 의의. 학습 프로젝트에서 과도한 인프라를 일부러 경험하는 것은 가치 있다. + +**Debezium을 선택함으로써 달라진 점**: +1. Outbox 테이블에 status 컬럼 불필요 (binlog에서 읽으므로) +2. 테이블 정리가 극적으로 단순해짐 (1시간 보존 후 DELETE) +3. Near real-time 발행 (5초 → 수백 ms) +4. 다중 인스턴스 중복 발행 문제 원천 해결 + +**트레이드오프**: 인프라 복잡도 ↑↑, 운영 난이도 ↑ / 안정성 ↑, 지연 ↓, 중복 해결 + +**라이팅 포인트**: "올바른 선택은 컨텍스트에 따라 다르다. 프로덕션이라면 Poller로 시작하고 규모가 커지면 Debezium으로 전환하는 것이 맞다. 학습이라면 일부러 어려운 길을 가는 것이 맞다." + +--- + +## 소재 4: 모놀리스에서 Kafka를 쓰는 의미 + +**고민**: "MSA가 아닌데 Kafka를 왜 쓰지?" + +이 프로젝트는 모놀리스가 아니다. commerce-api, commerce-streamer, commerce-batch — 3개의 독립 JVM 프로세스. 하지만 같은 DB를 공유한다. + +**핵심**: Kafka는 MSA 전용 기술이 아니라, 프로세스 간 비동기 통신 인프라. 
+ +MSA에서 Kafka가 필요한 이유: 서비스 간 데이터 동기화 (각자 DB) +멀티 프로세스에서 Kafka가 필요한 이유: 프로세스 간 메시지 전달 + 부하 분리 + +현재 아키텍처에서 Kafka 없이 가능한 대안: +- ApplicationEvent: 같은 JVM 안에서만 동작 → streamer가 받을 수 없음 +- DB 폴링: Kafka가 하는 것과 동일하지만 더 느리고 비효율적 +- Redis Pub/Sub: 구독자 없으면 메시지 유실 +- HTTP 호출: 동기 + 장애 전파 + +**결정**: Kafka는 멀티 프로세스 아키텍처의 필연적 선택. + +**라이팅 포인트**: "Kafka를 '마이크로서비스 아키텍처의 도구'로 한정하면 가능성의 절반을 잃는다. 프로세스 간 비동기 통신이 필요한 모든 곳에서 Kafka는 유효하다." + +--- + +## 소재 5: Outbox 테이블 정리 — 라운드 로빈 vs 파티셔닝 vs Debezium + DELETE + +**고민**: 쿠팡급 일 150만 건, 연 5.5억 건이 쌓이는 Outbox를 어떻게 정리하나? + +분석한 방법: Batch DELETE, 라운드 로빈, PARTITION DROP, MySQL EVENT, Debezium + 단순 DELETE + +**라운드 로빈의 치명적 문제**: JPA Entity는 테이블명이 고정(@Table(name="...")). 두 테이블 교대 → Native Query 강제 → DIP 위반. 코드가 인프라 구현에 오염된다. + +**PARTITION BY RANGE**: JPA 호환, DROP PARTITION O(1). 하지만 PK에 파티션키(created_at) 포함 필수 → 복합 PK 강제. + +**반전**: Debezium을 도입하면 테이블이 "큐"가 아니라 "쓰기 로그"로 변한다. 1시간 보존이면 최대 6.25만 건. 이 규모에서 DELETE는 ~1초. + +**결정**: Debezium + 단순 Batch DELETE. "복잡한 문제가 아니라, 문제를 복잡하게 만들지 않는 것." + +**라이팅 포인트**: "상위 설계 결정(Debezium 도입)이 하위 문제(테이블 정리)를 소멸시킨 사례. 개별 문제를 최적화하기 전에, 문제 자체를 없앨 수 있는 상위 결정이 있는지 먼저 살펴보자." + +--- + +## 소재 6: @Async 스레드 풀 — "DB 커넥션과 싸우지 마라" + +**고민**: @Async 스레드 풀 크기를 어떻게 잡나? + +첫 분석: 피크 TPS 기반으로 core=4, max=8 추천. +재분석: @Async에서 실행되는 작업이 DB 커넥션을 쓰는가? + +- incrementLikeCount → 동기 (Tomcat 스레드에서 실행) → DB 커넥션은 Tomcat 풀 몫 +- 캐시 무효화 → 동기 → Redis (Lettuce NIO, 풀 불필요) +- 유저 로깅 → @Async → DB/Redis 불필요 +- Kafka 발행 → @Async → KafkaTemplate.send()는 논블로킹 + +**결정**: @Async 작업은 모두 초경량. core=2, max=4로 충분. "DB 커넥션 풀과 경합하지 않는 것을 확인한 후에야 풀 크기를 결정할 수 있다." + +**라이팅 포인트**: "스레드 풀 크기는 작업 수가 아니라, 작업이 잡는 자원으로 결정한다. CPU 바운드 작업에 큰 풀은 컨텍스트 스위칭 비용만 늘린다." + +--- + +## 소재 7: Kafka 설정 점검 — value-serializer 오타가 동작하는 이유 + +**발견**: kafka.yml의 consumer 섹션에 `value-serializer` (serializer 키에 Deserializer 클래스). + +Spring Boot는 이 키를 무시한다 (consumer에는 `value-deserializer` 키만 인식). 그런데 동작하는 이유: KafkaConfig.java에서 ByteArrayJsonMessageConverter를 설정해서 변환을 대체하고 있다. 
+ +**교훈**: "설정이 잘못되어도 다른 계층이 보완해서 동작하면, 문제를 발견하기 어렵다. 코드 리뷰에서 설정 파일도 검증 대상이다." + +--- + +## 소재 8: Kafka Config 심층 분석 — "설정은 개별이 아니라 조합으로 검증해야 한다" + +설계 명세를 작성하고 Kafka 기술 가이드 키워드(acks, min.insync.replicas, idempotency, zero-copy, page cache, KRaft)로 리뷰하면서 7가지 문제를 발견했다. 핵심은 **개별 설정값이 아니라 설정 간 상호작용**을 이해하지 못하면 "설정했는데 안 되는" 상황이 발생한다는 것. + +### 8-1. acks=all이 무의미해지는 순간 + +`acks=all`은 "ISR(In-Sync Replicas) 전원에게 기록 확인"이다. 그런데 **브로커가 1대뿐이면 ISR = {Leader 1대}**이므로 `acks=all ≡ acks=1`이다. `acks=all`이 의미를 갖으려면: + +| 조합 | 효과 | +|---|---| +| acks=all + replicas=1 | acks=1과 동일 — 무의미 | +| acks=all + replicas=3 + min.insync.replicas=1 | Leader만 확인 — 여전히 약함 | +| acks=all + replicas=3 + min.insync.replicas=2 | Leader + 최소 1 Follower 확인 — **프로덕션 권장** | + +**라이팅 포인트**: "acks=all은 '완벽한 안전'이 아니라, min.insync.replicas와 조합될 때만 의미가 있다. 설정의 의미는 단독이 아니라 조합에서 나온다." + +### 8-2. enable.idempotence=true + retries=3의 모순 + +Idempotent Producer는 내부적으로 `retries=Integer.MAX_VALUE`를 강제한다. 그런데 `retries: 3`을 yml에 명시하면 **기본값을 덮어쓴다**. 결과: 3회 재시도 후 포기 → 메시지 유실 가능. idempotent producer의 핵심 보장("절대 유실하지 않음")이 깨진다. + +올바른 제어: `retries`를 건드리지 않고, `delivery.timeout.ms`(기본 120초)로 **시간 기반** 제어. + +**고민**: 왜 이 실수가 흔한가? — Spring Boot의 yml 설정이 Kafka 클라이언트의 기본값 체계를 **완전히 무시**하기 때문이다. `retries: 3`은 "3번만 재시도하세요"라는 명시적 지시이고, enable.idempotence의 암묵적 기본값(MAX_VALUE)보다 우선한다. **명시 > 암묵**이라는 설정 우선순위 원칙이 여기서 함정이 된다. + +**라이팅 포인트**: "프레임워크가 자동으로 설정해주는 값을 '직접 설정'으로 덮어쓰는 순간, 자동 설정의 의도도 함께 덮어쓴다. 설정을 추가하기 전에, '이 값을 내가 관리해야 하는가?'를 먼저 질문하자." + +### 8-3. Consumer 멱등성의 원자성 갭 + +초기 설계: `비즈니스 로직 실행 → event_handled INSERT`. 이 두 단계 사이에 크래시가 발생하면? + +``` +비즈니스 로직 성공 (product_metrics +1) + ← 여기서 크래시 +event_handled INSERT (실행 안 됨) +→ 재시작 시 event_handled에 없음 → 다시 처리 → product_metrics +1 (중복) +``` + +**해결**: INSERT-first 패턴 — `event_handled INSERT → 비즈니스 로직`을 **단일 트랜잭션**으로 묶는다. 
+ +- event_handled INSERT 성공 → 비즈니스 로직 실행 → TX 커밋: 정상 흐름 +- 비즈니스 로직 실패 → TX 롤백: event_handled도 롤백 → 재시도 가능 +- TX 커밋 후 크래시 → 재시작 시 event_handled에 이미 존재 → skip + +**라이팅 포인트**: "멱등 처리에서 '확인'과 '실행'이 원자적이지 않으면, 멱등이 아니다. 체크와 실행 사이의 갭이 바로 장애가 파고드는 틈이다." + +### 8-4. 놓치기 쉬운 설정들 + +| 설정 | 역할 | 왜 놓치나 | +|---|---|---| +| `isolation.level: read_committed` | TX 커밋된 메시지만 읽기 | Debezium이 TX 단위로 발행하므로 필수인데, Consumer 설정이라 Producer 설계 시 빠짐 | +| `auto.offset.reset: earliest` | 신규 Consumer Group이 처음부터 읽기 | 기본값 `latest` → 기존 메시지 유실 | +| `max.poll.interval.ms` | poll() 간격 초과 시 리밸런싱 | SINGLE_LISTENER에서 건별 CAS UPDATE → 기본 5분 초과 가능 | +| `compression.type: lz4` | 배치 압축 | "압축은 나중에"라는 생각 → 초기부터 설정해야 Broker 디스크 + 네트워크 절약 | + +### 8-5. Zero-Copy와 OS Page Cache — 배치/압축 설정의 물리적 근거 + +Kafka가 빠른 이유를 두 가지 OS 최적화로 설명할 수 있다: + +1. **Zero-Copy**: Broker → Consumer 전송 시 `sendfile()` 시스템콜 사용. 디스크 → 커널 버퍼 → 네트워크 소켓으로 **유저 스페이스를 거치지 않고** 직접 전달. CPU 사용량과 메모리 복사 최소화. + +2. **OS Page Cache**: Broker는 메시지를 JVM 힙이 아닌 OS 페이지 캐시에 저장. 최근 메시지는 디스크 I/O 없이 메모리에서 서빙. + +이 두 최적화의 효율을 극대화하는 것이 `linger.ms`, `batch.size`, `compression.type` 설정의 **물리적 근거**다: +- 작은 메시지를 하나씩 보내면 → 네트워크 라운드트립 N배 + sendfile 호출 N배 +- 배치로 묶어서 보내면 → 한 번의 sendfile로 큰 블록 전송 + 압축으로 페이지 캐시 적중률 향상 + +**라이팅 포인트**: "설정값의 의미를 물리 계층까지 추적하면, '왜 이 값인가'에 답할 수 있다. linger.ms=50은 '50ms 지연'이 아니라, 'Zero-Copy 한 번의 전송량을 극대화하는 버퍼링 시간'이다." + +### 8-6. Consumer Group 분리 — 처리 특성이 다르면 격리하라 + +MetricsConsumer(배치 UPSERT)와 CouponIssueConsumer(건별 CAS)가 같은 group-id를 공유하면: 쿠폰 발급의 건별 처리 지연 → group 전체 리밸런싱 → 메트릭 집계까지 중단. + +**결정**: `metrics-collector`, `coupon-issuer`로 분리. 처리 특성(배치 vs 건별), 부하 패턴(상시 vs 이벤트성), 장애 영향 범위를 기준으로 Consumer Group을 설계한다. 
+ +--- + +## (기록 예정) + +- [x] 08 설계 명세 작성 시 최종 설계 결정 기록 +- [ ] Phase별 구현 시 구현 고민점/문제점/해결 흐름 추가 +- [ ] Debezium 셋업 과정에서의 삽질 기록 +- [ ] 선착순 쿠폰 동시성 테스트 결과와 인사이트 diff --git a/docs/design/05-payment-resilience.md b/docs/design/05-payment-resilience.md index 561fd114e..903bc340b 100644 --- a/docs/design/05-payment-resilience.md +++ b/docs/design/05-payment-resilience.md @@ -327,6 +327,8 @@ PG 시뮬레이터의 정상 요청 실패율은 40%이다. 50%는 PG의 기본 실패율(40%)에 10%p 여유를 둔 값이다. 10건 중 5건 이상 실패하면 PG에 추가적인 문제가 있다고 판단할 수 있다. +> **참고**: 현재 임계치는 시뮬레이터 실패율(40%) 기준으로 산출. 실무에서 PG 연동 시 운영 환경 실측 실패율(baseline)을 기반으로 재조정이 필요하다. + ### 7.4 CB 세분화: PG × API 유형별 분리 **원칙**: 결제 요청 CB가 Open되어도 상태 조회 CB는 Closed → 복구 로직 계속 동작. diff --git a/docs/design/08-event-pipeline.md b/docs/design/08-event-pipeline.md new file mode 100644 index 000000000..fca54c090 --- /dev/null +++ b/docs/design/08-event-pipeline.md @@ -0,0 +1,1739 @@ +# ApplicationEvent + Kafka 이벤트 파이프라인 — 설계 명세 + +--- + +## 1. 개요 + +본 문서는 7주차 과제의 구현 명세를 정의한다. + +| Step | 주제 | 핵심 | +|---|---|---| +| Step 1 | ApplicationEvent로 경계 나누기 | 핵심 로직 vs 부가 로직 판단 + 트랜잭션 분리 | +| Step 2 | Kafka 이벤트 파이프라인 | Outbox → Debezium CDC → Kafka → commerce-streamer, product_metrics 집계, 멱등 처리 | +| Step 3 | 선착순 쿠폰 발급 | API → Kafka 발행 → Consumer 순차 처리, 수량 제한 동시성 제어 | + +**참조 문서:** +- 05-payment-resilience.md — PG 비동기 결제 Resilience 설계 (스타일 기준) +- 09-event-review.md — 이벤트 파이프라인 아키텍트 리뷰 (분석 근거) + +--- + +## 2. 
현재 상태 + +### 2.1 인프라 상태 + +| 구성 요소 | 상태 | 비고 | +|---|---|---| +| commerce-api | Kafka 미사용 | `modules:kafka` 의존 없음, kafka.yml 미임포트 | +| commerce-streamer | DemoKafkaConsumer 1개 | `demo.internal.topic-v1` 소비만 | +| modules/kafka | 설정 완료 | KafkaTemplate, BATCH_LISTENER (manual ack, concurrency 3, max poll 3000) | +| Docker Kafka | KRaft 모드 | 단일 브로커, port 9092/19092, 토픽 자동 생성 비활성화 | +| Docker MySQL | 8.0 | binlog 미활성화 (기본값), port 3306 | +| Docker Redis | Master-Replica | port 6379/6380, AOF 영속성 | + +**kafka.yml 발견된 문제점:** + +| # | 문제 | 위치 | 심각도 | +|---|---|---|---| +| 1 | Consumer `value-serializer` → `value-deserializer` 오타 | kafka.yml:21 | 경미 (Converter가 대체) | +| 2 | Producer acks/idempotence 미설정 | kafka.yml:14-17 | **중요** (메시지 유실 가능) | +| 3 | 단건 처리용 Consumer Factory 부재 | KafkaConfig.java | **중요** (쿠폰 발급용) | +| 4 | Error Handler / DLQ 미설정 | KafkaConfig.java | **중요** | +| 5 | 토픽 생성 전략 없음 | N/A | 중간 | + +### 2.2 좋아요 흐름 + +``` +[LikeFacade.addLike — 단일 TX @Transactional] + 1. 상품 존재 확인 (productRepository.findById) + 2. 중복 좋아요 확인 (existsByMemberIdAndProductId) + 3. Like INSERT (likeRepository.save) + 4. Product.incrementLikeCount (SQL atomic UPDATE) ← 부가 로직이 TX 내부 +[TX commit] + +[LikeController — 인라인 처리] + 5. productCachePort.evictProductDetail(productId) ← 캐시 무효화가 Controller에 인라인 + 6. productCachePort.evictProductList() +``` + +**문제점:** +- Like INSERT(핵심)와 likeCount UPDATE(부가/집계)가 같은 TX — 집계 실패 시 좋아요 자체 롤백 +- 캐시 무효화가 Controller에 인라인 — 관심사 분리 안 됨 + +### 2.3 주문 흐름 + +``` +[OrderFacade.createOrder — 단일 TX @Transactional] + 1. 상품 비관적 락 (deadlock 방지 위해 ID 정렬) + 2. 브랜드 조회 (N+1 방지) + 3. 스냅샷 생성 (OrderItem) + 4. 재고 차감 (product.decreaseStock) + 5. 쿠폰 적용 (CouponFacade.applyCouponToOrder — CAS UPDATE) + 6. 주문 저장 (Order.create) + 7. 
쿠폰-주문 연결 (couponIssue.linkOrder) +[TX commit] +``` + +**문제점:** +- 부가 로직(판매량 집계, 알림)이 존재하지 않지만, 추가 시 TX 안에 진입할 구조 +- 쿠폰 적용은 가격 계산에 직접 영향 → 핵심 로직 (분리 불가) + +### 2.4 조회 흐름 + +``` +ProductFacade.getProductDetailCached(): + L1(Caffeine) → L2(Redis) → DB → 캐시 저장 + +조회수 추적: 없음 (7주차에서 신규 추가) +``` + +### 2.5 쿠폰 구조 + +``` +Coupon: name, discountType, discountValue, minOrderAmount, expiredAt +CouponIssue: couponId, memberId, status(AVAILABLE/USED/EXPIRED), expiredAt + +수량 제한: 없음 → maxIssuanceCount, issuedCount 추가 필요 +중복 발급 방지: 없음 (같은 쿠폰을 같은 유저가 여러 번 발급 가능) +인덱스: idx_coupon_issue_member_id, idx_coupon_issue_coupon_id (UNIQUE 없음) +``` + +--- + +## 3. Step 1 — ApplicationEvent 경계 분리 + +### 3.1 판단 프레임워크 + +``` +핵심 로직 = "이것이 실패하면 사용자 요청 자체가 실패해야 하는가?" + → YES: 핵심 TX 안에 유지 + → NO: 이벤트로 분리 가능 + +부가 로직 = "이것이 실패해도 사용자에게는 성공으로 보여야 하는가?" + → YES: 이벤트 분리 (eventual consistency) +``` + +### 3.2 플로우별 핵심/부가 분리표 + +#### 좋아요 플로우 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| Like INSERT | **핵심** | 사용자 의도 (좋아요 누르기) | X | +| Outbox INSERT | **핵심** | Kafka 발행 보장 (같은 TX) | X | +| Product.incrementLikeCount | 부가 | 집계 실패와 무관하게 좋아요는 성공 | O | +| 캐시 무효화 | 부가 | 캐시 무효화 실패해도 좋아요는 성공 | O | + +#### 주문 플로우 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| 재고 차감 | **핵심** | 재고 없으면 주문 불가 | X | +| 쿠폰 적용 | **핵심** | 할인 금액이 totalPrice 계산에 직접 영향 | X | +| 주문 저장 | **핵심** | 주문 자체 | X | +| Outbox INSERT | **핵심** | Kafka 발행 보장 (같은 TX) | X | +| 판매량 집계 | 부가 | 집계 실패해도 주문에 영향 없음 | O | + +#### 조회 플로우 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| 상품 데이터 반환 | **핵심** | 사용자 요청 목적 | X | +| 조회수 기록 | 부가 | 조회수 기록 실패해도 상품은 보여야 함 | O | + +#### 주문 취소 플로우 + +| 처리 | 핵심/부가 | 이벤트 분리 | +|---|---|---| +| Order.cancel() | **핵심** | X | +| 재고 복원 | **핵심** | X (재고 복원 실패 시 데이터 불일치) | +| 쿠폰 복원 | **핵심** | X (쿠폰 복원 실패 시 고객 손해) | +| Outbox INSERT | **핵심** | X | +| 판매량 차감 집계 | 부가 | O | + +### 3.3 이벤트 클래스 설계 + +```java +// commerce-api: com.loopers.domain.event + +public record LikeCreatedEvent( + Long productId, + Long 
memberId, + Long likeId +) {} + +public record LikeRemovedEvent( + Long productId, + Long memberId, + Long likeId +) {} + +public record OrderCreatedEvent( + Long orderId, + Long memberId, + List items // productId, quantity, price +) { + public record OrderItemInfo(Long productId, int quantity, int price) {} +} + +public record OrderCancelledEvent( + Long orderId, + Long memberId, + List items +) { + public record OrderItemInfo(Long productId, int quantity, int price) {} +} + +public record ProductViewedEvent( + Long productId, + Long memberId // nullable — 비로그인 조회 허용 +) {} +``` + +### 3.4 이벤트 리스너 설계 + +| 리스너 | 이벤트 | Phase | @Async | 처리 내용 | 실패 대응 | +|---|---|---|---|---|---| +| LikeCountEventListener | LikeCreated/Removed | AFTER_COMMIT | X (동기) | incrementLikeCount / decrementLikeCount | try-catch + 로그, product_metrics가 최종 보정 | +| CacheEvictionEventListener | LikeCreated/Removed | AFTER_COMMIT | X (동기) | evictProductDetail + evictProductList | try-catch + 로그, 다음 TTL 만료 시 자연 갱신 | +| ProductViewKafkaPublisher | ProductViewed | AFTER_COMMIT | **O** | KafkaTemplate.send (Outbox 미경유) | try-catch + 로그, 유실 허용 | + +**incrementLikeCount를 동기로 유지하는 이유 (09 §2.7):** +- 사용자가 좋아요 직후 목록을 새로고침하면 반영되어 있기를 기대 +- AFTER_COMMIT에서 best-effort로 실행하되, 실패해도 Like 자체는 이미 저장됨 +- product_metrics + MetricsReconcileTasklet이 최종 정합성을 보장하는 안전망 역할 + +**캐시 무효화를 동기로 유지하는 이유:** +- 다음 조회 시 최신 데이터 보장 (UX) +- Redis eviction은 ~1ms — 응답 지연 무시 가능 + +### 3.5 이벤트 발행 위치 + +```java +// LikeFacade — 변경 후 +@Transactional +public void addLike(Long memberId, Long productId) { + // ... 기존 검증 ... + Like like = likeRepository.save(new Like(memberId, productId)); + outboxRepository.save(EventOutbox.create( + "Product", productId, "LIKE_CREATED", payload)); // Outbox INSERT (같은 TX) + eventPublisher.publishEvent(new LikeCreatedEvent( + productId, memberId, like.getId())); // AFTER_COMMIT 트리거 +} + +// OrderFacade — 변경 후 +@Transactional +public Order createOrder(...) { + // ... 기존 핵심 로직 (재고 차감 + 쿠폰 + 주문 저장) ... 
+ outboxRepository.save(EventOutbox.create( + "Order", order.getId(), "ORDER_CREATED", payload)); // Outbox INSERT (같은 TX) + eventPublisher.publishEvent(new OrderCreatedEvent( + order.getId(), memberId, items)); // AFTER_COMMIT 트리거 + return order; +} + +// ProductFacade — 변경 후 +public ProductDto.ProductResponse getProductDetailCached(Long productId) { + // ... 기존 캐시 조회 로직 ... + eventPublisher.publishEvent(new ProductViewedEvent( + productId, memberId)); // 조회수 이벤트 (TX 없음) + return response; +} +``` + +### 3.6 @Async 스레드 풀 + +```java +@Configuration +@EnableAsync +public class AsyncConfig implements AsyncConfigurer { + + @Override + public Executor getAsyncExecutor() { + ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + executor.setCorePoolSize(2); + executor.setMaxPoolSize(4); + executor.setQueueCapacity(100); + executor.setThreadNamePrefix("event-async-"); + executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy()); + executor.initialize(); + return executor; + } +} +``` + +**core=2, max=4 근거 (09 §13):** +- @Async 대상 작업: 조회수 Kafka 발행 (논블로킹) +- DB/Redis 커넥션 미사용 → HikariCP(max 40)과 경합 없음 +- 큰 풀은 컨텍스트 스위칭만 유발 +- CallerRunsPolicy → 큐 초과 시 호출 스레드에서 실행 (배압) + +--- + +## 4. 
Step 2 — Kafka 이벤트 파이프라인 + +### 4.1 전체 아키텍처 흐름도 + +``` +┌─────────────────── commerce-api ───────────────────┐ +│ │ +│ [Facade] │ +│ │ │ +│ ├─ [TX] 도메인 변경 + event_outbox INSERT │ +│ │ → commit │ +│ │ │ +│ ├─ [AFTER_COMMIT] ApplicationEvent │ +│ │ ├─ incrementLikeCount (동기, best-effort) │ +│ │ ├─ 캐시 무효화 (동기) │ +│ │ └─ 조회수 KafkaTemplate.send (@Async) │ +│ │ │ +│ └─ event_outbox 테이블 │ +│ ↓ (MySQL binlog) │ +└─────────┼───────────────────────────────────────────┘ + │ + ┌──────┼──────── Kafka Connect ──────────────┐ + │ [Debezium MySQL Connector] │ + │ └─ Outbox Event Router SMT │ + │ → route.by.field = aggregate_type │ + └──────┼─────────────────────────────────────┘ + │ + ┌──────┼──────── Kafka ──────────────────────┐ + │ ▼ │ + │ ┌─────────────────┐ ┌──────────────────┐ │ + │ │ catalog-events │ │ order-events │ │ + │ │ (product views, │ │ (order created, │ │ + │ │ likes) │ │ cancelled) │ │ + │ └────────┬────────┘ └────────┬─────────┘ │ + │ │ │ │ + │ ┌────────────────────────────┐ │ + │ │ coupon-issue-requests │ │ + │ └────────┬───────────────────┘ │ + └───────────┼──────────────┼──────────────────┘ + │ │ + ┌───────────┼──────────────┼─── commerce-streamer ──┐ + │ ▼ ▼ │ + │ [MetricsConsumer] [CouponIssueConsumer] │ + │ → product_metrics → CAS UPDATE coupon │ + │ UPSERT → CouponIssue INSERT │ + │ → event_handled → event_handled │ + └───────────────────────────────────────────────────┘ +``` + +### 4.2 event_outbox DDL + Entity + +```sql +CREATE TABLE event_outbox ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + aggregate_type VARCHAR(50) NOT NULL, -- 'Product', 'Order' + aggregate_id BIGINT NOT NULL, -- productId, orderId + event_type VARCHAR(50) NOT NULL, -- 'LIKE_CREATED', 'ORDER_CREATED', ... + payload TEXT NOT NULL, -- JSON + created_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + INDEX idx_event_outbox_created_at (created_at) +); +``` + +**status 컬럼이 없는 이유 (09 §8.5):** +Debezium이 MySQL binlog에서 직접 읽으므로 PENDING/PROCESSED 구분이 불필요하다. 
+Poller 방식이라면 `SELECT WHERE status = 'PENDING'`이 필요하지만, CDC 방식은 INSERT 시점에 binlog 이벤트가 발생하며 Debezium이 이를 실시간 감지한다. + +```java +// commerce-api: com.loopers.domain.event + +@Entity +@Table(name = "event_outbox") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class EventOutbox { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "aggregate_type", nullable = false, length = 50) + private String aggregateType; + + @Column(name = "aggregate_id", nullable = false) + private Long aggregateId; + + @Column(name = "event_type", nullable = false, length = 50) + private String eventType; + + @Column(nullable = false, columnDefinition = "TEXT") + private String payload; + + @Column(name = "created_at", nullable = false, updatable = false) + private LocalDateTime createdAt; + + public static EventOutbox create(String aggregateType, Long aggregateId, + String eventType, String payload) { + EventOutbox outbox = new EventOutbox(); + outbox.aggregateType = aggregateType; + outbox.aggregateId = aggregateId; + outbox.eventType = eventType; + outbox.payload = payload; + outbox.createdAt = LocalDateTime.now(); + return outbox; + } +} +``` + +### 4.3 Debezium CDC 구성 + +#### 4.3.1 MySQL binlog 활성화 + +```yaml +# docker/infra-compose.yml — mysql 서비스에 command 추가 +mysql: + image: mysql:8.0 + command: + - --log-bin=mysql-bin + - --binlog-format=ROW + - --binlog-row-image=FULL + - --server-id=1 + # ... 
기존 설정 유지 +``` + +#### 4.3.2 Kafka Connect Docker 서비스 + +```yaml +# docker/infra-compose.yml — 서비스 추가 +kafka-connect: + image: debezium/connect:2.5 + container_name: kafka-connect + depends_on: + kafka: + condition: service_healthy + mysql: + condition: service_started + ports: + - "8083:8083" + environment: + GROUP_ID: 1 + BOOTSTRAP_SERVERS: kafka:9092 + CONFIG_STORAGE_TOPIC: _connect_configs + OFFSET_STORAGE_TOPIC: _connect_offsets + STATUS_STORAGE_TOPIC: _connect_status + CONFIG_STORAGE_REPLICATION_FACTOR: 1 + OFFSET_STORAGE_REPLICATION_FACTOR: 1 + STATUS_STORAGE_REPLICATION_FACTOR: 1 + KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + KEY_CONVERTER_SCHEMAS_ENABLE: "false" + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8083/connectors"] + interval: 10s + timeout: 5s + retries: 10 +``` + +**KRaft 모드 + Kafka Connect 내부 토픽 자동 생성 이슈:** + +현재 인프라는 KRaft 모드(ZooKeeper 없음)로 Kafka를 운영한다. Kafka Connect는 시작 시 내부 토픽 3개(`_connect_configs`, `_connect_offsets`, `_connect_status`)를 자동 생성하는데, KRaft 컨트롤러가 아직 준비되지 않은 시점에 생성을 시도하면 실패할 수 있다. + +**대응:** +1. `depends_on: kafka: condition: service_healthy` — Kafka 브로커의 healthcheck 통과 후 Connect 시작 +2. Connect의 `healthcheck.retries: 10` — 내부 토픽 생성 재시도 여유 확보 +3. 
만약 Connect 시작 실패 시, 내부 토픽을 수동 생성: + +```bash +# Kafka Connect 내부 토픽 수동 생성 (KRaft 환경에서 자동 생성 실패 시) +docker exec kafka kafka-topics.sh --bootstrap-server localhost:9092 \ + --create --topic _connect_configs --partitions 1 --replication-factor 1 --config cleanup.policy=compact +docker exec kafka kafka-topics.sh --bootstrap-server localhost:9092 \ + --create --topic _connect_offsets --partitions 25 --replication-factor 1 --config cleanup.policy=compact +docker exec kafka kafka-topics.sh --bootstrap-server localhost:9092 \ + --create --topic _connect_status --partitions 5 --replication-factor 1 --config cleanup.policy=compact +``` + +#### 4.3.3 Debezium MySQL Connector + Outbox Event Router SMT + +```bash +#!/bin/bash +# docker/register-debezium-connector.sh + +curl -X POST http://localhost:8083/connectors -H "Content-Type: application/json" -d '{ + "name": "loopers-outbox-connector", + "config": { + "connector.class": "io.debezium.connector.mysql.MySqlConnector", + "tasks.max": "1", + + "database.hostname": "mysql", + "database.port": "3306", + "database.user": "root", + "database.password": "root", + "database.server.id": "184054", + "topic.prefix": "loopers", + + "database.include.list": "loopers", + "table.include.list": "loopers.event_outbox", + + "schema.history.internal.kafka.bootstrap.servers": "kafka:9092", + "schema.history.internal.kafka.topic": "_schema_history", + + "transforms": "outbox", + "transforms.outbox.type": "io.debezium.transforms.outbox.EventRouter", + "transforms.outbox.table.field.event.id": "id", + "transforms.outbox.table.field.event.key": "aggregate_id", + "transforms.outbox.table.field.event.type": "event_type", + "transforms.outbox.table.field.event.payload": "payload", + "transforms.outbox.route.by.field": "aggregate_type", + "transforms.outbox.route.topic.replacement": "${routedByValue}-events", + "transforms.outbox.table.fields.additional.placement": "event_type:header:eventType", + + "tombstones.on.delete": "false" + } +}' +``` + 
+**라우팅 결과:** + +| aggregate_type | 라우팅 토픽 | event_type 예시 | +|---|---|---| +| Product | `Product-events` → 별칭: `catalog-events` | LIKE_CREATED, LIKE_REMOVED | +| Order | `Order-events` → 별칭: `order-events` | ORDER_CREATED, ORDER_CANCELLED | + +> **토픽 라우팅 보완:** Debezium Outbox Event Router의 `route.topic.replacement`이 `${routedByValue}-events`로 동작하므로, aggregate_type 값을 소문자(`product`, `order`)로 저장하거나, RegexRouter SMT를 추가하여 `catalog-events`, `order-events`로 변환한다. 구현 시 최종 확정. + +### 4.4 토픽 설계 + +| 토픽 | Key | 이벤트 유형 | Producer | Consumer | +|---|---|---|---|---| +| `catalog-events` | productId | LIKE_CREATED, LIKE_REMOVED, PRODUCT_VIEWED | Debezium + commerce-api(조회수) | commerce-streamer | +| `order-events` | orderId | ORDER_CREATED, ORDER_CANCELLED | Debezium | commerce-streamer | +| `coupon-issue-requests` | couponId | COUPON_ISSUE_REQUESTED | commerce-api (직접) | commerce-streamer | + +**Key 설계 근거:** +- catalog-events key=productId → 같은 상품의 이벤트는 같은 파티션 → 순서 보장 +- order-events key=orderId → 같은 주문의 이벤트는 같은 파티션 +- coupon-issue-requests key=couponId → 같은 쿠폰의 발급 요청은 같은 파티션 + +**acks + min.insync.replicas 상관관계:** + +| 설정 조합 | 의미 | 메시지 유실 | 가용성 | +|---|---|---|---| +| `acks=all` + `replicas=1` + `min.insync.replicas=1` | **현재 (개발)** — 브로커 1대뿐이므로 acks=all ≡ acks=1 | 브로커 장애 시 유실 | 높음 | +| `acks=all` + `replicas=3` + `min.insync.replicas=2` | **프로덕션 권장** — Leader + 최소 1 Follower 기록 확인 | 2대 동시 장애 아닌 한 무유실 | 1대 장애까지 허용 | +| `acks=all` + `replicas=3` + `min.insync.replicas=3` | ISR 3대 모두 기록 확인 | 무유실 | 1대라도 장애 시 쓰기 불가 | + +> **핵심**: `acks=all`은 ISR(In-Sync Replicas) 전원에게 기록 확인을 요구하지만, 브로커가 1대뿐이면 `acks=1`과 동일하다. `acks=all`이 의미를 가지려면 반드시 `min.insync.replicas ≥ 2` + `replicas ≥ 3`이 전제되어야 한다.
+ +```java +// commerce-api: com.loopers.infrastructure.kafka + +@Configuration +public class KafkaTopicConfig { + + // 개발 환경: 단일 브로커 → replicas=1 + // 프로덕션: replicas=3, min.insync.replicas=2 설정 필수 + // → .config("min.insync.replicas", "2") + + @Bean + public NewTopic catalogEvents() { + return TopicBuilder.name("catalog-events") + .partitions(3) + .replicas(1) // 프로덕션: .replicas(3) + .build(); + } + + @Bean + public NewTopic orderEvents() { + return TopicBuilder.name("order-events") + .partitions(3) + .replicas(1) // 프로덕션: .replicas(3) + .build(); + } + + @Bean + public NewTopic couponIssueRequests() { + return TopicBuilder.name("coupon-issue-requests") + .partitions(3) + .replicas(1) // 프로덕션: .replicas(3) + .build(); + } +} +``` + +### 4.5 Producer 설정 보완 + +```yaml +# modules/kafka/src/main/resources/kafka.yml — producer 섹션 보완 +spring: + kafka: + producer: + acks: all # 모든 ISR에 기록 확인 후 응답 → 메시지 유실 방지 + key-serializer: org.apache.kafka.common.serialization.StringSerializer + value-serializer: org.springframework.kafka.support.serializer.JsonSerializer + # retries 명시하지 않음 — enable.idempotence=true 시 기본값 Integer.MAX_VALUE + # retries를 직접 설정하면 idempotent producer의 무한 재시도 보장이 깨진다 + properties: + enable.idempotence: true # Producer 레벨 중복 발행 방지 + max.in.flight.requests.per.connection: 5 # idempotence 활성화 시 최대 5 + delivery.timeout.ms: 120000 # 재시도 포함 전체 발행 타임아웃 (2분) + linger.ms: 50 # 50ms 버퍼링 → 배치 효율 향상 + batch.size: 32768 # 32KB 배치 크기 + compression.type: lz4 # 압축 → 네트워크 I/O 감소 + Broker 디스크 절약 +``` + +**enable.idempotence=true와 retries의 관계:** +- `enable.idempotence=true`를 설정하면 Kafka는 내부적으로 `retries=Integer.MAX_VALUE`, `max.in.flight.requests.per.connection ≤ 5`를 강제한다. +- `retries: 3`을 명시하면 idempotent producer의 기본값(MAX_VALUE)을 **덮어쓴다** → 3회 재시도 후 포기 → 메시지 유실 가능. +- 재시도 횟수 대신 `delivery.timeout.ms`(기본 120초)로 **시간 기반 제어**가 올바르다. 이 시간 내에서 무한 재시도한다. + +**linger.ms + batch.size + compression.type의 원리 — Zero-Copy와 OS Page Cache:** + +Kafka의 높은 처리량은 두 가지 OS 수준 최적화에 기반한다: + +1. 
**Zero-Copy (sendfile 시스템콜)**: Broker가 Consumer에게 메시지를 전달할 때, 디스크 → 커널 버퍼 → 네트워크 소켓으로 직접 복사한다. 유저 스페이스로 데이터를 올리지 않으므로 CPU 사용량과 메모리 복사가 극적으로 줄어든다. +2. **OS Page Cache**: Broker는 메시지를 JVM 힙이 아닌 OS 페이지 캐시에 저장한다. 최근 메시지는 디스크 I/O 없이 메모리에서 바로 서빙된다. + +이 두 가지 최적화의 효율을 극대화하려면 **작은 메시지를 하나씩 보내는 대신, 배치로 묶어서 보내는 것**이 핵심이다: +- `linger.ms=50`: 50ms 동안 메시지를 버퍼에 모은 뒤 한 번에 전송 → 네트워크 라운드트립 감소 +- `batch.size=32768`: 32KB 단위로 배치 → Zero-Copy 시 큰 블록 전송으로 효율 증가 +- `compression.type=lz4`: 배치 단위 압축 → 네트워크 I/O 감소 + Broker 디스크 절약 + 페이지 캐시 적중률 향상 (같은 메모리에 더 많은 메시지 캐싱) + +### 4.6 Consumer 설정 보완 + +```yaml +# modules/kafka/src/main/resources/kafka.yml — consumer 섹션 수정 +spring: + kafka: + consumer: + group-id: loopers-default-consumer + key-deserializer: org.apache.kafka.common.serialization.StringDeserializer + value-deserializer: org.apache.kafka.common.serialization.ByteArrayDeserializer # 오타 수정 + auto-offset-reset: earliest # 신규 Consumer Group은 처음부터 읽기 (latest → 유실) + properties: + enable.auto.commit: false # properties 하위는 Kafka 네이티브 dot 표기 (enable-auto-commit은 무시됨) + isolation.level: read_committed # Debezium TX 메시지 — 커밋된 것만 읽기 +``` + +**설정 근거:** +- `auto-offset-reset: earliest` — 신규 Consumer Group이 토픽에 처음 참여할 때 `latest`(기본값)이면 기존 메시지를 건너뛴다. 이벤트 파이프라인에서 메시지 유실은 허용 불가. `earliest`로 설정하여 처음부터 읽는다. 중복은 event_handled가 걸러낸다. +- `isolation.level: read_committed` — Debezium이 Outbox 테이블의 INSERT를 binlog에서 읽을 때, TX가 커밋되기 전의 중간 상태도 발행될 수 있다. `read_committed`는 커밋된 메시지만 Consumer에게 노출한다.
+ +**SINGLE_LISTENER 추가 (KafkaConfig.java):** + +```java +// modules/kafka — KafkaConfig.java에 추가 + +public static final String SINGLE_LISTENER = "SINGLE_LISTENER_DEFAULT"; + +@Bean(name = SINGLE_LISTENER) +public ConcurrentKafkaListenerContainerFactory defaultSingleListenerContainerFactory( + KafkaProperties kafkaProperties, + ByteArrayJsonMessageConverter converter, + DefaultErrorHandler errorHandler +) { + Map consumerConfig = new HashMap<>(kafkaProperties.buildConsumerProperties()); + // SINGLE_LISTENER는 건별 CAS UPDATE — 처리 시간이 BATCH보다 길 수 있음 + consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600_000); // 10분 + + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig)); + factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL); + factory.setMessageConverter(converter); + factory.setConcurrency(1); + factory.setBatchListener(false); // 단건 처리 + factory.setCommonErrorHandler(errorHandler); + return factory; +} +``` + +**max.poll.interval.ms 설정 근거:** +- SINGLE_LISTENER에서 건별 CAS UPDATE + UNIQUE INSERT + 상태 업데이트를 수행한다. +- DB 부하가 높은 시점에 처리가 지연되면, 기본값(5분) 내에 다음 poll()을 호출하지 못해 리밸런싱이 발생할 수 있다. +- 10분으로 여유를 두어 일시적 DB 지연 시에도 불필요한 리밸런싱을 방지한다. + +### 4.7 Error Handler + DLQ + +```java +// modules/kafka — KafkaConfig.java에 추가 + +@Bean +public DefaultErrorHandler errorHandler(KafkaTemplate kafkaTemplate) { + DeadLetterPublishingRecoverer recoverer = + new DeadLetterPublishingRecoverer(kafkaTemplate); + // 3회 재시도, 1초 간격 고정 백오프 + return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 3)); +} +``` + +**동작:** +1. Consumer 메시지 처리 실패 시 1초 간격으로 최대 3회 재시도 +2. 3회 모두 실패 → DLT(Dead Letter Topic)로 이동 (원본 토픽명 + `.DLT`) +3. 
DLT 예시: `catalog-events.DLT`, `order-events.DLT`, `coupon-issue-requests.DLT` + +### 4.8 조회수 직접 Kafka 발행 + +```java +// commerce-api: com.loopers.application.event + +@Slf4j +@Component +@RequiredArgsConstructor +public class ProductViewKafkaPublisher { + + private final KafkaTemplate kafkaTemplate; + + @Async + @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) + public void handle(ProductViewedEvent event) { + // 조회는 TX 없음 → @EventListener로도 가능하지만 + // 일관성을 위해 @TransactionalEventListener 사용 (fallbackExecution = true 고려) + try { + kafkaTemplate.send("catalog-events", + String.valueOf(event.productId()), + Map.of( + "eventType", "PRODUCT_VIEWED", + "productId", event.productId(), + "memberId", event.memberId(), + "timestamp", Instant.now().toString() + )); + } catch (Exception e) { + log.warn("조회수 Kafka 발행 실패 — productId={}", event.productId(), e); + // 유실 허용: 조회수는 정확성보다 추세가 중요 + } + } +} +``` + +**Outbox 미경유 근거 (09 §3.10):** +- 조회 = 읽기 전용 (DB 쓰기 없음) → Outbox INSERT를 위한 별도 TX가 필요 +- 조회마다 DB 쓰기 1건 추가 = 성능 오버헤드 +- 조회수는 정확성보다 추세가 중요 (±수 건 허용) +- KafkaTemplate.send()는 내부적으로 배치 + 버퍼링 (효율적) + +### 4.9 Outbox 테이블 정리 + +```java +// commerce-batch: MetricsReconcileTasklet 또는 별도 스케줄러 + +// 1시간 보존 후 Batch DELETE +@Scheduled(cron = "0 0 * * * *") // 매 시 정각 +public void cleanupOutbox() { + int deleted = entityManager.createNativeQuery( + "DELETE FROM event_outbox WHERE created_at < DATE_SUB(NOW(), INTERVAL 1 HOUR) LIMIT 10000" + ).executeUpdate(); + log.info("[OutboxCleanup] 삭제 건수: {}", deleted); +} +``` + +**규모 산정:** +- 좋아요: 일 100만 건, 주문: 일 50만 건, 조회: Outbox 미경유 +- event_outbox: 일 150만 건 (행당 ~500 bytes) +- Debezium이 binlog에서 읽으므로 테이블 누적 최소화 +- 1시간 보존 기준 최대 ~6.25만 건 → Batch DELETE ~1초 이내 + +--- + +## 5. 
product_metrics 집계 + +### 5.1 product_metrics DDL + +```sql +CREATE TABLE product_metrics ( + product_id BIGINT PRIMARY KEY, + like_count BIGINT NOT NULL DEFAULT 0, + view_count BIGINT NOT NULL DEFAULT 0, + sales_count BIGINT NOT NULL DEFAULT 0, + sales_amount BIGINT NOT NULL DEFAULT 0, + updated_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) +); +``` + +### 5.2 product_like_stats 흡수 + Product.like_count 유지 이유 + +**product_like_stats → product_metrics 흡수:** +- product_like_stats는 like_count만 보유 → product_metrics가 like_count + view_count + sales_count + sales_amount 통합 관리 +- LikeCountSyncTasklet → MetricsReconcileTasklet로 진화 +- product_like_stats 테이블은 product_metrics 마이그레이션 후 DROP + +**Product.like_count 컬럼 유지 (09 §3.6):** +- 정렬 인덱스 `idx_product_like_count(like_count DESC, id DESC)`가 이 컬럼 기준 +- 제거하면 좋아요순 정렬 시 product_metrics JOIN 필요 → 성능 하락 +- 비정규화 캐시로 유지, MetricsReconcileTasklet이 product_metrics 기준으로 보정 + +### 5.3 ProductMetrics Entity + +```java +// commerce-streamer: com.loopers.domain.metrics + +@Entity +@Table(name = "product_metrics") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class ProductMetrics { + + @Id + @Column(name = "product_id") + private Long productId; + + @Column(name = "like_count", nullable = false) + private long likeCount; + + @Column(name = "view_count", nullable = false) + private long viewCount; + + @Column(name = "sales_count", nullable = false) + private long salesCount; + + @Column(name = "sales_amount", nullable = false) + private long salesAmount; + + @Column(name = "updated_at", nullable = false) + private LocalDateTime updatedAt; +} +``` + +### 5.4 MetricsConsumer + +```java +// commerce-streamer: com.loopers.interfaces.consumer + +@Slf4j +@Component +@RequiredArgsConstructor +public class MetricsConsumer { + + private final EntityManager entityManager; + private final PlatformTransactionManager transactionManager; + + @KafkaListener( + topics = {"catalog-events", 
"order-events"}, + groupId = "metrics-collector", // 전용 Consumer Group + containerFactory = KafkaConfig.BATCH_LISTENER + ) + public void consume( + List>> messages, + Acknowledgment acknowledgment + ) { + for (ConsumerRecord> record : messages) { + String eventId = extractEventId(record); + + // INSERT-first 패턴: event_handled + 비즈니스 로직을 단일 TX로 처리 + TransactionStatus tx = transactionManager.getTransaction( + new DefaultTransactionDefinition()); + try { + int inserted = entityManager.createNativeQuery( + "INSERT IGNORE INTO event_handled (event_id) VALUES (:eventId)" + ).setParameter("eventId", eventId).executeUpdate(); + + if (inserted == 0) { + transactionManager.rollback(tx); + continue; // 멱등: 이미 처리된 이벤트 + } + + String eventType = extractEventType(record); + Map payload = record.value(); + + switch (eventType) { + case "LIKE_CREATED" -> upsertMetrics( + toLong(payload.get("productId")), "like_count", 1); + case "LIKE_REMOVED" -> upsertMetrics( + toLong(payload.get("productId")), "like_count", -1); + case "PRODUCT_VIEWED" -> upsertMetrics( + toLong(payload.get("productId")), "view_count", 1); + case "ORDER_CREATED" -> handleOrderCreated(payload); + case "ORDER_CANCELLED" -> handleOrderCancelled(payload); + default -> log.warn("알 수 없는 이벤트 타입: {}", eventType); + } + + transactionManager.commit(tx); + } catch (Exception e) { + transactionManager.rollback(tx); + log.error("메트릭 처리 실패 — eventId={}", eventId, e); + } + } + acknowledgment.acknowledge(); + } + + private void upsertMetrics(Long productId, String column, long delta) { + entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, " + column + ", updated_at) " + + "VALUES (:productId, :delta, NOW(6)) " + + "ON DUPLICATE KEY UPDATE " + + column + " = " + column + " + :delta, updated_at = NOW(6)" + ) + .setParameter("productId", productId) + .setParameter("delta", delta) + .executeUpdate(); + } +} +``` + +**Consumer Group 분리:** +- `metrics-collector`: MetricsConsumer 전용. 
catalog-events, order-events 구독. +- `coupon-issuer`: CouponIssueConsumer 전용. coupon-issue-requests 구독. +- 분리 이유: 같은 group-id를 공유하면, 한 Consumer의 처리 지연이 다른 Consumer의 리밸런싱을 유발한다. 쿠폰 발급(건별 CAS)과 메트릭 집계(배치 UPSERT)는 처리 특성이 완전히 다르므로 격리해야 한다. + +**BATCH_LISTENER 사용 이유:** +- catalog-events, order-events는 집계 연산 → 배치로 처리해도 정합성 문제 없음 +- 3000건/poll + manual ack → 높은 처리량 +- 개별 건 실패 시 배치 전체 재처리 → event_handled로 중복 방지 + +--- + +## 6. 멱등 처리 + +### 6.1 event_handled DDL + +```sql +CREATE TABLE event_handled ( + event_id VARCHAR(100) PRIMARY KEY, + handled_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + INDEX idx_event_handled_handled_at (handled_at) +); +``` + +### 6.2 멱등 처리 흐름 + +**원자성 보장 — INSERT-first 패턴:** + +기존 설계(비즈니스 로직 → event_handled INSERT)의 문제: 비즈니스 로직 성공 후, event_handled INSERT 전에 Consumer가 크래시하면 → 재시작 시 같은 메시지를 다시 처리 → **비즈니스 로직 중복 실행**. UPSERT(+1, -1)처럼 멱등하지 않은 연산에서 데이터 정합성이 깨진다. + +``` +Consumer가 메시지 수신: + 1. event_id 추출 (record header 또는 payload) + 2. [단일 TX 시작] + 2-1. INSERT IGNORE INTO event_handled (event_id) VALUES (?) + → affected rows = 0이면 skip (이미 처리됨) → ack → TX rollback + 2-2. 비즈니스 로직 실행 (UPSERT product_metrics 또는 CouponIssue INSERT) + 3. [TX 커밋] + 4. 
ack +``` + +**핵심: event_handled INSERT와 비즈니스 로직이 동일 트랜잭션 안에 있어야 한다.** + +- event_handled INSERT를 **먼저** 시도: 중복이면 즉시 skip → 불필요한 비즈니스 로직 실행 방지 +- INSERT 성공 → 비즈니스 로직 실행 → TX 커밋: 비즈니스 로직 실패 시 event_handled도 함께 롤백 +- TX 커밋 후 크래시 → 재시작 시 event_handled에 이미 존재 → skip → **중복 실행 불가** + +**INSERT IGNORE 패턴 vs SELECT 후 INSERT:** +- `INSERT IGNORE`: PK 중복 시 에러 없이 무시, affected rows로 판단 → 단일 쿼리 + race condition 방지 +- `SELECT → INSERT`: 조회~삽입 사이에 다른 Consumer가 같은 event_id를 처리할 수 있음 → 불안전 + +### 6.3 event_id 생성 전략 + +| 소스 | event_id 형식 | 예시 | +|---|---|---| +| Debezium Outbox | `outbox:{event_outbox.id}` | `outbox:12345` | +| 직접 Kafka 발행 (조회수) | `view:{productId}:{timestamp}:{uuid 8자리}` | `view:100:1719820800000:a1b2c3d4` | +| 선착순 쿠폰 | `coupon-issue:{couponIssueRequestId}` | `coupon-issue:5678` | + +### 6.4 event_handled 정리 + +```sql +-- 7일 보존 후 삭제 (commerce-batch 또는 스케줄러) +DELETE FROM event_handled WHERE handled_at < DATE_SUB(NOW(), INTERVAL 7 DAY) LIMIT 10000; +``` + +**7일 근거:** +- Kafka retention 기본값 7일 → 7일 이전 메시지는 Kafka에서도 삭제됨 +- 재처리 가능 범위 = Kafka retention과 일치시킴 + +--- + +## 7. 
Step 3 — 선착순 쿠폰 발급 + +### 7.1 Coupon 모델 확장 DDL + +```sql +ALTER TABLE coupon +ADD COLUMN max_issuance_count INT NULL COMMENT 'NULL이면 무제한', +ADD COLUMN issued_count INT NOT NULL DEFAULT 0; +``` + +### 7.2 coupon_issue UNIQUE 제약 + +```sql +ALTER TABLE coupon_issue +ADD UNIQUE INDEX uk_coupon_issue_coupon_member (coupon_id, member_id); +``` + +**근거:** 같은 쿠폰 + 같은 유저 → INSERT 시 중복이면 예외 → 거절 + +### 7.3 coupon_issue_request DDL + Entity + +```sql +CREATE TABLE coupon_issue_request ( + id BIGINT AUTO_INCREMENT PRIMARY KEY, + coupon_id BIGINT NOT NULL, + member_id BIGINT NOT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'PENDING', -- PENDING / COMPLETED / REJECTED + reject_reason VARCHAR(100), + created_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + completed_at DATETIME(6), + INDEX idx_coupon_issue_request_member (member_id), + INDEX idx_coupon_issue_request_coupon (coupon_id) +); +``` + +```java +// commerce-api: com.loopers.domain.coupon + +@Entity +@Table(name = "coupon_issue_request") +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class CouponIssueRequest { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "coupon_id", nullable = false) + private Long couponId; + + @Column(name = "member_id", nullable = false) + private Long memberId; + + @Enumerated(EnumType.STRING) + @Column(nullable = false, length = 20) + private CouponIssueRequestStatus status; + + @Column(name = "reject_reason", length = 100) + private String rejectReason; + + @Column(name = "created_at", nullable = false, updatable = false) + private LocalDateTime createdAt; + + @Column(name = "completed_at") + private LocalDateTime completedAt; + + public static CouponIssueRequest create(Long couponId, Long memberId) { + CouponIssueRequest request = new CouponIssueRequest(); + request.couponId = couponId; + request.memberId = memberId; + request.status = CouponIssueRequestStatus.PENDING; + request.createdAt = LocalDateTime.now(); + 
return request; + } +} + +public enum CouponIssueRequestStatus { + PENDING, COMPLETED, REJECTED +} +``` + +### 7.4 발급 요청 API 흐름 + +``` +[사용자] → POST /api/v1/coupons/{couponId}/issue-request + +[commerce-api — CouponFacade.requestCouponIssue] + 1. Coupon 조회 + 만료 확인 + 2. coupon_issue_request INSERT (status = PENDING) + 3. Kafka에 COUPON_ISSUE_REQUESTED 발행 (key = couponId) + → KafkaTemplate.send("coupon-issue-requests", couponId, payload) + 4. 즉시 응답: { requestId, status: "PENDING" } +``` + +```java +// commerce-api: CouponFacade — 추가 메서드 + +@Transactional +public CouponIssueRequest requestCouponIssue(Long couponId, Long memberId) { + Coupon coupon = couponRepository.findById(couponId) + .orElseThrow(() -> new CoreException(ErrorType.NOT_FOUND, "쿠폰을 찾을 수 없습니다.")); + ZonedDateTime now = ZonedDateTime.now(clock); + if (now.isAfter(coupon.getExpiredAt())) { + throw new CoreException(ErrorType.BAD_REQUEST, "만료된 쿠폰입니다."); + } + + CouponIssueRequest request = CouponIssueRequest.create(couponId, memberId); + couponIssueRequestRepository.save(request); + + kafkaTemplate.send("coupon-issue-requests", + String.valueOf(couponId), + Map.of( + "requestId", request.getId(), + "couponId", couponId, + "memberId", memberId, + "timestamp", Instant.now().toString() + )); + + return request; +} +``` + +### 7.5 Consumer 처리 흐름 + +``` +[commerce-streamer — CouponIssueConsumer (SINGLE_LISTENER, groupId=coupon-issuer)] + + [단일 TX 시작] + 1. INSERT IGNORE INTO event_handled (event_id = "coupon-issue:{requestId}") + → affected rows = 0 → skip (이미 처리됨) → TX rollback + ack + + 2. CAS UPDATE — 수량 확인 + 발급 카운트 증가 + UPDATE coupon + SET issued_count = issued_count + 1 + WHERE id = :couponId + AND issued_count < max_issuance_count + AND deleted_at IS NULL; + → affected rows = 0 → 수량 소진 → REJECTED + + 3. 
CouponIssue INSERT (중복 발급 방지) + INSERT INTO coupon_issue (coupon_id, member_id, status, expired_at, created_at) + VALUES (:couponId, :memberId, 'AVAILABLE', :expiredAt, NOW()); + → DuplicateKeyException (uk_coupon_issue_coupon_member) → 이미 발급 → REJECTED + + 4. coupon_issue_request 상태 업데이트 + UPDATE coupon_issue_request + SET status = 'COMPLETED', completed_at = NOW() + WHERE id = :requestId; + + [TX 커밋] → ack + + * 비즈니스 로직 실패 시 event_handled도 함께 롤백 → 재시도 가능 +``` + +```java +// commerce-streamer: com.loopers.interfaces.consumer + +@Slf4j +@Component +@RequiredArgsConstructor +public class CouponIssueConsumer { + + private final EntityManager entityManager; + private final PlatformTransactionManager transactionManager; + + @KafkaListener( + topics = "coupon-issue-requests", + groupId = "coupon-issuer", // 전용 Consumer Group + containerFactory = KafkaConfig.SINGLE_LISTENER // 단건 처리 — 개별 에러 핸들링 + ) + public void consume(ConsumerRecord> record, + Acknowledgment acknowledgment) { + Map payload = record.value(); + Long requestId = toLong(payload.get("requestId")); + String eventId = "coupon-issue:" + requestId; + Long couponId = toLong(payload.get("couponId")); + Long memberId = toLong(payload.get("memberId")); + + // INSERT-first 패턴: event_handled + 비즈니스 로직을 단일 TX로 처리 + TransactionStatus tx = transactionManager.getTransaction( + new DefaultTransactionDefinition()); + try { + int inserted = entityManager.createNativeQuery( + "INSERT IGNORE INTO event_handled (event_id) VALUES (:eventId)" + ).setParameter("eventId", eventId).executeUpdate(); + + if (inserted == 0) { + transactionManager.rollback(tx); + acknowledgment.acknowledge(); + return; // 멱등: 이미 처리됨 + } + + processCouponIssue(requestId, couponId, memberId); + transactionManager.commit(tx); + } catch (Exception e) { + transactionManager.rollback(tx); + log.error("쿠폰 발급 처리 실패 — requestId={}", requestId, e); + // 실패 시 event_handled도 롤백됨 → 재시도 가능 + // DLQ로 이동 시 rejectRequest 처리는 ErrorHandler에서 수행 + } + + 
acknowledgment.acknowledge(); + } + + private void processCouponIssue(Long requestId, Long couponId, Long memberId) { + // CAS UPDATE — 수량 확인 + 발급 카운트 증가 + int updated = entityManager.createNativeQuery( + "UPDATE coupon SET issued_count = issued_count + 1 " + + "WHERE id = :couponId AND issued_count < max_issuance_count " + + "AND deleted_at IS NULL" + ).setParameter("couponId", couponId).executeUpdate(); + + if (updated == 0) { + rejectRequest(requestId, "수량 소진"); + return; + } + + // CouponIssue INSERT + try { + entityManager.createNativeQuery( + "INSERT INTO coupon_issue (coupon_id, member_id, status, expired_at, created_at) " + + "SELECT :couponId, :memberId, 'AVAILABLE', c.expired_at, NOW() " + + "FROM coupon c WHERE c.id = :couponId" + ).setParameter("couponId", couponId) + .setParameter("memberId", memberId) + .executeUpdate(); + } catch (Exception e) { + // UNIQUE 제약 위반 → 이미 발급됨 → issued_count 롤백 + entityManager.createNativeQuery( + "UPDATE coupon SET issued_count = issued_count - 1 WHERE id = :couponId" + ).setParameter("couponId", couponId).executeUpdate(); + rejectRequest(requestId, "이미 발급된 쿠폰"); + return; + } + + // 성공 상태 업데이트 + entityManager.createNativeQuery( + "UPDATE coupon_issue_request SET status = 'COMPLETED', completed_at = NOW() " + + "WHERE id = :requestId" + ).setParameter("requestId", requestId).executeUpdate(); + } +} +``` + +### 7.6 동시성 제어 — Kafka만으로 부족한 이유 + DB CAS가 핵심 + +``` +오해: "key=couponId → 같은 파티션 → 순차 소비 → 동시성 해결" + +현실 (09 §4.3): + 1. Consumer 장애 → Rebalancing → 메시지 재처리 (At Least Once) + → 같은 요청이 2번 처리될 수 있음 + 2. Consumer Group 내 파티션 재할당 중 중복 소비 가능 + 3. 
배치 리스너의 경우 동일 couponId의 여러 요청이 같은 배치에 포함 + +결론: + Kafka = "폭주 요청 버퍼링 + 순서 힌트" (부하 완충) + DB CAS UPDATE = "수량 제어의 핵심" (정확성 보장) + UNIQUE 제약 = "중복 발급 방지의 최종 방어선" +``` + +**SINGLE_LISTENER 사용 이유 (09 §11):** +- 건별 CAS UPDATE + 개별 에러 핸들링이 필요 +- BATCH_LISTENER에서 배치 내 부분 실패 처리가 복잡 +- 쿠폰 발급은 집계와 달리 건별 정확성이 중요 + +### 7.7 결과 확인 — Polling + +``` +[사용자] → GET /api/v1/coupons/issue-requests/{requestId} + → coupon_issue_request 조회 + → { requestId, status: "PENDING" | "COMPLETED" | "REJECTED", rejectReason } +``` + +**Polling 선택 근거 (09 §4.7):** +- 구현 단순, 인프라 추가 불필요 +- 쿠폰 발급은 수 초 내 완료 → 1~2회 polling이면 충분 +- SSE/WebSocket은 커넥션 유지 오버헤드 + +--- + +## 8. Redis 설정 보완 + +```java +// modules/redis: RedisConfig.java — lettuceConnectionFactory 메서드 수정 + +private LettuceConnectionFactory lettuceConnectionFactory( + int database, + RedisNodeInfo master, + List replicas, + Consumer customizer +) { + LettuceClientConfiguration.LettuceClientConfigurationBuilder builder = + LettuceClientConfiguration.builder() + .commandTimeout(Duration.ofMillis(500)); // ← 추가 + if (customizer != null) customizer.accept(builder); + // ... 이하 동일 +} +``` + +**근거 (09 §12.2):** +- 현재: 타임아웃 미설정 → Redis 장애 시 스레드 무한 대기 가능 +- 정상 응답 ~1ms, 500ms 초과 = 장애 판단 +- Lettuce NIO multiplexing이므로 커넥션 풀 불필요 — 타임아웃만으로 보호 + +--- + +## 9. 
전체 흐름 통합 + +### 9.1 좋아요 분리 후 전체 흐름도 + +``` +[사용자] POST /api/v1/products/{productId}/likes + │ + ▼ +[LikeFacade.addLike — TX] + Like INSERT + event_outbox INSERT + → TX commit + │ + ├─ [AFTER_COMMIT — 동기] + │ ├─ Product.incrementLikeCount (best-effort) + │ └─ 캐시 무효화 (evictProductDetail + evictProductList) + │ + └─ [event_outbox — MySQL binlog] + → Debezium → catalog-events 토픽 + → [commerce-streamer] MetricsConsumer + → product_metrics.like_count UPSERT + → event_handled INSERT +``` + +### 9.2 주문 분리 후 전체 흐름도 + +``` +[사용자] POST /api/v1/orders + │ + ▼ +[OrderFacade.createOrder — TX] + 재고 차감 + 쿠폰 적용 + 주문 저장 + event_outbox INSERT + → TX commit + │ + └─ [event_outbox — MySQL binlog] + → Debezium → order-events 토픽 + → [commerce-streamer] MetricsConsumer + → product_metrics.sales_count / sales_amount UPSERT (상품별) + → event_handled INSERT +``` + +### 9.3 조회수 전체 흐름도 + +``` +[사용자] GET /api/v1/products/{productId} + │ + ▼ +[ProductFacade.getProductDetailCached] + L1/L2 캐시 → DB → 응답 + │ + └─ [ApplicationEvent — ProductViewedEvent] + → [ProductViewKafkaPublisher — @Async] + → KafkaTemplate.send("catalog-events", productId, payload) + → [commerce-streamer] MetricsConsumer + → product_metrics.view_count UPSERT + → event_handled INSERT + +* Outbox 미경유 — 읽기 전용 연산, 유실 허용 +``` + +### 9.4 선착순 쿠폰 전체 흐름도 + +``` +[사용자] POST /api/v1/coupons/{couponId}/issue-request + │ + ▼ +[CouponFacade.requestCouponIssue — TX] + Coupon 검증 + coupon_issue_request INSERT (PENDING) + → KafkaTemplate.send("coupon-issue-requests", couponId, payload) + → 즉시 응답: { requestId, status: "PENDING" } + │ + └─ [Kafka — coupon-issue-requests 토픽] + → [commerce-streamer] CouponIssueConsumer (SINGLE_LISTENER) + → event_handled 확인 (멱등) + → CAS UPDATE coupon.issued_count (수량 확인) + → INSERT coupon_issue (UNIQUE 제약) + → UPDATE coupon_issue_request (COMPLETED / REJECTED) + → event_handled INSERT + +[사용자] GET /api/v1/coupons/issue-requests/{requestId} + → 결과 확인 (Polling) +``` + +### 9.5 주문 취소 전체 흐름도 + +``` +[사용자] DELETE 
/api/v1/orders/{orderId} + │ + ▼ +[OrderFacade.cancelOrder — TX] + order.cancel() + 재고 복원 + 쿠폰 복원 + event_outbox INSERT + → TX commit + │ + └─ [event_outbox — MySQL binlog] + → Debezium → order-events 토픽 + → [commerce-streamer] MetricsConsumer + → product_metrics.sales_count / sales_amount 차감 (상품별) + → event_handled INSERT +``` + +--- + +## 10. 정합성 안전망 + +### 10.1 MetricsReconcileTasklet (LikeCountSyncTasklet 진화) + +```java +// commerce-batch: com.loopers.batch.job.metricsreconcile.step + +@Slf4j +@RequiredArgsConstructor +@Component +public class MetricsReconcileTasklet implements Tasklet { + + private final EntityManager entityManager; + + @Override + public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { + // 1단계: likes 테이블 기준 → product_metrics.like_count 보정 + log.info("[MetricsReconcile] 1단계: like_count 대사 시작"); + int likeCorrected = entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, like_count, updated_at) " + + "SELECT l.product_id, COUNT(*), NOW(6) FROM likes l GROUP BY l.product_id " + + "ON DUPLICATE KEY UPDATE like_count = VALUES(like_count), updated_at = NOW(6)" + ).executeUpdate(); + log.info("[MetricsReconcile] 1단계 완료 — 대사 행 수: {}", likeCorrected); + + // 2단계: product_metrics.like_count → Product.like_count 비정규화 보정 + log.info("[MetricsReconcile] 2단계: Product.like_count 드리프트 보정 시작"); + int productCorrected = entityManager.createNativeQuery( + "UPDATE product p JOIN product_metrics pm ON p.id = pm.product_id " + + "SET p.like_count = pm.like_count " + + "WHERE p.like_count != pm.like_count AND p.deleted_at IS NULL" + ).executeUpdate(); + log.info("[MetricsReconcile] 2단계 완료 — 보정된 상품 수: {}", productCorrected); + + // 3단계: order_items 기준 → product_metrics.sales_count/sales_amount 보정 + log.info("[MetricsReconcile] 3단계: sales_count/sales_amount 대사 시작"); + int salesCorrected = entityManager.createNativeQuery( + "INSERT INTO product_metrics (product_id, sales_count, sales_amount, updated_at) " + + 
"SELECT oi.product_id, SUM(oi.quantity), SUM(oi.price * oi.quantity), NOW(6) " + + "FROM order_items oi JOIN orders o ON oi.order_id = o.id " + + "WHERE o.status != 'CANCELLED' AND o.deleted_at IS NULL " + + "GROUP BY oi.product_id " + + "ON DUPLICATE KEY UPDATE " + + "sales_count = VALUES(sales_count), sales_amount = VALUES(sales_amount), " + + "updated_at = NOW(6)" + ).executeUpdate(); + log.info("[MetricsReconcile] 3단계 완료 — 대사 행 수: {}", salesCorrected); + + return RepeatStatus.FINISHED; + } +} +``` + +### 10.2 3중 안전망 + +``` +[1차] best-effort 즉시 반영 + → AFTER_COMMIT에서 incrementLikeCount (동기) + → 실패해도 Like 자체는 저장됨 + +[2차] Kafka 집계 + → Debezium → catalog-events → MetricsConsumer + → product_metrics에 정확한 이벤트 기반 집계 + +[3차] 배치 대사 + → MetricsReconcileTasklet + → 원본 데이터(likes, order_items) 기준 전수 대사 + → product_metrics 보정 + Product.like_count 비정규화 보정 +``` + +--- + +## 11. 패키지 구조 + +### 11.1 commerce-api 패키지 트리 + +``` +apps/commerce-api/src/main/java/com/loopers/ +├── application/ +│ ├── coupon/ +│ │ ├── CouponFacade.java [변경] requestCouponIssue 추가 +│ │ └── CouponApplyResult.java +│ ├── like/ +│ │ └── LikeFacade.java [변경] incrementLikeCount 제거, outbox + event 발행 +│ ├── order/ +│ │ └── OrderFacade.java [변경] outbox + event 발행 추가 +│ ├── product/ +│ │ ├── ProductFacade.java [변경] 조회수 이벤트 발행 추가 +│ │ └── ProductCachePort.java +│ └── event/ [신규] +│ └── ProductViewKafkaPublisher.java [신규] 조회수 직접 Kafka 발행 +├── domain/ +│ ├── coupon/ +│ │ ├── Coupon.java [변경] maxIssuanceCount, issuedCount 추가 +│ │ ├── CouponIssue.java +│ │ ├── CouponIssueRequest.java [신규] +│ │ ├── CouponIssueRequestStatus.java [신규] +│ │ └── CouponIssueRequestRepository.java [신규] +│ ├── event/ [신규] +│ │ ├── LikeCreatedEvent.java [신규] +│ │ ├── LikeRemovedEvent.java [신규] +│ │ ├── OrderCreatedEvent.java [신규] +│ │ ├── OrderCancelledEvent.java [신규] +│ │ ├── ProductViewedEvent.java [신규] +│ │ ├── EventOutbox.java [신규] +│ │ └── EventOutboxRepository.java [신규] +│ └── ... 
+├── infrastructure/ +│ ├── coupon/ +│ │ └── CouponIssueRequestJpaRepository.java [신규] +│ ├── event/ [신규] +│ │ └── EventOutboxJpaRepository.java [신규] +│ ├── kafka/ [신규] +│ │ ├── KafkaTopicConfig.java [신규] NewTopic 빈 정의 +│ │ └── AsyncConfig.java [신규] @Async 스레드 풀 +│ └── ... +├── interfaces/ +│ ├── api/ +│ │ ├── coupon/ +│ │ │ └── CouponController.java [변경] 발급 요청/결과 확인 API 추가 +│ │ ├── like/ +│ │ │ └── LikeController.java [변경] 캐시 무효화 인라인 코드 제거 +│ │ └── ... +│ └── listener/ [신규] +│ ├── LikeCountEventListener.java [신규] AFTER_COMMIT → incrementLikeCount +│ └── CacheEvictionEventListener.java [신규] AFTER_COMMIT → 캐시 무효화 +└── ... +``` + +### 11.2 commerce-streamer 패키지 트리 + +``` +apps/commerce-streamer/src/main/java/com/loopers/ +├── CommerceStreamerApplication.java +├── domain/ [신규] +│ ├── metrics/ [신규] +│ │ ├── ProductMetrics.java [신규] streamer 자체 Entity +│ │ └── ProductMetricsRepository.java [신규] +│ └── event/ [신규] +│ ├── EventHandled.java [신규] streamer 자체 Entity +│ └── EventHandledRepository.java [신규] +└── interfaces/ + └── consumer/ + ├── DemoKafkaConsumer.java (기존 유지) + ├── MetricsConsumer.java [신규] catalog-events + order-events + └── CouponIssueConsumer.java [신규] coupon-issue-requests +``` + +**commerce-streamer에서 Native SQL 사용 근거:** +- CouponIssueConsumer가 coupon, coupon_issue, coupon_issue_request 테이블에 접근 +- 이들은 commerce-api의 도메인 Entity → streamer에서 Entity를 공유하면 모듈 결합도 증가 +- commerce-batch의 LikeCountSyncTasklet이 `entityManager.createNativeQuery()`로 접근하는 기존 패턴 준수 +- Native SQL로 최소한의 접근만 수행 (CAS UPDATE, INSERT, status UPDATE) + +### 11.3 commerce-batch 패키지 + +``` +apps/commerce-batch/src/main/java/com/loopers/batch/job/ +├── likecountsync/ [변경 → metricsreconcile로 리네임] +│ ├── LikeCountSyncJobConfig.java [변경] → MetricsReconcileJobConfig.java +│ └── step/ +│ └── LikeCountSyncTasklet.java [변경] → MetricsReconcileTasklet.java +├── outboxcleanup/ [신규] +│ ├── OutboxCleanupJobConfig.java [신규] +│ └── step/ +│ └── OutboxCleanupTasklet.java [신규] event_outbox 1시간 보존 DELETE +├── 
eventhandledcleanup/ [신규] +│ ├── EventHandledCleanupJobConfig.java [신규] +│ └── step/ +│ └── EventHandledCleanupTasklet.java [신규] event_handled 7일 보존 DELETE +├── paymentrecovery/ (기존 유지) +└── reconciliation/ (기존 유지) +``` + +--- + +## 12. 의존성 + Docker 변경 + +### 12.1 commerce-api: `modules:kafka` 추가 + +```kotlin +// apps/commerce-api/build.gradle.kts +dependencies { + // ... 기존 의존성 ... + implementation(project(":modules:kafka")) // 추가 — 조회수 직접 Kafka 발행용 +} +``` + +### 12.2 infra-compose.yml 변경 + +```yaml +# 1. mysql 서비스: binlog 활성화 command 추가 +mysql: + image: mysql:8.0 + command: + - --log-bin=mysql-bin + - --binlog-format=ROW + - --binlog-row-image=FULL + - --server-id=1 + # ... 기존 ports, environment, volumes 유지 + +# 2. kafka-connect 서비스 추가 (§4.3.2 참조) +kafka-connect: + image: debezium/connect:2.5 + container_name: kafka-connect + depends_on: + kafka: + condition: service_healthy + mysql: + condition: service_started + ports: + - "8083:8083" + environment: + GROUP_ID: 1 + BOOTSTRAP_SERVERS: kafka:9092 + CONFIG_STORAGE_TOPIC: _connect_configs + OFFSET_STORAGE_TOPIC: _connect_offsets + STATUS_STORAGE_TOPIC: _connect_status + CONFIG_STORAGE_REPLICATION_FACTOR: 1 + OFFSET_STORAGE_REPLICATION_FACTOR: 1 + STATUS_STORAGE_REPLICATION_FACTOR: 1 + KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + KEY_CONVERTER_SCHEMAS_ENABLE: "false" + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8083/connectors"] + interval: 10s + timeout: 5s + retries: 10 +``` + +### 12.3 Debezium Connector 등록 스크립트 + +```bash +# docker/register-debezium-connector.sh +# infra-compose up 이후 실행 + +#!/bin/bash +set -e + +echo "Waiting for Kafka Connect to be ready..." +until curl -s http://localhost:8083/connectors > /dev/null 2>&1; do + sleep 2 +done + +echo "Registering Debezium MySQL Connector..." 
+curl -X POST http://localhost:8083/connectors \ + -H "Content-Type: application/json" \ + -d @- << 'EOF' +{ + "name": "loopers-outbox-connector", + "config": { + "connector.class": "io.debezium.connector.mysql.MySqlConnector", + "tasks.max": "1", + "database.hostname": "mysql", + "database.port": "3306", + "database.user": "root", + "database.password": "root", + "database.server.id": "184054", + "topic.prefix": "loopers", + "database.include.list": "loopers", + "table.include.list": "loopers.event_outbox", + "schema.history.internal.kafka.bootstrap.servers": "kafka:9092", + "schema.history.internal.kafka.topic": "_schema_history", + "transforms": "outbox", + "transforms.outbox.type": "io.debezium.transforms.outbox.EventRouter", + "transforms.outbox.table.field.event.id": "id", + "transforms.outbox.table.field.event.key": "aggregate_id", + "transforms.outbox.table.field.event.type": "event_type", + "transforms.outbox.table.field.event.payload": "payload", + "transforms.outbox.route.by.field": "aggregate_type", + "transforms.outbox.route.topic.replacement": "${routedByValue}-events", + "transforms.outbox.table.fields.additional.placement": "event_type:header:eventType", + "tombstones.on.delete": "false" + } +} +EOF + +echo "Connector registered successfully!" +curl -s http://localhost:8083/connectors/loopers-outbox-connector/status | python3 -m json.tool +``` + +--- + +## 13. 
구현 계획
+
+### Phase 1: ApplicationEvent 기반 분리
+
+| # | 항목 | 대상 파일 |
+|---|---|---|
+| 1 | 이벤트 record 6개 생성 (OrderItemSnapshot 포함) | domain/event/*.java |
+| 2 | EventOutbox Entity + Repository | domain/event/, infrastructure/event/ |
+| 3 | LikeFacade에서 incrementLikeCount 제거, Outbox INSERT + 이벤트 발행 | LikeFacade.java |
+| 4 | LikeCountEventListener (AFTER_COMMIT, 동기) | interfaces/listener/ |
+| 5 | CacheEvictionEventListener (AFTER_COMMIT, 동기) | interfaces/listener/ |
+| 6 | LikeController에서 캐시 무효화 인라인 코드 제거 | LikeController.java |
+| 7 | @Async 스레드 풀 설정 | infrastructure/kafka/AsyncConfig.java |
+
+### Phase 2: Kafka 인프라 + Debezium
+
+| # | 항목 | 대상 파일 |
+|---|---|---|
+| 8 | commerce-api build.gradle.kts에 `modules:kafka` 추가 | build.gradle.kts |
+| 9 | kafka.yml Producer 보완 (acks, idempotence, linger.ms) | kafka.yml |
+| 10 | kafka.yml Consumer value-deserializer 오타 수정 | kafka.yml |
+| 11 | SINGLE_LISTENER ContainerFactory 추가 | KafkaConfig.java |
+| 12 | DefaultErrorHandler + DLQ 설정 | KafkaConfig.java |
+| 13 | NewTopic 빈 3개 선언 | KafkaTopicConfig.java |
+| 14 | infra-compose.yml MySQL binlog command 추가 | infra-compose.yml |
+| 15 | infra-compose.yml kafka-connect 서비스 추가 | infra-compose.yml |
+| 16 | Debezium Connector 등록 스크립트 | docker/register-debezium-connector.sh |
+| 17 | RedisConfig commandTimeout(500ms) 추가 | RedisConfig.java |
+
+### Phase 3: product_metrics Consumer
+
+| # | 항목 | 대상 파일 |
+|---|---|---|
+| 18 | event_outbox DDL 실행 | DDL |
+| 19 | product_metrics DDL 실행 | DDL |
+| 20 | event_handled DDL 실행 | DDL |
+| 21 | ProductMetrics Entity (commerce-streamer) | domain/metrics/ |
+| 22 | EventHandled Entity (commerce-streamer) | domain/event/ |
+| 23 | MetricsConsumer (BATCH_LISTENER) | interfaces/consumer/ |
+| 24 | ProductViewKafkaPublisher (@Async, 직접 Kafka) | application/event/ |
+| 25 | ProductFacade 조회수 이벤트 발행 추가 | ProductFacade.java |
+| 26 | OrderFacade Outbox INSERT + 이벤트 발행 추가 | OrderFacade.java |
+| 27 | OrderFacade.cancelOrder Outbox INSERT 추가 | OrderFacade.java |
+
+### 
Phase 4: 선착순 쿠폰 + +| # | 항목 | 대상 파일 | +|---|---|---| +| 28 | Coupon 모델 확장 DDL (max_issuance_count, issued_count) | DDL + Coupon.java | +| 29 | coupon_issue UNIQUE 제약 추가 DDL | DDL | +| 30 | coupon_issue_request DDL + Entity | domain/coupon/ | +| 31 | CouponFacade.requestCouponIssue 추가 | CouponFacade.java | +| 32 | CouponController 발급 요청/결과 확인 API 추가 | CouponController.java | +| 33 | CouponIssueConsumer (SINGLE_LISTENER, Native SQL) | interfaces/consumer/ | + +### Phase 5: 배치 + 테스트 + +| # | 항목 | 대상 파일 | +|---|---|---| +| 34 | MetricsReconcileTasklet (LikeCountSyncTasklet 진화) | commerce-batch | +| 35 | OutboxCleanupTasklet (1시간 보존 DELETE) | commerce-batch | +| 36 | EventHandledCleanupTasklet (7일 보존 DELETE) | commerce-batch | +| 37 | ApplicationEvent 분리 단위 테스트 | commerce-api/test | +| 38 | Kafka Consumer 통합 테스트 (EmbeddedKafka) | commerce-streamer/test | +| 39 | 선착순 쿠폰 동시성 테스트 | commerce-streamer/test | +| 40 | Debezium E2E 테스트 (Testcontainers) | 통합 테스트 | + +--- + +## 14. 전체 DDL 요약 + +### 신규 테이블 (4개) + +| 테이블 | 용도 | 위치 | +|---|---|---| +| `event_outbox` | Debezium CDC용 Outbox | commerce-api TX 내 INSERT | +| `product_metrics` | 상품 집계 (좋아요, 조회수, 판매량) | commerce-streamer UPSERT | +| `event_handled` | 멱등 처리 (중복 이벤트 방지) | commerce-streamer INSERT | +| `coupon_issue_request` | 선착순 쿠폰 발급 요청 추적 | commerce-api INSERT, commerce-streamer UPDATE | + +### 변경 테이블 (2개) + +| 테이블 | 변경 내용 | +|---|---| +| `coupon` | `max_issuance_count INT NULL`, `issued_count INT DEFAULT 0` 컬럼 추가 | +| `coupon_issue` | `UNIQUE INDEX uk_coupon_issue_coupon_member (coupon_id, member_id)` 추가 | + +### 삭제 테이블 (1개) + +| 테이블 | 사유 | +|---|---| +| `product_like_stats` | product_metrics로 흡수 (마이그레이션 후 DROP) | + +--- + +## 핵심 설계 결정 요약 + +| 결정 | 내용 | 근거 (09 참조) | +|---|---|---| +| Debezium CDC | Poller 대신 binlog 기반 Outbox 발행 | §8 — 학습 가치 + 중복 발행 원천 해결 | +| event_outbox에 status 없음 | Debezium이 binlog에서 읽으므로 PENDING/PROCESSED 불필요 | §8.5 | +| incrementLikeCount 동기 | AFTER_COMMIT에서 best-effort, @Async 아님 | §2.7, §13 | +| 
@Async core=2, max=4 | DB/Redis 미사용 초경량 작업이므로 작은 풀 | §13 | +| SINGLE_LISTENER for 쿠폰 | 건별 CAS UPDATE + 개별 에러 핸들링 | §11 | +| commerce-streamer Native SQL | Coupon/CouponIssue 접근 시 Entity 미사용 | commerce-batch 기존 패턴 | +| Product.like_count 유지 | 정렬 인덱스 유지, 제거 시 성능 하락 | §3.6 | +| 조회수 Outbox 미경유 | 읽기 전용 → TX 없음 → 직접 Kafka 발행 | §3.10 | diff --git a/docs/design/09-event-review.md b/docs/design/09-event-review.md new file mode 100644 index 000000000..c4649849b --- /dev/null +++ b/docs/design/09-event-review.md @@ -0,0 +1,858 @@ +# 이벤트 파이프라인 리뷰 — 시니어 아키텍트 관점 + +--- + +## 0. 과제 범위 요약 + +| Step | 주제 | 핵심 | +|---|---|---| +| Step 1 | ApplicationEvent로 경계 나누기 | 핵심 로직 vs 부가 로직 판단 + 트랜잭션 분리 | +| Step 2 | Kafka 이벤트 파이프라인 | Outbox → Kafka → commerce-streamer, product_metrics 집계, 멱등 처리 | +| Step 3 | 선착순 쿠폰 발급 | API → Kafka 발행만 → Consumer 순차 처리, 수량 제한 동시성 제어 | + +--- + +## 1. 현재 코드베이스 분석 + +### 1.1 현재 인프라 상태 + +| 구성 요소 | 상태 | 비고 | +|---|---|---| +| commerce-api | Kafka 미사용 | 모든 흐름 동기 처리, kafka.yml 미임포트 | +| commerce-streamer | DemoKafkaConsumer 1개 | demo.internal.topic-v1 소비만 | +| modules/kafka | 설정 완료 | KafkaTemplate, BATCH_LISTENER (manual ack, concurrency 3, max poll 3000) | +| Docker Kafka | KRaft 모드 | 단일 브로커, port 19092 (외부), 토픽 자동 생성 비활성화 | + +### 1.2 현재 주문 흐름 (`OrderFacade.createOrder`) + +``` +[단일 TX — @Transactional] + 1. 상품 비관적 락 (deadlock 방지 위해 ID 정렬) + 2. 브랜드 조회 (N+1 방지) + 3. 스냅샷 생성 (OrderItem) + 4. 재고 차감 (Product.decreaseStock) + 5. 쿠폰 적용 (CouponFacade.applyCouponToOrder — CAS UPDATE) + 6. 주문 저장 (Order.create) + 7. 쿠폰-주문 연결 (CouponIssue.linkOrder) +[TX commit] +``` + +**문제점:** +- 부가 로직(유저 행동 로깅, 판매량 집계, 알림)이 존재하지 않지만, 추가된다면 TX 안에 들어갈 구조 +- 쿠폰 적용은 가격 계산에 직접 영향 → 핵심 로직 (분리 불가) + +### 1.3 현재 좋아요 흐름 (`LikeFacade.addLike`) + +``` +[단일 TX — @Transactional] + 1. 상품 존재 확인 + 2. 중복 좋아요 확인 (existsByMemberIdAndProductId) + 3. Like INSERT + 4. Product.incrementLikeCount (SQL atomic UPDATE) +[TX commit] + +[Controller에서 인라인 처리] + 5. 
캐시 무효화 (productCachePort.evictProductDetail + evictProductList) +``` + +**문제점:** +- Like INSERT(핵심)와 likeCount UPDATE(부가/집계)가 같은 TX +- 집계 실패 시 좋아요 자체도 롤백됨 +- 캐시 무효화가 Controller에 인라인 — 관심사 분리 안 됨 + +### 1.4 현재 좋아요 집계 구조 + +``` +product_like_stats 테이블: + product_id (PK), like_count, synced_at + +LikeCountSyncTasklet (commerce-batch): + 1단계: likes COUNT(*) GROUP BY product_id → REPLACE INTO product_like_stats + 2단계: product_like_stats.like_count → Product.like_count 드리프트 보정 + +역할: Product.like_count의 정합성 안전망 (incrementLikeCount 누락 시 보정) +``` + +### 1.5 현재 상품 조회 흐름 + +``` +ProductFacade.getProductDetailCached(): + L1(Caffeine) → L2(Redis) → DB → 캐시 저장 + +조회수 추적: 없음 (7주차에서 신규 추가) +``` + +### 1.6 현재 쿠폰 구조 + +``` +Coupon: name, discountType, discountValue, minOrderAmount, expiredAt +CouponIssue: couponId, memberId, status(AVAILABLE/USED/EXPIRED), expiredAt + +수량 제한: 없음 → 7주차에서 선착순 수량 제한 추가 필요 +중복 발급 방지: 없음 (같은 쿠폰을 같은 유저가 여러 번 발급 가능) +``` + +--- + +## 2. Step 1 분석 — 핵심 vs 부가 로직 판단 기준 + +### 2.1 판단 프레임워크 + +``` +핵심 로직 = "이것이 실패하면 사용자 요청 자체가 실패해야 하는가?" + → YES: 핵심 TX 안에 유지 + → NO: 이벤트로 분리 가능 + +부가 로직 = "이것이 실패해도 사용자에게는 성공으로 보여야 하는가?" + → YES: 이벤트 분리 (eventual consistency) +``` + +### 2.2 주문 플로우 — 핵심 vs 부가 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| 재고 차감 | **핵심** | 재고 없으면 주문 불가. 
즉시 검증 필요 | X | +| 쿠폰 적용 | **핵심** | 할인 금액이 totalPrice 계산에 직접 영향 | X | +| 주문 저장 | **핵심** | 주문 자체 | X | +| 유저 행동 로깅 | 부가 | 로깅 실패해도 주문은 성공해야 함 | O | +| 판매량 집계 | 부가 | 집계 실패해도 주문에 영향 없음 | O | +| 주문 알림 | 부가 | 알림 실패해도 주문은 완료 | O | + +``` +분리 후: + [TX] 재고 차감 + 쿠폰 적용 + 주문 저장 → commit + [AFTER_COMMIT] OrderCreatedEvent 발행 + → 유저 행동 로깅 (비동기) + → Outbox 기록 → Kafka → product_metrics.sales_count 집계 +``` + +### 2.3 좋아요 플로우 — 핵심 vs 부가 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| Like INSERT | **핵심** | 사용자 의도 (좋아요 누르기) | X | +| Product.incrementLikeCount | 부가 | "집계 실패와 무관하게 좋아요는 성공" — 과제 요구사항 | O | +| 캐시 무효화 | 부가 | 캐시 무효화 실패해도 좋아요는 성공해야 함 | O | + +``` +분리 후: + [TX] Like INSERT + Outbox 기록 → commit + [AFTER_COMMIT] LikeCreatedEvent 발행 + → Product.incrementLikeCount (best-effort, 같은 스레드) + → 캐시 무효화 + → Outbox → Kafka → product_metrics.like_count 집계 +``` + +> **incrementLikeCount를 완전히 제거하지 않는 이유:** +> 사용자가 좋아요 직후 목록을 새로고침하면 반영되어 있기를 기대한다. +> AFTER_COMMIT에서 best-effort로 실행하되, 실패해도 Like 자체는 이미 저장됨. +> product_metrics + 배치가 최종 정합성을 보장하는 안전망 역할. + +### 2.4 상품 조회 플로우 — 조회수 추적 + +| 처리 | 핵심/부가 | 판단 근거 | 이벤트 분리 | +|---|---|---|---| +| 상품 데이터 반환 | **핵심** | 사용자 요청 목적 | X | +| 조회수 기록 | 부가 | 조회수 기록 실패해도 상품은 보여야 함 | O | + +``` +분리 후: + [TX 없음 — 읽기] 상품 조회 + 캐시 + [이벤트] ProductViewedEvent 발행 (조회수 로깅) + → Outbox 기록 → Kafka → product_metrics.view_count 집계 +``` + +> **조회 이벤트는 Outbox를 경유할 필요가 있는가?** +> 조회는 DB 쓰기가 없으므로 Outbox TX에 묶을 수 없다. +> 선택지: +> A. 조회 시 별도 TX로 Outbox INSERT → 오버헤드 +> B. ApplicationEvent → 직접 Kafka 발행 (fire-and-forget) → 유실 가능 +> C. ApplicationEvent → Redis 버퍼 → 배치로 Kafka 발행 +> +> 조회수는 정확성보다 근사치가 중요. 일부 유실 허용 가능. +> → B 방식 (직접 Kafka 발행) 또는 메모리 버퍼 후 배치 발행이 실용적. +> → 08 설계에서 최종 결정. 
+ +### 2.5 주문 취소 플로우 + +| 처리 | 핵심/부가 | 이벤트 분리 | +|---|---|---| +| Order.cancel() | **핵심** | X | +| 재고 복원 | **핵심** | X (재고 복원 실패 시 데이터 불일치) | +| 쿠폰 복원 | **핵심** | X (쿠폰 복원 실패 시 고객 손해) | +| 유저 행동 로깅 | 부가 | O | +| 판매량 차감 집계 | 부가 | O | + +### 2.6 @TransactionalEventListener phase 선택 기준 + +| phase | 실행 시점 | 적합한 용도 | +|---|---|---| +| BEFORE_COMMIT | TX 커밋 직전 | TX 안에서 추가 검증/기록이 필요할 때 | +| **AFTER_COMMIT** | TX 커밋 성공 후 | **부가 로직 (집계, 로깅, 알림, Kafka 발행)** | +| AFTER_ROLLBACK | TX 롤백 후 | 롤백 시 보상 작업 | +| AFTER_COMPLETION | TX 완료 후 (성공/실패 무관) | 리소스 정리 | + +**이 프로젝트에서는 AFTER_COMMIT이 기본.** +핵심 TX 성공 후에만 부가 로직을 실행해야 하므로. + +### 2.7 @Async 적용 판단 + +``` +@TransactionalEventListener(AFTER_COMMIT)만 쓰면: + → 같은 스레드에서 실행 + → 이벤트 리스너 완료까지 HTTP 응답 지연 + +@TransactionalEventListener(AFTER_COMMIT) + @Async: + → 별도 스레드에서 실행 + → HTTP 응답 즉시 반환 + → 단, 실패 시 사용자에게 노출 안 됨 (예외 은닉) + +판단: + - 유저 행동 로깅, Kafka 발행 → @Async (응답 지연 불필요) + - incrementLikeCount → 동기 (즉시 반영 UX, 단 실패해도 Like는 저장됨) + - 캐시 무효화 → 동기 (다음 조회 시 최신 데이터 보장) +``` + +--- + +## 3. Step 2 분석 — Kafka 이벤트 파이프라인 + +### 3.1 ApplicationEvent vs Kafka 경계 판단 + +``` +ApplicationEvent = 이 JVM 안에서 후속 처리를 트리거 + → 메모리 기반, 보존 없음, JVM 재시작 시 유실 + → 빠름, 의존성 없음 + +Kafka = 시스템 경계를 넘는 이벤트 전달 + → 디스크 보존, 재처리 가능, At Least Once + → 네트워크 I/O, 상대적 느림 + +판단 기준: + "이 이벤트가 다른 애플리케이션(commerce-streamer)에서 처리되어야 하는가?" + → YES: Kafka + → NO: ApplicationEvent만으로 충분 +``` + +| 이벤트 | ApplicationEvent | Kafka | 근거 | +|---|---|---|---| +| incrementLikeCount | O | X | 같은 JVM, 즉시 반영, DB UPDATE 1건 | +| 캐시 무효화 | O | X | 같은 JVM, Redis eviction | +| 유저 행동 로깅 | O | O | 내부 로깅 + 외부 데이터 파이프라인 | +| product_metrics 집계 | X | **O** | commerce-streamer에서 처리 | +| 선착순 쿠폰 발급 | X | **O** | commerce-streamer에서 처리 | + +### 3.2 Outbox와 ApplicationEvent의 역할 분리 + +``` +두 가지는 동시에 사용한다. 역할이 다르다. 
+ +[TX 시작] + Like INSERT + Outbox INSERT (eventType: LIKE_CREATED, payload: {productId, memberId, ...}) +[TX commit] + +[AFTER_COMMIT — ApplicationEvent] + → incrementLikeCount (best-effort, 동기) + → 캐시 무효화 (동기) + → 유저 행동 로깅 (@Async) + +[Outbox Poller — 별도 스케줄러] + → Outbox PENDING 조회 → Kafka 발행 → Outbox PROCESSED + +ApplicationEvent가 하는 것: 즉시 반영이 필요한 내부 후속 처리 +Outbox가 하는 것: 시스템 경계를 넘는 이벤트의 보장 발행 +``` + +### 3.3 Outbox → Kafka 발행 흐름 + +``` +[commerce-api] + + 도메인 TX: + [TX] 도메인 데이터 변경 + event_outbox INSERT → commit + + Outbox Poller (@Scheduled, 5초): + 1. SELECT * FROM event_outbox WHERE status = 'PENDING' ORDER BY id LIMIT 100 + 2. 각 건에 대해 Kafka 발행 (KafkaTemplate.send()) + 3. 발행 성공 → UPDATE status = 'PROCESSED' + 4. 발행 실패 → retry_count++, 최대 초과 시 FAILED + 운영 알림 + + event_outbox 테이블: + id, aggregate_type, aggregate_id, event_type, payload(JSON), + status(PENDING/PROCESSED/FAILED), created_at, processed_at, retry_count +``` + +### 3.4 토픽 설계 + +| 토픽 | Key | 이벤트 유형 | Producer | Consumer | +|---|---|---|---|---| +| `catalog-events` | productId | PRODUCT_VIEWED, LIKE_CREATED, LIKE_REMOVED | commerce-api | commerce-streamer | +| `order-events` | orderId | ORDER_CREATED, ORDER_CANCELLED | commerce-api | commerce-streamer | +| `coupon-issue-requests` | couponId | COUPON_ISSUE_REQUESTED | commerce-api | commerce-streamer | + +**Key 설계 근거:** +- catalog-events key=productId → 같은 상품의 이벤트는 같은 파티션 → 순서 보장 +- order-events key=orderId → 같은 주문의 이벤트는 같은 파티션 +- coupon-issue-requests key=couponId → 같은 쿠폰의 발급 요청은 같은 파티션 → 순차 처리로 수량 제어 + +### 3.5 Consumer (commerce-streamer) 설계 + +``` +[commerce-streamer] + + catalog-events Consumer: + → PRODUCT_VIEWED: product_metrics.view_count += 1 + → LIKE_CREATED: product_metrics.like_count += 1 + → LIKE_REMOVED: product_metrics.like_count -= 1 + + order-events Consumer: + → ORDER_CREATED: product_metrics.sales_count += item.quantity (상품별) + → ORDER_CANCELLED: product_metrics.sales_count -= item.quantity + + coupon-issue-requests Consumer: + → 
COUPON_ISSUE_REQUESTED: 수량 확인 → 발급 or 거절 + + 공통: + - manual Ack (AckMode.MANUAL) + - event_handled 테이블로 멱등 처리 + - version/updated_at 기준 최신 이벤트만 반영 +``` + +### 3.6 product_metrics 테이블 설계 + +```sql +CREATE TABLE product_metrics ( + product_id BIGINT PRIMARY KEY, + like_count BIGINT NOT NULL DEFAULT 0, + view_count BIGINT NOT NULL DEFAULT 0, + sales_count BIGINT NOT NULL DEFAULT 0, + sales_amount BIGINT NOT NULL DEFAULT 0, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +); +``` + +**기존 product_like_stats와의 관계:** +- product_like_stats는 product_metrics로 흡수 (역할 확장) +- like_count + view_count + sales_count + sales_amount 통합 관리 +- LikeCountSyncTasklet → MetricsReconcileTasklet로 진화 + +**Product.like_count 컬럼은 유지:** +- 정렬 인덱스(idx_product_like_count)가 이 컬럼 기준 +- 제거하면 좋아요순 정렬 성능 하락 +- 비정규화 캐시로 유지, 배치가 product_metrics 기준으로 보정 + +### 3.7 멱등 처리 설계 + +```sql +CREATE TABLE event_handled ( + event_id VARCHAR(100) PRIMARY KEY, + handled_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); +``` + +**왜 event_handled와 event_log를 분리하는가?** + +| 구분 | event_handled | event_log (별도 설계 시) | +|---|---|---| +| 목적 | 멱등성 보장 (중복 체크) | 감사/분석/디버깅 | +| 데이터 | event_id만 (최소) | 전체 페이로드 | +| 조회 패턴 | PK lookup (O(1)) | 범위 검색, 필터링 | +| 수명 | 짧음 (7~30일이면 충분) | 장기 보존 (규제, 감사) | +| 크기 | 작음 (ID만) | 큼 (전체 이벤트 데이터) | + +분리하면: +- event_handled는 작고 빨라서 PK lookup이 O(1) 유지 +- event_log가 커져도 멱등 체크 성능에 영향 없음 +- 각각 독립적인 보존 정책 적용 가능 + +### 3.8 Producer 설정 + +```yaml +# acks=all: 모든 ISR에 기록 확인 후 응답 → 메시지 유실 방지 +# enable.idempotence=true: 중복 발행 방지 (Producer 레벨) +spring: + kafka: + producer: + acks: all + properties: + enable.idempotence: true + max.in.flight.requests.per.connection: 5 +``` + +### 3.9 Consumer 설정 + +```yaml +# enable-auto-commit: false → manual Ack +# auto-offset-reset: latest → 신규 Consumer는 최신 메시지부터 +spring: + kafka: + consumer: + enable-auto-commit: false + auto-offset-reset: latest + listener: + ack-mode: manual +``` + +### 3.10 조회 이벤트의 Outbox 경유 여부 + +``` +문제: + 상품 조회 
= 읽기 전용 (DB 쓰기 없음) + → Outbox INSERT를 위한 별도 TX가 필요 + → 조회마다 DB 쓰기 1건 추가 = 오버헤드 + +선택지: + A. 별도 TX로 Outbox INSERT → 정확하지만 오버헤드 + B. 직접 Kafka 발행 (fire-and-forget) → 일부 유실 허용 + C. 메모리 버퍼 → 주기적 Kafka 발행 → 버퍼 유실 가능 (JVM 재시작) + D. Kafka 직접 발행 + 실패 시 로그 → 실용적 + +결정: D +근거: + - 조회수는 정확성보다 추세가 중요 (±수 건 허용) + - 조회마다 DB 쓰기를 추가하면 조회 TPS에 영향 + - KafkaTemplate.send()는 내부적으로 배치 + 버퍼링 (효율적) + - 발행 실패 시 에러 로그만 남기고, 배치로 보정하지 않음 +``` + +--- + +## 4. Step 3 분석 — 선착순 쿠폰 발급 + +### 4.1 현재 쿠폰 모델의 한계 + +``` +현재: + CouponFacade.issueCoupon(couponId, memberId) + → Coupon 조회 → 만료 확인 → CouponIssue 생성 + +부족한 것: + 1. 수량 제한 없음 (maxIssuanceCount) + 2. 중복 발급 방지 없음 (같은 쿠폰 + 같은 유저) + 3. 동기 처리 → 1만 명 동시 요청 시 DB 부하 +``` + +### 4.2 Kafka 기반 구조 + +``` +[사용자] → POST /api/v1/coupons/{couponId}/issue-request + → [commerce-api] + 1. 기본 검증 (쿠폰 존재, 만료 여부) + 2. coupon_issue_request 테이블에 PENDING 상태로 기록 + 3. Kafka에 COUPON_ISSUE_REQUESTED 발행 (key=couponId) + 4. 즉시 응답: { requestId, status: PENDING } + + → [Kafka] coupon-issue-requests 토픽 + + → [commerce-streamer] + 1. event_handled 확인 (멱등) + 2. Coupon.issuedCount 확인 (수량 초과?) + 3. 중복 발급 확인 (couponId + memberId) + 4. CouponIssue 생성 + Coupon.issuedCount++ (CAS UPDATE) + 5. coupon_issue_request 상태 업데이트 (COMPLETED / REJECTED) + +[사용자] → GET /api/v1/coupons/issue-requests/{requestId} + → 결과 조회 (PENDING / COMPLETED / REJECTED) +``` + +### 4.3 동시성 제어 — Kafka만으로는 불충분 + +``` +오해: "key=couponId → 같은 파티션 → 순차 소비 → 동시성 해결" + +현실: + 1. Consumer 장애 → Rebalancing → 메시지 재처리 (At Least Once) + → 같은 요청이 2번 처리될 수 있음 + 2. Consumer Group 내 파티션 재할당 중 중복 소비 가능 + 3. 배치 리스너 (현재 설정: 3000건/poll) → 배치 내에서는 순차이지만 + 동일 couponId의 여러 요청이 같은 배치에 포함될 수 있음 + +결론: + Kafka는 "부하 버퍼 + 순서 힌트"이지 "동시성 제어 수단"이 아님. + DB 레벨 동시성 제어가 반드시 필요. +``` + +### 4.4 DB 레벨 동시성 제어 + +```sql +-- 1. 수량 제한: CAS UPDATE (Compare-And-Swap) +UPDATE coupon +SET issued_count = issued_count + 1 +WHERE id = :couponId + AND issued_count < max_issuance_count + AND deleted_at IS NULL; +-- affected rows = 0 → 수량 소진 + +-- 2. 
중복 발급 방지: UNIQUE 제약 +ALTER TABLE coupon_issue +ADD UNIQUE INDEX uk_coupon_issue_coupon_member (coupon_id, member_id); +-- INSERT 시 중복이면 예외 → 거절 +``` + +### 4.5 Coupon 모델 확장 + +``` +Coupon 테이블 추가 컬럼: + max_issuance_count INT -- NULL이면 무제한 + issued_count INT DEFAULT 0 -- 현재 발급 수 + +coupon_issue_request 테이블 (신규): + id BIGINT PK + coupon_id BIGINT NOT NULL + member_id BIGINT NOT NULL + status VARCHAR(20) -- PENDING / COMPLETED / REJECTED + reject_reason VARCHAR(100) + created_at DATETIME + completed_at DATETIME +``` + +### 4.6 Redis vs Kafka 선착순 처리 비교 + +``` +Redis 방식: + INCR coupon:{id}:count → 100 이하면 발급 + → 장점: 초고속 (O(1)), 원자적 카운트 + → 단점: Redis 장애 시 발급 불가, 영속성 약함 + +Kafka 방식: + API → Kafka → Consumer 순차 처리 → DB CAS UPDATE + → 장점: 부하 버퍼, 영속성 (디스크), 재처리 가능 + → 단점: Redis보다 느림 (ms vs ns), 순서 보장이 파티션 단위 + +이 프로젝트 선택: Kafka (과제 요구사항) + - 단, DB CAS UPDATE로 정확한 수량 제어 + - Kafka는 "폭주 요청 버퍼링" 역할 +``` + +### 4.7 발급 결과 확인 구조 + +``` +선택지: + A. Polling — GET /coupon-issue-requests/{requestId} + B. SSE (Server-Sent Events) + C. WebSocket + +결정: A (Polling) +근거: + - 구현 단순, 인프라 추가 불필요 + - 쿠폰 발급은 수 초 내 완료 → 1~2회 polling이면 충분 + - SSE/WebSocket은 커넥션 유지 오버헤드 +``` + +--- + +## 5. 아키텍트 점검 — 리스크 분석 + +### 5.1 AFTER_COMMIT 이벤트 실패 시 대응 + +``` +리스크: + AFTER_COMMIT에서 incrementLikeCount 실패 + → Like는 저장됨, likeCount는 업데이트 안 됨 → 불일치 + +대응: + 1. try-catch + 에러 로깅 (예외 전파 방지) + 2. product_metrics (Kafka 경유)가 정확한 집계값 보유 + 3. 
MetricsReconcileTasklet이 주기적으로 Product.like_count 보정 + → 3중 안전망: best-effort 즉시 반영 + Kafka 집계 + 배치 대사 +``` + +### 5.2 Outbox Poller 실패 시 + +``` +리스크: + Outbox Poller가 Kafka 발행 실패 → PENDING 상태 유지 + +대응: + - Poller가 재시도 (retry_count++) + - 최대 재시도 초과 시 FAILED + 운영 알림 + - Consumer 측 멱등 처리로 중복 발행 안전 +``` + +### 5.3 Consumer 처리 실패 시 + +``` +리스크: + commerce-streamer가 이벤트 처리 실패 + → manual Ack를 하지 않으면 Kafka가 재전달 + +대응: + - 재시도 가능: 멱등 처리로 안전 + - 반복 실패: DLQ (Dead Letter Queue)로 격리 + - DLQ 처리: 운영자 수동 확인 또는 별도 Consumer +``` + +### 5.4 product_metrics 정합성 + +``` +리스크: + Kafka 이벤트 유실/순서 역전 → product_metrics 부정확 + +대응: + - At Least Once + 멱등 처리 → 유실 방지 + - MetricsReconcileTasklet → 원본 데이터(likes, order_items) 기준 대사 + - product_metrics는 "실시간 근사치", 배치가 "정확한 값" 보정 +``` + +### 5.5 event_outbox vs payment_outbox + +``` +6주차에서 PaymentOutbox를 설계했다. +7주차에서 event_outbox를 추가한다. + +이 둘은 다른 테이블인가, 같은 테이블인가? + +분석: + PaymentOutbox: PG 호출 보장용 (event_type: PAYMENT_REQUEST) + event_outbox: Kafka 발행 보장용 (event_type: LIKE_CREATED, ORDER_CREATED, ...) + + 목적이 다르다: + PaymentOutbox → PG API 호출 재시도 + event_outbox → Kafka 메시지 발행 재시도 + + 처리 주체도 다르다: + PaymentOutbox → Outbox Poller가 PG 호출 + event_outbox → Outbox Poller가 Kafka 발행 + +결정: 별도 테이블로 분리 +근거: + - 단일 테이블에 두 가지 목적을 혼합하면 Poller 로직이 복잡해짐 + - PaymentOutbox는 PG 호출 + 상태 확인 로직 포함 (Kafka와 완전히 다름) + - 각각 독립적인 Poller, 독립적인 retry 정책 적용 가능 +``` + +--- + +## 6. 
Consumer Group 분리 (Nice-To-Have) + +### 6.1 현재 단일 Consumer Group + +``` +commerce-streamer (Consumer Group: loopers-default-consumer) + → catalog-events 소비 → product_metrics upsert + → order-events 소비 → product_metrics upsert + → coupon-issue-requests 소비 → 쿠폰 발급 +``` + +### 6.2 관심사별 Consumer Group 분리 + +``` +Consumer Group: metrics-collector + → catalog-events → product_metrics upsert (like, view) + → order-events → product_metrics upsert (sales) + +Consumer Group: coupon-issuer + → coupon-issue-requests → 선착순 쿠폰 발급 + +이점: + - 쿠폰 발급 실패가 metrics 집계에 영향 안 줌 + - 각 Consumer Group 독립 스케일링 가능 + - 장애 격리 +``` + +--- + +## 7. DLQ 구성 (Nice-To-Have) + +### 7.1 DLQ 설계 + +``` +반복 실패 메시지를 격리하여 정상 메시지 처리를 방해하지 않음. + +원본 토픽: catalog-events +DLQ 토픽: catalog-events.DLT (Dead Letter Topic) + +동작: + Consumer가 메시지 처리 3회 실패 + → DLQ 토픽으로 이동 + → 운영 알림 + → 수동 확인 후 재처리 or 폐기 +``` + +### 7.2 Spring Kafka DLQ 설정 + +```java +@Bean +public DefaultErrorHandler errorHandler(KafkaTemplate kafkaTemplate) { + DeadLetterPublishingRecoverer recoverer = + new DeadLetterPublishingRecoverer(kafkaTemplate); + return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 3)); +} +``` + +--- + +## 8. 고도화 분석 — Outbox Poller 중복 처리 + +### 8.1 문제 + +다중 인스턴스에서 Outbox Poller가 같은 PENDING 행을 동시에 SELECT → 같은 이벤트를 Kafka에 2번 발행. 
+ +### 8.2 선택지 분석 + +| 선택지 | 설명 | 문제 | +|---|---|---| +| SELECT FOR UPDATE SKIP LOCKED | 행 잠금, 잠긴 건 건너뜀 | **DB 커넥션 점유** — Kafka 장애 시 잠긴 행 재시도 불가 | +| Debezium CDC | binlog에서 직접 Kafka 발행 | 인프라 추가 (Kafka Connect) | +| 중복 허용 + 멱등 Consumer | 잠금 없이 SELECT → Consumer가 event_handled로 중복 제거 | Consumer PK lookup 1회 (~0.1ms) | + +### 8.3 결정: Debezium CDC + +- **근거 1**: 실무 적용 전 Debezium 설정 경험 확보에 의의 +- **근거 2**: 중복 발행 원천 해결 (binlog 오프셋 기반, 단일 처리) +- **근거 3**: Near real-time (Poller 5초 → Debezium 수백 ms) +- **근거 4**: DB 부하 없음 (SELECT 폴링 제거) + +### 8.4 Debezium 구성 + +``` +Docker 추가: + - kafka-connect (debezium/connect:2.5) + - MySQL binlog 활성화 (--log-bin, --binlog-format=ROW) + +Connector: + - Debezium MySQL Connector + - Outbox Event Router SMT + - route.by.field=aggregate_type → 토픽 라우팅 +``` + +### 8.5 Debezium 도입으로 달라지는 점 + +1. event_outbox에 status 컬럼 불필요 (PENDING/PROCESSED 구분 없음) +2. OutboxPollerScheduler 불필요 (Debezium이 대체) +3. 테이블 정리: 1시간 보존 후 단순 DELETE (최대 6.25만 건 → ~1초) +4. PaymentOutbox는 기존 Poller 유지 (PG 호출 전용, Kafka 발행이 아님) + +--- + +## 9. 고도화 분석 — Outbox 테이블 정리 전략 + +### 9.1 규모 산정 (쿠팡급 기준) + +``` +좋아요: 일 100만 건, 주문: 일 50만 건, 조회: Outbox 미경유 +→ event_outbox: 일 150만 건 (행당 ~500 bytes) +→ Debezium 도입 → 1시간 보존 기준 최대 6.25만 건 +``` + +### 9.2 선택지 비교 + +| 방법 | 정리 속도 | JPA 호환 | 복잡도 | 대규모 적합 | +|---|---|---|---|---| +| Batch DELETE | 소량 시 빠름 | O | 낮음 | Debezium 시 O | +| 라운드 로빈 | TRUNCATE O(1) | **X** (Native SQL) | 높음 | O | +| PARTITION DROP | O(1) | O | 중간 | O | +| Debezium + DELETE | 소량 DELETE | O | **낮음** | **O** | + +### 9.3 결정: Debezium + 단순 Batch DELETE + +Debezium이 binlog에서 읽으므로 테이블 누적이 발생하지 않음. +1시간 보존 후 DELETE → 최대 6.25만 건 → 부담 없음. + +라운드 로빈 미채택 이유: JPA Entity의 @Table(name) 고정 → Native Query 강제 → DIP 위반. +파티셔닝 미채택 이유: Debezium 덕에 테이블이 작게 유지 → 파티셔닝은 과도한 최적화. + +--- + +## 10. 고도화 분석 — Kafka와 아키텍처 관계 + +### 10.1 프로젝트 아키텍처 명명 + +현재 구조는 "멀티 프로세스 모듈러 아키텍처" — 3개 JVM, 공유 DB, 공유 레포. +모놀리스(단일 JVM)도 아니고, MSA(서비스별 DB)도 아님. 
+ +### 10.2 Kafka 적용 지점 + +| # | 토픽 | Producer | Consumer | 목적 | +|---|---|---|---|---| +| 1 | catalog-events | commerce-api | commerce-streamer | 좋아요/조회수 → product_metrics | +| 2 | order-events | commerce-api | commerce-streamer | 판매량 → product_metrics | +| 3 | coupon-issue-requests | commerce-api | commerce-streamer | 선착순 쿠폰 버퍼링 | + +### 10.3 MSA 전환 불필요 + +- Kafka는 MSA 전용이 아닌 "프로세스 간 비동기 통신 인프라" +- 현재 멀티 프로세스에서 충분히 유효 +- MSA 전환 트리거: 팀 분리, 극단적 스케일 차이, 기술 스택 분리, 물리적 장애 격리 +- 현재 해당 없음 + +### 10.4 commerce-api에 modules:kafka 의존성 추가 + +Outbox INSERT는 commerce-api에서 발생 → Debezium이 발행하므로 KafkaTemplate 불필요. +단, 조회수는 Outbox 미경유 → 직접 Kafka 발행 → **KafkaTemplate 필요**. +→ commerce-api에 `implementation(project(":modules:kafka"))` 추가. + +--- + +## 11. 고도화 분석 — Kafka 설정 점검 + +### 11.1 발견된 문제점 + +| # | 문제 | 위치 | 심각도 | +|---|---|---|---| +| 1 | Consumer `value-serializer` → `value-deserializer` 오타 | kafka.yml:21 | 경미 (Converter가 대체) | +| 2 | Producer acks/idempotence 미설정 | kafka.yml:14-17 | **중요** (메시지 유실 가능) | +| 3 | auto.offset.reset 위치 (글로벌 → Consumer 전용) | kafka.yml:12 | 경미 | +| 4 | 단건 처리용 Consumer Factory 부재 | KafkaConfig.java | **중요** (쿠폰 발급용) | +| 5 | Error Handler / DLQ 미설정 | KafkaConfig.java | **중요** | +| 6 | 토픽 생성 전략 없음 | N/A | 중간 | + +### 11.2 보완 사항 → 08 반영 + +- Producer: acks=all, enable.idempotence=true, linger.ms=50, batch.size=32KB +- Consumer: value-deserializer 수정, SINGLE_LISTENER 추가 +- Error Handler: DefaultErrorHandler + DeadLetterPublishingRecoverer +- 토픽: @Bean NewTopic으로 선언적 생성 + +--- + +## 12. 고도화 분석 — Redis 설정 점검 + +### 12.1 현재 상태 + +- Master-Replica 구성 완료 (ReadFrom.REPLICA_PREFERRED) +- Lettuce NIO multiplexing (커넥션 풀 불필요) +- StringRedisSerializer (적절) + +### 12.2 보완 필요: 커맨드 타임아웃 + +현재: 타임아웃 미설정 → Redis 장애 시 스레드 무한 대기 가능. +보완: commandTimeout(Duration.ofMillis(500)) 추가. +근거: 정상 응답 ~1ms, 500ms 초과 = 장애 판단. + +--- + +## 13. 
고도화 분석 — @Async 스레드 풀 + +### 13.1 @Async 작업 분류 + +| 작업 | @Async 여부 | DB | Redis | Kafka | +|---|---|---|---|---| +| incrementLikeCount | 동기 (Tomcat) | O | X | X | +| 캐시 무효화 | 동기 (Tomcat) | X | O | X | +| 유저 행동 로깅 | **@Async** | X | X | X | +| 조회수 Kafka 발행 | **@Async** | X | X | 논블로킹 | + +### 13.2 결정: core=2, max=4 + +@Async 작업은 DB/Redis 커넥션 불사용 → 초경량. +HikariCP(max 40)과 경합 없음. 큰 풀은 컨텍스트 스위칭만 유발. +CallerRunsPolicy로 큐 초과 시 배압. + +--- + +## 14. → 08 반영 사항 (설계 명세 반영 대기) + +| 반영 대상 | 내용 | +|---|---| +| Step 1 | ApplicationEvent 분리 대상 목록 + 리스너 설계 + @Async 스레드 풀 | +| Step 2 — Debezium | event_outbox DDL, Debezium Connector 설정, Kafka Connect Docker | +| Step 2 — Kafka | Producer 보완 (acks, idempotence), SINGLE_LISTENER, Error Handler, 토픽 선언 | +| Step 2 — Redis | commandTimeout 추가 | +| Step 2 — 집계 | product_metrics DDL, Consumer 설계 | +| Step 3 | Coupon 모델 확장, coupon_issue_request DDL, 동시성 제어 설계 | +| 의존성 | commerce-api에 modules:kafka 추가 | +| 패키지 구조 | commerce-api 이벤트 패키지, commerce-streamer Consumer 패키지 | +| 테이블 정리 | Debezium + 1시간 보존 + Batch DELETE | +| 테스트 | Phase별 테스트 전략 | From 2c7cb6a255428c0be4fad20ff923c3add037befb Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 15:27:34 +0900 Subject: [PATCH 08/14] =?UTF-8?q?feat:=20Kafka=20=EB=A6=AC=EB=B0=B8?= =?UTF-8?q?=EB=9F=B0=EC=8B=B1=20=EC=A0=84=EB=9E=B5=20=EB=B3=B4=EA=B0=95=20?= =?UTF-8?q?=E2=80=94=20CooperativeSticky=20+=20Static=20Membership?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - kafka.yml: partition.assignment.strategy=CooperativeStickyAssignor 명시 - KafkaConfig: group.instance.id 설정 (Static Membership, 배포 시 불필요한 리밸런싱 방지) - SINGLE_MAX_POLL_INTERVAL_MS: 10분 → 3분 (최악 케이스 대비 5배 마진, stuck 감지 3배 단축) - 블로그 소재: 리밸런싱 전략 분석 및 산술 근거 정리 --- blog/week7-rebalancing-strategy.md | 193 ++++++++++++++++++ .../com/loopers/confg/kafka/KafkaConfig.java | 9 +- modules/kafka/src/main/resources/kafka.yml | 1 + 3 files changed, 202 
insertions(+), 1 deletion(-) create mode 100644 blog/week7-rebalancing-strategy.md diff --git a/blog/week7-rebalancing-strategy.md b/blog/week7-rebalancing-strategy.md new file mode 100644 index 000000000..36cf77d6a --- /dev/null +++ b/blog/week7-rebalancing-strategy.md @@ -0,0 +1,193 @@ +Kafka 리밸런싱 전략 — 이커머스 이벤트 파이프라인에 적용한 설계와 근거 + +> 이 파일은 블로그 글과 PR 설명에 사용할 소재 정리다. + +--- + +## 배경: 리밸런싱이 왜 중요한가 + +Kafka Consumer Group에서 컨슈머가 추가/제거되면 파티션 재할당(리밸런싱)이 발생한다. 리밸런싱 동안 메시지 소비가 중단되므로, 대규모 트래픽 환경에서는 이 중단 시간이 곧 메시지 적체로 이어진다. + +우리 시스템에는 성격이 다른 두 가지 Consumer가 있다: + +| Consumer | 토픽 | 특성 | +|---|---|---| +| MetricsConsumer | catalog-events, order-events | 집계용, 순서 무관, 대량 배치 처리 | +| CouponIssueConsumer | coupon-issue-requests | 선착순 발급, 순서 중요, 건별 처리 | + +이 두 Consumer에 동일한 리밸런싱 설정을 적용하는 건 맞지 않다. 처리 특성이 다르면 리밸런싱 트리거 조건도 달라야 한다. + +--- + +## 설계 결정 1: Cooperative Sticky Assignor 명시 + +### 문제 + +Kafka Consumer의 기본 할당 전략인 Range(Eager 프로토콜)는 리밸런싱 시 **모든 컨슈머의 파티션을 회수한 뒤 재할당**한다. 이 동안 전체 Consumer Group이 멈추는 Stop-the-world가 발생한다. + +### 결정 + +`CooperativeStickyAssignor`를 명시적으로 설정했다. + +```yaml +# kafka.yml +consumer: + properties: + partition.assignment.strategy: org.apache.kafka.clients.consumer.CooperativeStickyAssignor +``` + +### 근거 + +- Cooperative 프로토콜은 **변경이 필요한 파티션만** 재할당한다. 나머지 파티션은 기존 컨슈머가 계속 처리한다. +- Kafka 3.0+의 기본 설정 목록(`RangeAssignor, CooperativeStickyAssignor`)에 포함되어 있긴 하지만, 목록 첫 번째인 RangeAssignor가 우선 적용되므로 Cooperative 동작을 쓰려면 명시가 필수다. 버전 업그레이드 시 동작 변경 리스크를 방지하는 효과도 있다. +- 운영 환경에서 "왜 이 전략인가?"를 코드만 보고 파악할 수 있어야 한다. + +### 트레이드오프 + +Cooperative Sticky는 리밸런싱이 2단계(revoke → assign)로 나뉘어 전체 시간이 약간 더 길 수 있다. 하지만 **소비 중단 없이** 진행되므로, 처리량 관점에서는 이점이 크다. + +--- + +## 설계 결정 2: Consumer 특성별 max.poll.interval.ms 분리 + +### 문제 + +`max.poll.interval.ms`는 "두 번의 poll() 사이 최대 허용 시간"이다. 이 시간을 초과하면 컨슈머 클라이언트가 스스로 그룹 탈퇴(LeaveGroup)를 전송하여 해당 컨슈머가 그룹에서 제외되고 리밸런싱이 시작된다. 하지만 두 Consumer의 처리 시간이 근본적으로 다르다. 
+ +### 결정 + +| 설정 | BATCH_LISTENER (MetricsConsumer) | SINGLE_LISTENER (CouponIssueConsumer) | +|---|---|---| +| max.poll.records | 3,000 | 1 | +| max.poll.interval.ms | **2분** | **3분** | +| session.timeout.ms | 60초 | 60초 | +| heartbeat.interval.ms | 20초 (session의 1/3) | 20초 | + +### 산술 근거 + +**MetricsConsumer (2분)**: +- 3,000건 × UPSERT 1건당 ~1ms = 최대 3초 +- 2분(120초) = 40배 마진 + +**CouponIssueConsumer (3분)**: +- 정상 처리: JSON 파싱 + INSERT IGNORE + CAS UPDATE + INSERT + UPDATE = ~10ms +- DLQ 재시도: FixedBackOff(1초 × 3회) = 3초 +- 커넥션 풀 고갈 최악 케이스: HikariCP connectionTimeout 30초 + 재시도 3초 = ~33.5초 +- 3분(180초) = 최악 케이스 대비 **5배 마진** + +### 왜 10분이 아니라 3분인가 + +초기에는 CouponIssueConsumer의 max.poll.interval.ms를 10분으로 설정했다. 하지만 검토 결과: + +- 10분은 정상 처리 대비 60,000배 마진 — 과도하다 +- 컨슈머가 실제로 stuck 되었을 때(deadlock, 무한루프), **10분간 감지 불가** +- 선착순 쿠폰 플래시 세일 기준 100 req/s × 600초 = **6만 건 처리 지연** +- 3분으로 줄이면 stuck 감지 시간 1/3로 단축, 커넥션 풀 고갈 최악 케이스에도 5배 마진 확보 + +**교훈**: 타임아웃 값은 "넉넉하게"가 아니라 "최악 케이스의 N배"로 설정해야 한다. 너무 짧으면 불필요한 리밸런싱, 너무 길면 장애 감지 지연. 산술적 근거 없이 설정하면 양쪽 다 위험하다. + +--- + +## 설계 결정 3: Static Membership (group.instance.id) + +### 문제 + +컨슈머가 재시작되면 브로커는 새로운 멤버로 인식하여 리밸런싱을 트리거한다. Rolling deployment 시 N개 인스턴스가 순차 재시작되면 N번의 리밸런싱이 발생한다. + +### 결정 + +`group.instance.id`를 호스트명 기반으로 설정한다. + +```java +// KafkaConfig.java +@Value("${HOSTNAME:local}") +private String hostname; + +// BATCH_LISTENER +consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, hostname + "-batch"); + +// SINGLE_LISTENER +consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, hostname + "-single"); +``` + +### 효과 + +- 컨슈머 재시작 시 `session.timeout.ms`(60초) 이내에 복귀하면 **리밸런싱 없이 기존 파티션 유지** +- Rolling deployment 시 불필요한 리밸런싱 방지 +- Kubernetes 환경에서 `HOSTNAME`은 Pod 이름으로 자동 설정됨 + +### 주의점 + +- `session.timeout.ms`를 초과하는 재시작은 여전히 리밸런싱 발생 +- 인스턴스가 영구 제거될 때는 해당 `group.instance.id`의 파티션이 `session.timeout.ms` 후에야 재할당됨 + +--- + +## 리밸런싱 발생 시 안전성: INSERT-first 멱등 패턴 + +리밸런싱으로 파티션이 재할당되면 **이미 처리했지만 ack 전인 메시지가 재처리**될 수 있다. 이 중복 소비에 대한 안전장치가 필요하다. 
+ +### MetricsConsumer (BATCH, MANUAL ack) + +``` +3,000건 처리 중 1,500건째에 리밸런싱 발생 +→ ack.acknowledge() 호출 전이므로 3,000건 전체 재처리 +→ INSERT IGNORE event_handled로 1,500건은 멱등 스킵 +→ 나머지 1,500건만 실제 처리 +→ UPSERT product_metrics이므로 값이 꼬이지 않음 +``` + +### CouponIssueConsumer (SINGLE, MANUAL ack) + +``` +쿠폰 발급 처리 중 리밸런싱 발생 +→ ack 전이므로 해당 메시지 재처리 +→ INSERT IGNORE event_handled로 중복 감지 → 스킵 +→ 이미 발급된 쿠폰이 다시 발급되지 않음 +``` + +핵심은 **"리밸런싱을 막는 것"이 아니라 "리밸런싱이 발생해도 비즈니스가 깨지지 않는 구조"**를 만드는 것이다. + +--- + +## 전체 설정 요약 + +``` +┌──────────────────────────────────────────────────────────────┐ +│ Kafka Consumer 설정 │ +├──────────────────────────────────────────────────────────────┤ +│ [공통] │ +│ ├── partition.assignment.strategy: CooperativeStickyAssignor│ +│ ├── isolation.level: read_committed │ +│ ├── enable-auto-commit: false │ +│ └── ack-mode: MANUAL │ +│ │ +│ [BATCH_LISTENER — MetricsConsumer] │ +│ ├── max.poll.records: 3,000 │ +│ ├── max.poll.interval.ms: 120,000 (2분) │ +│ ├── session.timeout.ms: 60,000 (1분) │ +│ ├── heartbeat.interval.ms: 20,000 (20초) │ +│ ├── group.instance.id: ${HOSTNAME}-batch │ +│ ├── concurrency: 3 │ +│ └── 멱등: INSERT IGNORE event_handled + UPSERT │ +│ │ +│ [SINGLE_LISTENER — CouponIssueConsumer] │ +│ ├── max.poll.records: 1 │ +│ ├── max.poll.interval.ms: 180,000 (3분) │ +│ ├── session.timeout.ms: 60,000 (1분) │ +│ ├── heartbeat.interval.ms: 20,000 (20초) │ +│ ├── group.instance.id: ${HOSTNAME}-single │ +│ ├── concurrency: 1 │ +│ └── 멱등: INSERT IGNORE + CAS UPDATE + DLQ │ +└──────────────────────────────────────────────────────────────┘ +``` + +--- + +## 라이팅 포인트 + +1. **"리밸런싱을 막는 것 vs 리밸런싱에 안전한 것"** — 분산 시스템에서 장애를 완전히 막을 수는 없다. 막으려 하기보다 발생해도 안전한 구조를 만드는 게 Resilience다. + +2. **"타임아웃은 감으로 정하지 않는다"** — max.poll.interval.ms를 10분으로 잡으면 안전해 보이지만, stuck 컨슈머를 10분간 방치하는 것과 같다. 최악 케이스를 산출하고, 적절한 마진 배수를 곱하는 게 엔지니어링이다. + +3. **"같은 시스템 안에서도 Consumer마다 전략이 달라야 한다"** — 집계용 Consumer와 발급용 Consumer에 같은 타임아웃을 적용하는 건 "모든 API에 동일한 Circuit Breaker 임계값을 적용하는 것"과 같다. 도메인 특성이 다르면 인프라 설정도 달라야 한다. 
diff --git a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java index 4aae07ac4..08c9d5aa4 100644 --- a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java +++ b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java @@ -16,6 +16,8 @@ import org.springframework.kafka.support.converter.ByteArrayJsonMessageConverter; import org.springframework.util.backoff.FixedBackOff; +import org.springframework.beans.factory.annotation.Value; + import java.util.HashMap; import java.util.Map; @@ -26,13 +28,16 @@ public class KafkaConfig { public static final String BATCH_LISTENER = "BATCH_LISTENER_DEFAULT"; public static final String SINGLE_LISTENER = "SINGLE_LISTENER_DEFAULT"; + @Value("${HOSTNAME:local}") + private String hostname; + public static final int MAX_POLLING_SIZE = 3000; // read 3000 msg public static final int FETCH_MIN_BYTES = (1024 * 1024); // 1mb public static final int FETCH_MAX_WAIT_MS = 5 * 1000; // broker waiting time = 5s public static final int SESSION_TIMEOUT_MS = 60 * 1000; // session timeout = 1m public static final int HEARTBEAT_INTERVAL_MS = 20 * 1000; // heartbeat interval = 20s ( 1/3 of session_timeout ) public static final int MAX_POLL_INTERVAL_MS = 2 * 60 * 1000; // max poll interval = 2m - public static final int SINGLE_MAX_POLL_INTERVAL_MS = 10 * 60 * 1000; // max poll interval = 10m + public static final int SINGLE_MAX_POLL_INTERVAL_MS = 3 * 60 * 1000; // max poll interval = 3m @Bean public ProducerFactory producerFactory(KafkaProperties kafkaProperties) { @@ -74,6 +79,7 @@ public ConcurrentKafkaListenerContainerFactory defaultBatchListe consumerConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, SESSION_TIMEOUT_MS); consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, HEARTBEAT_INTERVAL_MS); consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, MAX_POLL_INTERVAL_MS); + 
consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, hostname + "-batch"); ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig)); @@ -95,6 +101,7 @@ public ConcurrentKafkaListenerContainerFactory singleListenerCon consumerConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, SESSION_TIMEOUT_MS); consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, HEARTBEAT_INTERVAL_MS); consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, SINGLE_MAX_POLL_INTERVAL_MS); + consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, hostname + "-single"); ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig)); diff --git a/modules/kafka/src/main/resources/kafka.yml b/modules/kafka/src/main/resources/kafka.yml index 0790bfb89..14d9153ec 100644 --- a/modules/kafka/src/main/resources/kafka.yml +++ b/modules/kafka/src/main/resources/kafka.yml @@ -29,6 +29,7 @@ spring: properties: enable-auto-commit: false isolation.level: read_committed + partition.assignment.strategy: org.apache.kafka.clients.consumer.CooperativeStickyAssignor listener: ack-mode: manual From 541233f5967fe45db8bd7049ebad734bf3628ed8 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 15:36:56 +0900 Subject: [PATCH 09/14] =?UTF-8?q?fix:=20=EC=98=A4=ED=94=84=EC=85=8B=20?= =?UTF-8?q?=EA=B4=80=EB=A6=AC=20=EC=A0=84=EB=9E=B5=20=EC=88=98=EC=A0=95=20?= =?UTF-8?q?=E2=80=94=20auto.offset.reset=20=EC=B6=A9=EB=8F=8C=20=EC=A0=9C?= =?UTF-8?q?=EA=B1=B0=20+=20DLQ=20=ED=99=9C=EC=84=B1=ED=99=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - kafka.yml: 전역 auto.offset.reset=latest 제거 (consumer earliest와 충돌) - CouponIssueConsumer: try-catch 제거 → 예외가 DefaultErrorHandler로 
전파 - 기존: 예외를 삼키고 무조건 ack → DLQ 도달 불가 (at-most-once) - 수정: 예외 전파 → FixedBackOff(1초×3회) 재시도 → DLT 토픽 전송 (at-least-once) - 블로그 소재: 오프셋 전략 분석 및 DLQ 버그 발견/수정 기록 --- .../consumer/CouponIssueConsumer.java | 8 +- blog/week7-offset-strategy.md | 215 ++++++++++++++++++ modules/kafka/src/main/resources/kafka.yml | 1 - 3 files changed, 216 insertions(+), 8 deletions(-) create mode 100644 blog/week7-offset-strategy.md diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java index 0fa2b7e6a..a44778728 100644 --- a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java +++ b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java @@ -29,13 +29,7 @@ public class CouponIssueConsumer { ) public void consume(ConsumerRecord record, Acknowledgment ack) { TransactionTemplate tx = new TransactionTemplate(transactionManager); - - try { - tx.executeWithoutResult(status -> processRecord(record)); - } catch (Exception e) { - log.error("CouponIssueConsumer 처리 실패 — offset={}", record.offset(), e); - } - + tx.executeWithoutResult(status -> processRecord(record)); ack.acknowledge(); } diff --git a/blog/week7-offset-strategy.md b/blog/week7-offset-strategy.md new file mode 100644 index 000000000..035b8169f --- /dev/null +++ b/blog/week7-offset-strategy.md @@ -0,0 +1,215 @@ +Kafka 오프셋 전략 — 수동 커밋, At-Least-Once, 그리고 DLQ가 동작하지 않던 버그 + +> 이 파일은 블로그 글과 PR 설명에 사용할 소재 정리다. + +--- + +## 배경: 오프셋 관리가 중요한 이유 + +Kafka에서 "어디까지 읽었는가"를 추적하는 오프셋은 컨슈머가 직접 관리한다. 브로커는 모른다. 이 설계 덕분에 Kafka는 높은 처리량을 유지하지만, 오프셋을 잘못 관리하면 메시지 유실이나 중복 처리가 발생한다. + +우리 시스템에는 두 가지 Consumer가 있다: + +| Consumer | 토픽 | 실패 시 허용 수준 | +|---|---|---| +| MetricsConsumer | catalog-events, order-events | 유실 허용 (배치 보정) | +| CouponIssueConsumer | coupon-issue-requests | **유실 불허** (선착순 쿠폰) | + +같은 시스템이지만 실패 시 허용 수준이 다르다. 
이 차이가 오프셋 커밋 전략에 직접적으로 영향을 준다. + +--- + +## 설계 결정 1: 수동 커밋 (enable-auto-commit=false + AckMode.MANUAL) + +### 자동 커밋의 문제 + +자동 커밋(`enable.auto.commit=true`)은 `auto.commit.interval.ms`(기본 5초) 주기로 커밋한다. 메시지를 poll 했지만 아직 처리하지 않은 시점에 커밋이 일어날 수 있다. 이 상태에서 컨슈머가 죽으면 메시지가 유실된다. + +``` +poll() → 3000건 수신 → [자동 커밋 발생] → 1500건째 처리 중 crash +→ 재시작 시 커밋된 오프셋부터 읽음 → 1500건 유실 +``` + +### 우리의 선택 + +```yaml +consumer: + properties: + enable-auto-commit: false +listener: + ack-mode: manual +``` + +모든 Factory에서 `AckMode.MANUAL` 적용. 비즈니스 로직이 완료된 후 명시적으로 `ack.acknowledge()`를 호출해야만 오프셋이 커밋된다. + +### 라이팅 포인트 + +"자동 커밋은 편리하지만, 편리함이 안전을 보장하지는 않는다. 메시지 유실이 허용되지 않는 도메인에서는 수동 커밋 외에 선택지가 없다." + +--- + +## 설계 결정 2: Consumer별 커밋 + 실패 처리 전략 분리 + +### MetricsConsumer: catch-and-continue + 배치 보정 + +```java +for (ConsumerRecord record : records) { + try { + tx.executeWithoutResult(status -> processRecord(record)); + } catch (Exception e) { + log.error("처리 실패", e); // 실패한 레코드는 스킵 + } +} +ack.acknowledge(); // 배치 전체 ack +``` + +실패한 레코드를 스킵하고 전체 배치를 ack한다. 이건 의도된 설계다: +- 집계 데이터는 즉시 정확하지 않아도 된다 +- MetricsReconcile 배치가 주기적으로 정합성을 보정한다 +- 하나의 실패 레코드 때문에 나머지 2,999건이 재처리되는 건 비효율적이다 + +### CouponIssueConsumer: 예외 전파 + DLQ + +```java +public void consume(ConsumerRecord record, Acknowledgment ack) { + TransactionTemplate tx = new TransactionTemplate(transactionManager); + tx.executeWithoutResult(status -> processRecord(record)); + ack.acknowledge(); +} +``` + +예외가 발생하면 `ack.acknowledge()`에 도달하지 못한다. 예외는 Spring Kafka의 `DefaultErrorHandler`로 전파되어: + +1. `FixedBackOff(1000L, 3)` — 1초 간격으로 3회 재시도 +2. 재시도 모두 실패 시 `DeadLetterPublishingRecoverer`가 `coupon-issue-requests.DLT` 토픽으로 전송 +3. 
DLT 메시지는 운영자 확인 후 재처리 + +### 이전 버그: DLQ가 동작하지 않았던 이유 + +초기 구현에서는 CouponIssueConsumer도 MetricsConsumer와 동일한 패턴을 사용했다: + +```java +// 버그가 있던 코드 +try { + tx.executeWithoutResult(status -> processRecord(record)); +} catch (Exception e) { + log.error("처리 실패", e); // 예외를 삼킴 +} +ack.acknowledge(); // 항상 ack → DLQ 도달 불가 +``` + +`DefaultErrorHandler + DeadLetterPublishingRecoverer`를 Factory에 설정했지만, `consume()` 메서드 내부에서 예외를 catch하고 ack까지 호출하므로 ErrorHandler에 예외가 전파되지 않았다. DLQ 설정이 사실상 죽은 코드였다. + +실패 시 흐름: +``` +DB 에러 → TransactionTemplate 롤백 → catch에서 로그만 → ack → 오프셋 커밋 +→ 메시지 재전달 불가, coupon_issue_request는 PENDING으로 영구 방치 +``` + +### 수정 후 흐름 + +``` +DB 에러 → TransactionTemplate 롤백 → 예외 전파 → DefaultErrorHandler +→ 1초 후 재시도 (최대 3회) → 여전히 실패 → DLT 토픽으로 전송 +→ 오프셋 커밋 → 다음 메시지 처리 계속 +``` + +### 라이팅 포인트 + +"DLQ를 설정했다고 동작하는 게 아니다. 예외가 ErrorHandler까지 전파되는 경로가 확보되어야 한다. try-catch로 예외를 삼키면 아무리 정교한 에러 핸들링 체인도 무용지물이 된다." + +"같은 시스템 안에서도 Consumer마다 실패 허용 수준이 다르다. 집계 데이터의 실패와 쿠폰 발급의 실패는 비즈니스 임팩트가 다르고, 그 차이가 코드 구조에 반영되어야 한다." + +--- + +## 발견 및 수정: auto.offset.reset 설정 충돌 + +### 문제 + +kafka.yml에 같은 Kafka 속성이 두 곳에 선언되어 있었다: + +```yaml +# 전역 properties +properties: + auto: + offset.reset: latest # ← latest + +# consumer 전용 +consumer: + auto-offset-reset: earliest # ← earliest +``` + +Spring Boot에서 consumer 전용이 전역을 오버라이드하므로 `earliest`가 적용되지만, 의도와 다른 값이 혼재하면: +- 코드 리뷰 시 어느 값이 적용되는지 혼란 +- Spring Boot 버전 업그레이드 시 merge 순서 변경 리스크 +- 죽은 설정이 남아있으면 "이게 왜 있지?" 질문을 유발 + +### 수정 + +전역 properties에서 `offset.reset: latest` 제거. consumer 전용 `auto-offset-reset: earliest`만 유지. + +### 왜 earliest인가 + +우리 시스템은 새 Consumer Group 배포 시 **과거 메시지부터 처리**해야 한다: +- MetricsConsumer: 기존 이벤트를 모두 집계해야 product_metrics가 정확 +- CouponIssueConsumer: 발급 요청이 누락되면 사용자 불만 + +`latest`는 "현재 시점 이후"만 처리하므로, 배포 직전까지 쌓인 메시지를 모두 유실한다. `earliest`는 대량 과거 메시지 처리 부담이 있지만, INSERT-first 멱등 패턴으로 중복을 방지하므로 안전하다. + +### 라이팅 포인트 + +"설정 파일에 같은 속성이 두 곳에 다른 값으로 존재하면, 현재 동작이 맞더라도 시한폭탄이다. 설정은 하나의 진실만 가져야 한다." 
+ +--- + +## At-Least-Once와 멱등성의 관계 + +### 오프셋 커밋 타이밍과 메시지 보장 수준 + +| 시나리오 | MetricsConsumer | CouponIssueConsumer | +|---|---|---| +| 정상 처리 | exactly-once (멱등) | exactly-once (멱등) | +| 처리 중 crash (ack 전) | at-least-once → 멱등 스킵 | at-least-once → 멱등 스킵 | +| DB 에러로 처리 실패 | skip + ack (best-effort) | 재시도 3회 → DLQ | +| 리밸런싱으로 재전달 | at-least-once → 멱등 스킵 | at-least-once → 멱등 스킵 | + +### 멱등 패턴이 없으면 + +at-least-once는 "최소 한 번 처리"를 보장하지만, "정확히 한 번"은 보장하지 않는다. 멱등 패턴 없이 at-least-once를 사용하면: +- 좋아요 수가 중복 증가 +- 쿠폰이 중복 발급 +- 주문 집계가 뻥튀기 + +우리의 INSERT IGNORE event_handled 패턴은 "이미 처리한 이벤트인가?"를 DB 레벨에서 확인하여, at-least-once 전달 + exactly-once 처리를 달성한다. + +### 라이팅 포인트 + +"Kafka의 메시지 보장은 '전달(delivery)' 관점이다. at-least-once delivery가 at-least-once processing이 되지 않으려면, 컨슈머 측 멱등성이 필수다. 전달 보장과 처리 보장은 다른 레이어의 문제다." + +--- + +## 전체 오프셋 전략 요약 + +``` +┌──────────────────────────────────────────────────────┐ +│ 오프셋 관리 전략 │ +├──────────────────────────────────────────────────────┤ +│ [공통] │ +│ ├── enable-auto-commit: false │ +│ ├── ack-mode: MANUAL │ +│ ├── auto-offset-reset: earliest │ +│ └── isolation.level: read_committed │ +│ │ +│ [MetricsConsumer — best-effort] │ +│ ├── 실패 시: catch + log + skip │ +│ ├── 전체 배치 ack │ +│ ├── 보정: MetricsReconcile 배치 │ +│ └── 보장 수준: at-most-once (실패 시) + 배치 보정 │ +│ │ +│ [CouponIssueConsumer — 유실 불허] │ +│ ├── 실패 시: 예외 전파 → ErrorHandler │ +│ ├── 재시도: FixedBackOff(1초 × 3회) │ +│ ├── 최종 실패: DLT 토픽 전송 │ +│ └── 보장 수준: at-least-once + 멱등 │ +└──────────────────────────────────────────────────────┘ +``` diff --git a/modules/kafka/src/main/resources/kafka.yml b/modules/kafka/src/main/resources/kafka.yml index 14d9153ec..d45d780e9 100644 --- a/modules/kafka/src/main/resources/kafka.yml +++ b/modules/kafka/src/main/resources/kafka.yml @@ -9,7 +9,6 @@ spring: auto: create.topics.enable: false register.schemas: false - offset.reset: latest use.latest.version: true producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer From 
5791dda778f3aeedec35fb0df3872df46296ffe3 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 17:52:23 +0900 Subject: [PATCH 10/14] =?UTF-8?q?feat:=20DomainEventPublisher=20=EC=B6=94?= =?UTF-8?q?=EC=83=81=ED=99=94=EB=A1=9C=20=EC=9D=B4=EB=B2=A4=ED=8A=B8=20?= =?UTF-8?q?=EB=B0=9C=ED=96=89=20=EB=B3=B4=EC=9D=BC=EB=9F=AC=ED=94=8C?= =?UTF-8?q?=EB=A0=88=EC=9D=B4=ED=8A=B8=20=EC=A0=9C=EA=B1=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Facade가 EventOutboxRepository + ObjectMapper + ApplicationEventPublisher 3개를 직접 조합하던 패턴을 DomainEventPublisher 인터페이스로 추상화. - DomainEventPublisher 도메인 인터페이스 추가 - DomainEventPublisherImpl 구현체 (Outbox + Spring Event 캡슐화) - LikeFacade, OrderFacade 의존 3→1 축소 --- .../loopers/application/like/LikeFacade.java | 37 +++------- .../application/order/OrderFacade.java | 40 +++-------- .../domain/event/DomainEventPublisher.java | 5 ++ .../event/DomainEventPublisherImpl.java | 34 +++++++++ .../application/like/LikeFacadeTest.java | 72 +++++++------------ .../application/order/OrderFacadeTest.java | 6 +- .../event/DomainEventPublisherImplTest.java | 71 ++++++++++++++++++ 7 files changed, 157 insertions(+), 108 deletions(-) create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/event/DomainEventPublisher.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/event/DomainEventPublisherImpl.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/infrastructure/event/DomainEventPublisherImplTest.java diff --git a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java index e848ab866..66d772a2f 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java @@ -1,9 +1,6 @@ package 
com.loopers.application.like; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.loopers.domain.event.EventOutbox; -import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.DomainEventPublisher; import com.loopers.domain.event.LikeCreatedEvent; import com.loopers.domain.event.LikeRemovedEvent; import com.loopers.domain.like.Like; @@ -12,7 +9,6 @@ import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; -import org.springframework.context.ApplicationEventPublisher; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @@ -27,9 +23,7 @@ public class LikeFacade { private final LikeRepository likeRepository; private final ProductRepository productRepository; - private final EventOutboxRepository eventOutboxRepository; - private final ApplicationEventPublisher applicationEventPublisher; - private final ObjectMapper objectMapper; + private final DomainEventPublisher domainEventPublisher; @Transactional public void addLike(Long memberId, Long productId) { @@ -42,11 +36,9 @@ public void addLike(Long memberId, Long productId) { likeRepository.save(new Like(memberId, productId)); - EventOutbox outbox = EventOutbox.create("catalog", String.valueOf(productId), - "LIKE_CREATED", buildPayload(productId, memberId)); - eventOutboxRepository.save(outbox); - - applicationEventPublisher.publishEvent(new LikeCreatedEvent(productId, memberId)); + domainEventPublisher.publish("catalog", String.valueOf(productId), + "LIKE_CREATED", Map.of("productId", productId, "memberId", memberId), + new LikeCreatedEvent(productId, memberId)); } @Transactional @@ -58,25 +50,12 @@ public void removeLike(Long memberId, Long productId) { likeRepository.delete(likeOpt.get()); - EventOutbox outbox = EventOutbox.create("catalog", String.valueOf(productId), - "LIKE_REMOVED", 
buildPayload(productId, memberId)); - eventOutboxRepository.save(outbox); - - applicationEventPublisher.publishEvent(new LikeRemovedEvent(productId, memberId)); + domainEventPublisher.publish("catalog", String.valueOf(productId), + "LIKE_REMOVED", Map.of("productId", productId, "memberId", memberId), + new LikeRemovedEvent(productId, memberId)); } public List getLikesByMemberId(Long memberId) { return likeRepository.findAllByMemberId(memberId); } - - private String buildPayload(Long productId, Long memberId) { - try { - return objectMapper.writeValueAsString(Map.of( - "productId", productId, - "memberId", memberId - )); - } catch (JsonProcessingException e) { - throw new RuntimeException("이벤트 페이로드 직렬화 실패", e); - } - } } diff --git a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java index 767d276a8..086f40217 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java @@ -1,13 +1,10 @@ package com.loopers.application.order; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import com.loopers.application.coupon.CouponApplyResult; import com.loopers.application.coupon.CouponFacade; import com.loopers.domain.brand.Brand; import com.loopers.domain.brand.BrandRepository; -import com.loopers.domain.event.EventOutbox; -import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.DomainEventPublisher; import com.loopers.domain.event.OrderCancelledEvent; import com.loopers.domain.event.OrderCreatedEvent; import com.loopers.domain.event.OrderItemSnapshot; @@ -19,7 +16,6 @@ import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; -import org.springframework.context.ApplicationEventPublisher; 
import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @@ -40,9 +36,7 @@ public class OrderFacade { private final ProductRepository productRepository; private final BrandRepository brandRepository; private final CouponFacade couponFacade; - private final EventOutboxRepository eventOutboxRepository; - private final ApplicationEventPublisher applicationEventPublisher; - private final ObjectMapper objectMapper; + private final DomainEventPublisher domainEventPublisher; @Transactional public Order createOrder(Long memberId, List itemRequests) { @@ -125,11 +119,10 @@ public Order createOrder(Long memberId, List itemRequests, Lon .map(item -> new OrderItemSnapshot(item.getProductId(), item.getQuantity(), item.getProductPrice())) .toList(); - EventOutbox outbox = EventOutbox.create("order", String.valueOf(order.getId()), - "ORDER_CREATED", buildOrderPayload(order.getId(), memberId, eventItems)); - eventOutboxRepository.save(outbox); - - applicationEventPublisher.publishEvent(new OrderCreatedEvent(order.getId(), memberId, eventItems)); + domainEventPublisher.publish("order", String.valueOf(order.getId()), + "ORDER_CREATED", + Map.of("orderId", order.getId(), "memberId", memberId, "items", eventItems), + new OrderCreatedEvent(order.getId(), memberId, eventItems)); return order; } @@ -180,11 +173,10 @@ public void cancelOrder(Long orderId, Long memberId) { .map(item -> new OrderItemSnapshot(item.getProductId(), item.getQuantity(), item.getProductPrice())) .toList(); - EventOutbox outbox = EventOutbox.create("order", String.valueOf(orderId), - "ORDER_CANCELLED", buildOrderPayload(orderId, memberId, eventItems)); - eventOutboxRepository.save(outbox); - - applicationEventPublisher.publishEvent(new OrderCancelledEvent(orderId, memberId, eventItems)); + domainEventPublisher.publish("order", String.valueOf(orderId), + "ORDER_CANCELLED", + Map.of("orderId", orderId, "memberId", memberId, "items", eventItems), + new 
OrderCancelledEvent(orderId, memberId, eventItems)); } public List getOrdersByMemberId(Long memberId, ZonedDateTime startAt, ZonedDateTime endAt) { @@ -199,16 +191,4 @@ public List getAllOrders() { } public record OrderItemRequest(Long productId, int quantity) {} - - private String buildOrderPayload(Long orderId, Long memberId, List items) { - try { - return objectMapper.writeValueAsString(Map.of( - "orderId", orderId, - "memberId", memberId, - "items", items - )); - } catch (JsonProcessingException e) { - throw new RuntimeException("주문 이벤트 페이로드 직렬화 실패", e); - } - } } diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/event/DomainEventPublisher.java b/apps/commerce-api/src/main/java/com/loopers/domain/event/DomainEventPublisher.java new file mode 100644 index 000000000..3853f222e --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/event/DomainEventPublisher.java @@ -0,0 +1,5 @@ +package com.loopers.domain.event; + +public interface DomainEventPublisher { + void publish(String aggregateType, String aggregateId, String eventType, Object payload, Object event); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/DomainEventPublisherImpl.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/DomainEventPublisherImpl.java new file mode 100644 index 000000000..eb261cef6 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/DomainEventPublisherImpl.java @@ -0,0 +1,34 @@ +package com.loopers.infrastructure.event; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.DomainEventPublisher; +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import lombok.RequiredArgsConstructor; +import org.springframework.context.ApplicationEventPublisher; +import org.springframework.stereotype.Component; + +@Component 
+@RequiredArgsConstructor +public class DomainEventPublisherImpl implements DomainEventPublisher { + + private final EventOutboxRepository eventOutboxRepository; + private final ApplicationEventPublisher applicationEventPublisher; + private final ObjectMapper objectMapper; + + @Override + public void publish(String aggregateType, String aggregateId, String eventType, Object payload, Object event) { + String json = serializePayload(payload); + eventOutboxRepository.save(EventOutbox.create(aggregateType, aggregateId, eventType, json)); + applicationEventPublisher.publishEvent(event); + } + + private String serializePayload(Object payload) { + try { + return objectMapper.writeValueAsString(payload); + } catch (JsonProcessingException e) { + throw new RuntimeException("이벤트 페이로드 직렬화 실패", e); + } + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java index 69095d1f4..11fc250bd 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/like/LikeFacadeTest.java @@ -1,8 +1,6 @@ package com.loopers.application.like; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.loopers.domain.event.EventOutbox; -import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.DomainEventPublisher; import com.loopers.domain.event.LikeCreatedEvent; import com.loopers.domain.event.LikeRemovedEvent; import com.loopers.domain.like.Like; @@ -17,7 +15,6 @@ import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; -import org.springframework.context.ApplicationEventPublisher; import java.util.ArrayList; import java.util.List; @@ -30,22 +27,18 @@ class LikeFacadeTest { private LikeFacade likeFacade; private FakeLikeRepository likeRepository; private FakeProductRepository 
productRepository; - private List savedOutboxes; - private List publishedEvents; + private List publishedEvents; + + record PublishedEvent(String aggregateType, String aggregateId, String eventType, Object payload, Object event) {} @BeforeEach void setUp() { likeRepository = new FakeLikeRepository(); productRepository = new FakeProductRepository(); - savedOutboxes = new ArrayList<>(); publishedEvents = new ArrayList<>(); - EventOutboxRepository eventOutboxRepository = outbox -> { - savedOutboxes.add(outbox); - return outbox; - }; - ApplicationEventPublisher eventPublisher = publishedEvents::add; - likeFacade = new LikeFacade(likeRepository, productRepository, - eventOutboxRepository, eventPublisher, new ObjectMapper()); + DomainEventPublisher domainEventPublisher = (aggregateType, aggregateId, eventType, payload, event) -> + publishedEvents.add(new PublishedEvent(aggregateType, aggregateId, eventType, payload, event)); + likeFacade = new LikeFacade(likeRepository, productRepository, domainEventPublisher); } @Nested @@ -63,7 +56,6 @@ void addLike_savesLikeRecord_andIncrementsLikeCount() { assertThat(likeRepository.existsByMemberIdAndProductId(memberId, product.getId())).isTrue(); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(1); - // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("이미 좋아요한 상품에 다시 좋아요하면 멱등하게 처리된다 (likeCount 불변)") @@ -78,7 +70,6 @@ void addLike_whenAlreadyLiked_isIdempotent() { assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(1); assertThat(likeRepository.findAllByMemberId(memberId)).hasSize(1); - // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("존재하지 않는 상품에 좋아요하면 예외가 발생한다") @@ -101,7 +92,6 @@ void addLike_byMultipleMembers_accumulatesCount() { likeFacade.addLike(3L, product.getId()); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(3); - // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } } @@ -121,7 +111,6 @@ void removeLike_deletesLikeRecord_andDecrementsLikeCount() { 
assertThat(likeRepository.existsByMemberIdAndProductId(memberId, product.getId())).isFalse(); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(0); - // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } @DisplayName("좋아요하지 않은 상품의 좋아요를 취소해도 예외 없이 멱등하게 처리된다") @@ -133,78 +122,69 @@ void removeLike_whenNotLiked_isIdempotent() { likeFacade.removeLike(1L, product.getId()); assertThat(likeRepository.countByProductId(product.getId())).isEqualTo(0); - // likeCount는 이벤트 리스너에서 처리 (단위 테스트에서는 미검증) } } @Nested - @DisplayName("Outbox + 이벤트 발행 검증") - class OutboxAndEvent { + @DisplayName("DomainEventPublisher 호출 검증") + class DomainEventPublishing { - @DisplayName("좋아요 추가 시 EventOutbox가 저장되고 LikeCreatedEvent가 발행된다") + @DisplayName("좋아요 추가 시 DomainEventPublisher가 호출되고 LikeCreatedEvent가 발행된다") @Test - void addLike_savesOutboxAndPublishesEvent() { + void addLike_publishesDomainEvent() { Product product = productRepository.save( new Product(1L, "에어맥스", new Price(150000), new Stock(10))); likeFacade.addLike(1L, product.getId()); - assertThat(savedOutboxes).hasSize(1); - EventOutbox outbox = savedOutboxes.get(0); - assertThat(outbox.getAggregateType()).isEqualTo("catalog"); - assertThat(outbox.getAggregateId()).isEqualTo(String.valueOf(product.getId())); - assertThat(outbox.getEventType()).isEqualTo("LIKE_CREATED"); - assertThat(publishedEvents).hasSize(1); - assertThat(publishedEvents.get(0)).isInstanceOf(LikeCreatedEvent.class); - LikeCreatedEvent event = (LikeCreatedEvent) publishedEvents.get(0); + PublishedEvent published = publishedEvents.get(0); + assertThat(published.aggregateType()).isEqualTo("catalog"); + assertThat(published.aggregateId()).isEqualTo(String.valueOf(product.getId())); + assertThat(published.eventType()).isEqualTo("LIKE_CREATED"); + assertThat(published.event()).isInstanceOf(LikeCreatedEvent.class); + LikeCreatedEvent event = (LikeCreatedEvent) published.event(); assertThat(event.productId()).isEqualTo(product.getId()); 
assertThat(event.memberId()).isEqualTo(1L); } - @DisplayName("좋아요 취소 시 EventOutbox가 저장되고 LikeRemovedEvent가 발행된다") + @DisplayName("좋아요 취소 시 DomainEventPublisher가 호출되고 LikeRemovedEvent가 발행된다") @Test - void removeLike_savesOutboxAndPublishesEvent() { + void removeLike_publishesDomainEvent() { Product product = productRepository.save( new Product(1L, "에어맥스", new Price(150000), new Stock(10))); likeFacade.addLike(1L, product.getId()); - savedOutboxes.clear(); publishedEvents.clear(); likeFacade.removeLike(1L, product.getId()); - assertThat(savedOutboxes).hasSize(1); - EventOutbox outbox = savedOutboxes.get(0); - assertThat(outbox.getEventType()).isEqualTo("LIKE_REMOVED"); - assertThat(publishedEvents).hasSize(1); - assertThat(publishedEvents.get(0)).isInstanceOf(LikeRemovedEvent.class); + PublishedEvent published = publishedEvents.get(0); + assertThat(published.eventType()).isEqualTo("LIKE_REMOVED"); + assertThat(published.event()).isInstanceOf(LikeRemovedEvent.class); } - @DisplayName("이미 좋아요한 상품에 다시 좋아요하면 Outbox와 이벤트가 발행되지 않는다") + @DisplayName("이미 좋아요한 상품에 다시 좋아요하면 이벤트가 발행되지 않는다") @Test - void addLike_whenIdempotent_noOutboxOrEvent() { + void addLike_whenIdempotent_noEvent() { Product product = productRepository.save( new Product(1L, "에어맥스", new Price(150000), new Stock(10))); likeFacade.addLike(1L, product.getId()); - savedOutboxes.clear(); publishedEvents.clear(); likeFacade.addLike(1L, product.getId()); - assertThat(savedOutboxes).isEmpty(); assertThat(publishedEvents).isEmpty(); } - @DisplayName("좋아요하지 않은 상품을 취소하면 Outbox와 이벤트가 발행되지 않는다") + @DisplayName("좋아요하지 않은 상품을 취소하면 이벤트가 발행되지 않는다") @Test - void removeLike_whenNotLiked_noOutboxOrEvent() { + void removeLike_whenNotLiked_noEvent() { Product product = productRepository.save( new Product(1L, "에어맥스", new Price(150000), new Stock(10))); likeFacade.removeLike(1L, product.getId()); - assertThat(savedOutboxes).isEmpty(); assertThat(publishedEvents).isEmpty(); } } diff --git 
a/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java index 54fc96fcd..381209cff 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/order/OrderFacadeTest.java @@ -4,7 +4,7 @@ import com.loopers.application.coupon.CouponFacade; import com.loopers.domain.brand.Brand; import com.loopers.domain.coupon.*; -import com.loopers.domain.event.EventOutboxRepository; +import com.loopers.domain.event.DomainEventPublisher; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderItem; import com.loopers.domain.order.OrderStatus; @@ -54,9 +54,9 @@ void setUp() { KafkaTemplate kafkaTemplate = mock(KafkaTemplate.class); couponFacade = new CouponFacade(couponRepository, couponIssueRepository, issueRequestRepository, kafkaTemplate, new ObjectMapper(), Clock.systemDefaultZone()); - EventOutboxRepository eventOutboxRepository = outbox -> outbox; + DomainEventPublisher domainEventPublisher = (aggregateType, aggregateId, eventType, payload, event) -> {}; orderFacade = new OrderFacade(orderRepository, productRepository, brandRepository, - couponFacade, eventOutboxRepository, event -> {}, new ObjectMapper()); + couponFacade, domainEventPublisher); } @Nested diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/DomainEventPublisherImplTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/DomainEventPublisherImplTest.java new file mode 100644 index 000000000..5b6c6e354 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/DomainEventPublisherImplTest.java @@ -0,0 +1,71 @@ +package com.loopers.infrastructure.event; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.event.EventOutbox; +import com.loopers.domain.event.EventOutboxRepository; +import 
com.loopers.domain.event.LikeCreatedEvent; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.springframework.context.ApplicationEventPublisher; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +class DomainEventPublisherImplTest { + + private DomainEventPublisherImpl domainEventPublisher; + private List savedOutboxes; + private List publishedEvents; + + @BeforeEach + void setUp() { + savedOutboxes = new ArrayList<>(); + publishedEvents = new ArrayList<>(); + EventOutboxRepository eventOutboxRepository = outbox -> { + savedOutboxes.add(outbox); + return outbox; + }; + ApplicationEventPublisher applicationEventPublisher = publishedEvents::add; + domainEventPublisher = new DomainEventPublisherImpl( + eventOutboxRepository, applicationEventPublisher, new ObjectMapper()); + } + + @DisplayName("publish 호출 시 EventOutbox가 저장되고 ApplicationEvent가 발행된다") + @Test + void publish_savesOutboxAndPublishesEvent() { + Map payload = Map.of("productId", 1L, "memberId", 2L); + LikeCreatedEvent event = new LikeCreatedEvent(1L, 2L); + + domainEventPublisher.publish("catalog", "1", "LIKE_CREATED", payload, event); + + assertThat(savedOutboxes).hasSize(1); + EventOutbox outbox = savedOutboxes.get(0); + assertThat(outbox.getAggregateType()).isEqualTo("catalog"); + assertThat(outbox.getAggregateId()).isEqualTo("1"); + assertThat(outbox.getEventType()).isEqualTo("LIKE_CREATED"); + assertThat(outbox.getPayload()).contains("productId"); + assertThat(outbox.getPayload()).contains("memberId"); + + assertThat(publishedEvents).hasSize(1); + assertThat(publishedEvents.get(0)).isInstanceOf(LikeCreatedEvent.class); + } + + @DisplayName("직렬화 불가능한 payload 전달 시 RuntimeException이 발생한다") + @Test + void publish_withUnserializablePayload_throwsRuntimeException() { + 
Object unserializable = new Object() { + @SuppressWarnings("unused") + public Object getSelf() { return this; } + }; + + assertThatThrownBy(() -> + domainEventPublisher.publish("test", "1", "TEST", unserializable, new Object())) + .isInstanceOf(RuntimeException.class) + .hasMessageContaining("이벤트 페이로드 직렬화 실패"); + } +} From 723a328cf4ba4e9d805c261f9b36e80ce612c7bc Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 17:52:40 +0900 Subject: [PATCH 11/14] =?UTF-8?q?feat:=20ProductFacade.restoreStock()=20?= =?UTF-8?q?=EC=B6=94=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 결제 실패 시 재고 복원 로직(Redis INCR + DB increaseStock)을 Product 도메인 Facade에 캡슐화하여 cross-domain 직접 접근 제거. --- .../loopers/application/product/ProductFacade.java | 13 +++++++++++++ .../application/product/ProductFacadeTest.java | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java index 7a58a76fa..9fea3f358 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java @@ -9,6 +9,7 @@ import com.loopers.domain.product.vo.Stock; import com.loopers.domain.brand.Brand; import com.loopers.domain.brand.BrandRepository; +import com.loopers.infrastructure.redis.StockReservationRedisRepository; import com.loopers.interfaces.api.product.ProductDto; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; @@ -34,6 +35,7 @@ public class ProductFacade { private final LikeRepository likeRepository; private final ProductCachePort productCachePort; private final ApplicationEventPublisher applicationEventPublisher; + private final StockReservationRedisRepository 
stockRedisRepository; // ── 상품 상세 (캐시 적용) ── @@ -120,6 +122,17 @@ public List getAllProductsNoOptimization(String sort) { return results; } + // ── 재고 복원 (결제 실패/취소 시 호출) ── + + @Transactional + public void restoreStock(Long productId, int quantity) { + stockRedisRepository.increase(productId, quantity); + productRepository.findById(productId).ifPresent(product -> { + product.increaseStock(quantity); + productRepository.save(product); + }); + } + // ── 상품 CUD (캐시 무효화 포함) ── @Transactional diff --git a/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java b/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java index 1d0d57c77..7c3e29c78 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/product/ProductFacadeTest.java @@ -10,6 +10,7 @@ import com.loopers.fake.FakeLikeRepository; import com.loopers.fake.FakeProductCachePort; import com.loopers.fake.FakeProductRepository; +import com.loopers.fake.FakeStockReservationRedisRepository; import com.loopers.interfaces.api.product.ProductDto; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; @@ -38,7 +39,7 @@ void setUp() { brandRepository = new FakeBrandRepository(); likeRepository = new FakeLikeRepository(); productRepository.setBrandRepository(brandRepository); - productFacade = new ProductFacade(productRepository, brandRepository, likeRepository, new FakeProductCachePort(), event -> {}); + productFacade = new ProductFacade(productRepository, brandRepository, likeRepository, new FakeProductCachePort(), event -> {}, new FakeStockReservationRedisRepository()); } @Nested From 213ca062f0f5ef7a77179160a9feedb72b7fa16e Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 18:00:44 +0900 Subject: [PATCH 12/14] 
=?UTF-8?q?feat:=20PaymentStatusHistory=20=EA=B0=90?= =?UTF-8?q?=EC=82=AC=20=EB=A1=9C=EA=B7=B8=20=EB=8F=84=EC=9E=85=20=E2=80=94?= =?UTF-8?q?=20=EA=B2=B0=EC=A0=9C=20=EC=83=81=ED=83=9C=20=EC=A0=84=EC=9D=B4?= =?UTF-8?q?=20=EC=B6=94=EC=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Event Sourcing 경량 적용으로 payment_status_history 테이블 추가. 5-layer recovery 구조에서 상태 변경 경로를 추적할 수 없던 문제 해결. - PaymentStatusHistory 엔티티 + 리포지토리 추가 - PaymentModel에 @Transient pendingTransitions 전이 리스트 추가 - PaymentRepositoryImpl.save()에서 History 자동 기록 - 단위 테스트 + Fake 리포지토리 추가 --- .../loopers/domain/payment/PaymentModel.java | 25 ++++++++ .../domain/payment/PaymentStatusHistory.java | 52 ++++++++++++++++ .../PaymentStatusHistoryRepository.java | 8 +++ .../payment/PaymentRepositoryImpl.java | 10 ++- .../PaymentStatusHistoryJpaRepository.java | 10 +++ .../PaymentStatusHistoryRepositoryImpl.java | 25 ++++++++ .../domain/payment/PaymentModelTest.java | 61 ++++++++++++++++++ .../payment/PaymentStatusHistoryTest.java | 35 +++++++++++ .../FakePaymentStatusHistoryRepository.java | 62 +++++++++++++++++++ 9 files changed, 287 insertions(+), 1 deletion(-) create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistory.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistoryRepository.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryJpaRepository.java create mode 100644 apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryRepositoryImpl.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentStatusHistoryTest.java create mode 100644 apps/commerce-api/src/test/java/com/loopers/fake/FakePaymentStatusHistoryRepository.java diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentModel.java 
b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentModel.java index b9e230742..71c673341 100644 --- a/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentModel.java +++ b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentModel.java @@ -8,6 +8,10 @@ import lombok.Getter; import lombok.NoArgsConstructor; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + @Entity @Table(name = "payments", indexes = { @Index(name = "idx_payments_order_id", columnList = "order_id"), @@ -45,6 +49,19 @@ public class PaymentModel extends BaseEntity { @Column(name = "failure_reason") private String failureReason; + @Transient + private final List pendingTransitions = new ArrayList<>(); + + public record StatusTransition(PaymentStatus from, PaymentStatus to, String reason, String detail) {} + + public List getPendingTransitions() { + return Collections.unmodifiableList(pendingTransitions); + } + + public void clearPendingTransitions() { + pendingTransitions.clear(); + } + public static PaymentModel create(Long orderId, int amount, String cardType, String cardNo) { PaymentModel payment = new PaymentModel(); payment.orderId = orderId; @@ -56,26 +73,34 @@ public static PaymentModel create(Long orderId, int amount, String cardType, Str } public void markPending(String transactionKey, String pgProvider) { + PaymentStatus from = this.status; validateTransition(PaymentStatus.PENDING); this.status = PaymentStatus.PENDING; this.transactionKey = transactionKey; this.pgProvider = pgProvider; + pendingTransitions.add(new StatusTransition(from, PaymentStatus.PENDING, "PG_RESPONSE", null)); } public void markPaid() { + PaymentStatus from = this.status; validateTransition(PaymentStatus.PAID); this.status = PaymentStatus.PAID; + pendingTransitions.add(new StatusTransition(from, PaymentStatus.PAID, "PG_RESPONSE", null)); } public void markFailed(String reason) { + PaymentStatus from = this.status; 
validateTransition(PaymentStatus.FAILED); this.status = PaymentStatus.FAILED; this.failureReason = reason; + pendingTransitions.add(new StatusTransition(from, PaymentStatus.FAILED, "PG_RESPONSE", reason)); } public void markUnknown() { + PaymentStatus from = this.status; validateTransition(PaymentStatus.UNKNOWN); this.status = PaymentStatus.UNKNOWN; + pendingTransitions.add(new StatusTransition(from, PaymentStatus.UNKNOWN, "PG_RESPONSE", null)); } private void validateTransition(PaymentStatus target) { diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistory.java b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistory.java new file mode 100644 index 000000000..c7c023437 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistory.java @@ -0,0 +1,52 @@ +package com.loopers.domain.payment; + +import com.loopers.domain.BaseEntity; +import jakarta.persistence.*; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.NoArgsConstructor; + +/** + * 결제 상태 전이 감사 로그. + * + *

Event Sourcing의 경량 적용 — 모든 결제 상태 전이를 INSERT-only 로그로 기록한다. + * 5-layer recovery 구조에서 "언제, 어떤 경로로, 왜 상태가 바뀌었는가"를 추적한다.

+ * + * @see PaymentStatus + */ +@Entity +@Table(name = "payment_status_history", indexes = { + @Index(name = "idx_psh_payment_id", columnList = "payment_id") +}) +@Getter +@NoArgsConstructor(access = AccessLevel.PROTECTED) +public class PaymentStatusHistory extends BaseEntity { + + @Column(name = "payment_id", nullable = false) + private Long paymentId; + + @Enumerated(EnumType.STRING) + @Column(name = "from_status", nullable = false) + private PaymentStatus fromStatus; + + @Enumerated(EnumType.STRING) + @Column(name = "to_status", nullable = false) + private PaymentStatus toStatus; + + @Column(name = "reason", nullable = false, length = 50) + private String reason; + + @Column(name = "detail", length = 500) + private String detail; + + public static PaymentStatusHistory create(Long paymentId, PaymentStatus fromStatus, + PaymentStatus toStatus, String reason, String detail) { + PaymentStatusHistory h = new PaymentStatusHistory(); + h.paymentId = paymentId; + h.fromStatus = fromStatus; + h.toStatus = toStatus; + h.reason = reason; + h.detail = detail; + return h; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistoryRepository.java b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistoryRepository.java new file mode 100644 index 000000000..aef8dae62 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/payment/PaymentStatusHistoryRepository.java @@ -0,0 +1,8 @@ +package com.loopers.domain.payment; + +import java.util.List; + +public interface PaymentStatusHistoryRepository { + PaymentStatusHistory save(PaymentStatusHistory history); + List findAllByPaymentId(Long paymentId); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentRepositoryImpl.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentRepositoryImpl.java index 031d0a247..b043629e4 100644 --- 
a/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentRepositoryImpl.java +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentRepositoryImpl.java @@ -3,6 +3,7 @@ import com.loopers.domain.payment.PaymentModel; import com.loopers.domain.payment.PaymentRepository; import com.loopers.domain.payment.PaymentStatus; +import com.loopers.domain.payment.PaymentStatusHistory; import lombok.RequiredArgsConstructor; import org.springframework.stereotype.Repository; @@ -14,10 +15,17 @@ public class PaymentRepositoryImpl implements PaymentRepository { private final PaymentJpaRepository paymentJpaRepository; + private final PaymentStatusHistoryJpaRepository historyJpaRepository; @Override public PaymentModel save(PaymentModel payment) { - return paymentJpaRepository.save(payment); + PaymentModel saved = paymentJpaRepository.save(payment); + for (PaymentModel.StatusTransition t : payment.getPendingTransitions()) { + historyJpaRepository.save(PaymentStatusHistory.create( + saved.getId(), t.from(), t.to(), t.reason(), t.detail())); + } + payment.clearPendingTransitions(); + return saved; } @Override diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryJpaRepository.java new file mode 100644 index 000000000..63ebf0492 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryJpaRepository.java @@ -0,0 +1,10 @@ +package com.loopers.infrastructure.payment; + +import com.loopers.domain.payment.PaymentStatusHistory; +import org.springframework.data.jpa.repository.JpaRepository; + +import java.util.List; + +public interface PaymentStatusHistoryJpaRepository extends JpaRepository { + List findAllByPaymentIdOrderByCreatedAtAsc(Long paymentId); +} diff --git 
a/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryRepositoryImpl.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryRepositoryImpl.java new file mode 100644 index 000000000..7aa128179 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/payment/PaymentStatusHistoryRepositoryImpl.java @@ -0,0 +1,25 @@ +package com.loopers.infrastructure.payment; + +import com.loopers.domain.payment.PaymentStatusHistory; +import com.loopers.domain.payment.PaymentStatusHistoryRepository; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +@RequiredArgsConstructor +public class PaymentStatusHistoryRepositoryImpl implements PaymentStatusHistoryRepository { + + private final PaymentStatusHistoryJpaRepository jpaRepository; + + @Override + public PaymentStatusHistory save(PaymentStatusHistory history) { + return jpaRepository.save(history); + } + + @Override + public List findAllByPaymentId(Long paymentId) { + return jpaRepository.findAllByPaymentIdOrderByCreatedAtAsc(paymentId); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentModelTest.java b/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentModelTest.java index a594e57a0..e99d9daa0 100644 --- a/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentModelTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentModelTest.java @@ -153,4 +153,65 @@ void paid_toUnknown_throwsException() { .isEqualTo(ErrorType.BAD_REQUEST); } } + + @Nested + @DisplayName("상태 전이 이력 추적") + class TransitionTracking { + + @DisplayName("markPending → pendingTransitions에 REQUESTED→PENDING 기록") + @Test + void markPending_recordsTransition() { + PaymentModel payment = PaymentModel.create(1L, 5000, "SAMSUNG", "1234-5678-9012-3456"); + + payment.markPending("TX-001", "SIMULATOR"); + + 
assertThat(payment.getPendingTransitions()).hasSize(1); + PaymentModel.StatusTransition t = payment.getPendingTransitions().get(0); + assertThat(t.from()).isEqualTo(PaymentStatus.REQUESTED); + assertThat(t.to()).isEqualTo(PaymentStatus.PENDING); + assertThat(t.reason()).isEqualTo("PG_RESPONSE"); + } + + @DisplayName("markPending → markPaid 연속 호출 시 2개 전이 기록") + @Test + void markPending_thenMarkPaid_recordsTwoTransitions() { + PaymentModel payment = PaymentModel.create(1L, 5000, "SAMSUNG", "1234-5678-9012-3456"); + + payment.markPending("TX-001", "SIMULATOR"); + payment.markPaid(); + + assertThat(payment.getPendingTransitions()).hasSize(2); + assertThat(payment.getPendingTransitions().get(0).from()).isEqualTo(PaymentStatus.REQUESTED); + assertThat(payment.getPendingTransitions().get(0).to()).isEqualTo(PaymentStatus.PENDING); + assertThat(payment.getPendingTransitions().get(1).from()).isEqualTo(PaymentStatus.PENDING); + assertThat(payment.getPendingTransitions().get(1).to()).isEqualTo(PaymentStatus.PAID); + } + + @DisplayName("markFailed — detail에 실패 사유 포함") + @Test + void markFailed_recordsDetailWithReason() { + PaymentModel payment = PaymentModel.create(1L, 5000, "SAMSUNG", "1234-5678-9012-3456"); + payment.markPending("TX-001", "SIMULATOR"); + + payment.markFailed("한도초과"); + + PaymentModel.StatusTransition t = payment.getPendingTransitions().get(1); + assertThat(t.from()).isEqualTo(PaymentStatus.PENDING); + assertThat(t.to()).isEqualTo(PaymentStatus.FAILED); + assertThat(t.detail()).isEqualTo("한도초과"); + } + + @DisplayName("clearPendingTransitions — 전이 리스트 초기화") + @Test + void clearPendingTransitions_clearsAll() { + PaymentModel payment = PaymentModel.create(1L, 5000, "SAMSUNG", "1234-5678-9012-3456"); + payment.markPending("TX-001", "SIMULATOR"); + payment.markPaid(); + assertThat(payment.getPendingTransitions()).hasSize(2); + + payment.clearPendingTransitions(); + + assertThat(payment.getPendingTransitions()).isEmpty(); + } + } } diff --git 
a/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentStatusHistoryTest.java b/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentStatusHistoryTest.java new file mode 100644 index 000000000..85459cd88 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/domain/payment/PaymentStatusHistoryTest.java @@ -0,0 +1,35 @@ +package com.loopers.domain.payment; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +class PaymentStatusHistoryTest { + + @DisplayName("create() — 모든 필드가 정확히 설정된다") + @Test + void create_allFieldsSet() { + PaymentStatusHistory history = PaymentStatusHistory.create( + 1L, PaymentStatus.PENDING, PaymentStatus.PAID, "CALLBACK", null); + + assertThat(history.getPaymentId()).isEqualTo(1L); + assertThat(history.getFromStatus()).isEqualTo(PaymentStatus.PENDING); + assertThat(history.getToStatus()).isEqualTo(PaymentStatus.PAID); + assertThat(history.getReason()).isEqualTo("CALLBACK"); + assertThat(history.getDetail()).isNull(); + } + + @DisplayName("create() — detail 포함") + @Test + void create_withDetail() { + PaymentStatusHistory history = PaymentStatusHistory.create( + 2L, PaymentStatus.PENDING, PaymentStatus.FAILED, "POLLING", "한도 초과"); + + assertThat(history.getPaymentId()).isEqualTo(2L); + assertThat(history.getFromStatus()).isEqualTo(PaymentStatus.PENDING); + assertThat(history.getToStatus()).isEqualTo(PaymentStatus.FAILED); + assertThat(history.getReason()).isEqualTo("POLLING"); + assertThat(history.getDetail()).isEqualTo("한도 초과"); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/fake/FakePaymentStatusHistoryRepository.java b/apps/commerce-api/src/test/java/com/loopers/fake/FakePaymentStatusHistoryRepository.java new file mode 100644 index 000000000..08243d4cc --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/fake/FakePaymentStatusHistoryRepository.java @@ -0,0 +1,62 @@ +package 
com.loopers.fake; + +import com.loopers.domain.BaseEntity; +import com.loopers.domain.payment.PaymentStatusHistory; +import com.loopers.domain.payment.PaymentStatusHistoryRepository; + +import java.lang.reflect.Field; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class FakePaymentStatusHistoryRepository implements PaymentStatusHistoryRepository { + + private final Map store = new ConcurrentHashMap<>(); + private long sequence = 1L; + + @Override + public PaymentStatusHistory save(PaymentStatusHistory history) { + if (history.getId() == null || history.getId() == 0L) { + long id = sequence++; + setBaseEntityId(history, id); + } + setCreatedAtIfAbsent(history); + store.put(history.getId(), history); + return history; + } + + @Override + public List findAllByPaymentId(Long paymentId) { + return store.values().stream() + .filter(h -> h.getPaymentId().equals(paymentId)) + .toList(); + } + + public List findAll() { + return new ArrayList<>(store.values()); + } + + private void setBaseEntityId(Object entity, long id) { + try { + Field idField = BaseEntity.class.getDeclaredField("id"); + idField.setAccessible(true); + idField.set(entity, id); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void setCreatedAtIfAbsent(Object entity) { + try { + Field createdAtField = BaseEntity.class.getDeclaredField("createdAt"); + createdAtField.setAccessible(true); + if (createdAtField.get(entity) == null) { + createdAtField.set(entity, ZonedDateTime.now()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} From 204542c4fa080425a899baa6cf04077fb8d97345 Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 18:01:52 +0900 Subject: [PATCH 13/14] =?UTF-8?q?feat:=205-layer=20recovery=20=EA=B2=BD?= 
=?UTF-8?q?=EB=A1=9C=EC=97=90=20PaymentStatusHistory=20=EA=B8=B0=EB=A1=9D?= =?UTF-8?q?=20=EC=A0=81=EC=9A=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CALLBACK, POLLING, WAL_RECOVERY, BATCH_RECOVERY 모든 복구 경로에서 상태 전이 이력을 기록하도록 적용. - PaymentRecoveryService: Facade 위임 + CALLBACK/POLLING History 기록 - WalRecoveryScheduler: WAL_RECOVERY History 기록 - PaymentRecoveryTasklet: SELECT→History INSERT→Status UPDATE 패턴 리팩토링 - 기존 테스트 7개 호환성 수정 --- .../payment/PaymentRecoveryService.java | 41 +++-- .../scheduler/WalRecoveryScheduler.java | 5 + .../payment/CallbackMissFaultTest.java | 27 +++- .../payment/DbFailureFaultTest.java | 2 +- .../payment/GhostPaymentFaultTest.java | 27 +++- .../payment/ManualRecoveryTest.java | 27 +++- .../payment/PaymentCallbackTest.java | 26 +++- .../payment/PaymentRecoveryServiceTest.java | 28 +++- .../scheduler/CallbackDlqSchedulerTest.java | 27 +++- .../step/PaymentRecoveryTasklet.java | 140 +++++++++++++----- 10 files changed, 271 insertions(+), 79 deletions(-) diff --git a/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentRecoveryService.java b/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentRecoveryService.java index e0a55748a..a949de62a 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentRecoveryService.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentRecoveryService.java @@ -1,16 +1,13 @@ package com.loopers.application.payment; -import com.loopers.domain.coupon.CouponIssue; -import com.loopers.domain.coupon.CouponIssueRepository; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderItem; import com.loopers.domain.order.OrderRepository; import com.loopers.domain.payment.*; -import com.loopers.domain.product.Product; -import 
com.loopers.domain.product.ProductRepository; import com.loopers.infrastructure.pg.PgPaymentStatusResponse; import com.loopers.infrastructure.pg.PgRouter; -import com.loopers.infrastructure.redis.StockReservationRedisRepository; import com.loopers.support.error.CoreException; import com.loopers.support.error.ErrorType; import lombok.RequiredArgsConstructor; @@ -30,7 +27,7 @@ *
  • CallbackInbox에 원본 저장 (RECEIVED)
  • *
  • 조건부 UPDATE로 Payment 상태 전이
  • *
  • SUCCESS → Order.pay() + Inbox PROCESSED
  • - *
  • FAILED → 재고 복원(Redis INCR + DB) + 쿠폰 복원 + Inbox PROCESSED
  • + *
  • FAILED → 재고 복원(ProductFacade) + 쿠폰 복원(CouponFacade) + Inbox PROCESSED
  • * * *

    Polling Hybrid: PENDING/UNKNOWN 상태 결제건을 주기적으로 PG 확인

    @@ -44,11 +41,11 @@ public class PaymentRecoveryService { private final PaymentRepository paymentRepository; + private final PaymentStatusHistoryRepository historyRepository; private final CallbackInboxRepository callbackInboxRepository; private final OrderRepository orderRepository; - private final ProductRepository productRepository; - private final CouponIssueRepository couponIssueRepository; - private final StockReservationRedisRepository stockRedisRepository; + private final ProductFacade productFacade; + private final CouponFacade couponFacade; private final PgRouter pgRouter; /** @@ -102,6 +99,10 @@ private void processPaymentTransition(PaymentModel payment, String pgStatus, Cal return; } + // 상태 전이 이력 기록 + historyRepository.save(PaymentStatusHistory.create( + payment.getId(), payment.getStatus(), targetStatus, "CALLBACK", null)); + // 상태 전이 성공 if (targetStatus == PaymentStatus.PAID) { handlePaymentSuccess(payment); @@ -127,23 +128,16 @@ private void handlePaymentFailure(PaymentModel payment) { Order order = orderRepository.findById(payment.getOrderId()).orElse(null); if (order == null) return; - // 재고 복원 (Redis INCR + DB) + // 재고 복원 → ProductFacade 위임 for (OrderItem item : order.getItems()) { - stockRedisRepository.increase(item.getProductId(), item.getQuantity()); - productRepository.findById(item.getProductId()).ifPresent(product -> { - product.increaseStock(item.getQuantity()); - productRepository.save(product); - }); + productFacade.restoreStock(item.getProductId(), item.getQuantity()); } log.info("재고 복원 완료: orderId={}", order.getId()); - // 쿠폰 복원 + // 쿠폰 복원 → CouponFacade 위임 if (order.getCouponIssueId() != null) { - couponIssueRepository.findById(order.getCouponIssueId()).ifPresent(couponIssue -> { - couponIssue.cancelUse(ZonedDateTime.now()); - couponIssueRepository.save(couponIssue); - log.info("쿠폰 복원 완료: couponIssueId={}", couponIssue.getId()); - }); + couponFacade.restoreCoupon(order.getCouponIssueId()); + log.info("쿠폰 복원 완료: couponIssueId={}", 
order.getCouponIssueId()); } } @@ -219,6 +213,8 @@ private void pollPgStatus(PaymentModel payment) { int affected = paymentRepository.updateStatusConditionally( payment.getId(), PaymentStatus.PAID, allowedStatuses); if (affected > 0) { + historyRepository.save(PaymentStatusHistory.create( + payment.getId(), payment.getStatus(), PaymentStatus.PAID, "POLLING", null)); handlePaymentSuccess(payment); log.info("Polling 복구 성공: paymentId={}, → PAID", payment.getId()); } @@ -227,6 +223,9 @@ private void pollPgStatus(PaymentModel payment) { int affected = paymentRepository.updateStatusConditionally( payment.getId(), PaymentStatus.FAILED, allowedStatuses); if (affected > 0) { + historyRepository.save(PaymentStatusHistory.create( + payment.getId(), payment.getStatus(), PaymentStatus.FAILED, + "POLLING", pgStatus.reason())); handlePaymentFailure(payment); log.info("Polling 복구: paymentId={}, → FAILED (reason={})", payment.getId(), pgStatus.reason()); diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/scheduler/WalRecoveryScheduler.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/scheduler/WalRecoveryScheduler.java index db6a4bb33..406bcc46d 100644 --- a/apps/commerce-api/src/main/java/com/loopers/infrastructure/scheduler/WalRecoveryScheduler.java +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/scheduler/WalRecoveryScheduler.java @@ -3,6 +3,8 @@ import com.loopers.domain.payment.PaymentModel; import com.loopers.domain.payment.PaymentRepository; import com.loopers.domain.payment.PaymentStatus; +import com.loopers.domain.payment.PaymentStatusHistory; +import com.loopers.domain.payment.PaymentStatusHistoryRepository; import com.loopers.infrastructure.payment.PaymentWalWriter; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -27,6 +29,7 @@ public class WalRecoveryScheduler { private final PaymentWalWriter walWriter; private final PaymentRepository paymentRepository; + private final 
PaymentStatusHistoryRepository historyRepository; @Scheduled(fixedRate = 10_000) public void recoverFromWal() { @@ -78,6 +81,8 @@ private void processWalFile(Path walFile) { int affected = paymentRepository.updateStatusConditionally(payment.getId(), targetStatus, allowedStatuses); if (affected > 0) { + historyRepository.save(PaymentStatusHistory.create( + payment.getId(), payment.getStatus(), targetStatus, "WAL_RECOVERY", null)); log.info("WAL Recovery 성공: paymentId={}, newStatus={}", payment.getId(), targetStatus); } diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/CallbackMissFaultTest.java b/apps/commerce-api/src/test/java/com/loopers/application/payment/CallbackMissFaultTest.java index 1f922a82a..964793412 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/CallbackMissFaultTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/CallbackMissFaultTest.java @@ -1,6 +1,11 @@ package com.loopers.application.payment; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; import com.loopers.domain.BaseEntity; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderStatus; import com.loopers.domain.payment.*; @@ -10,12 +15,16 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.lang.reflect.Field; +import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; /** * F7-3: 콜백 미수신 → Polling Hybrid 복구 시나리오. 
@@ -32,20 +41,32 @@ class CallbackMissFaultTest { private FakeOrderRepository orderRepository; private FakePgClient pgClient; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { paymentRepository = new FakePaymentRepository(); orderRepository = new FakeOrderRepository(); FakeCallbackInboxRepository callbackInboxRepository = new FakeCallbackInboxRepository(); FakeProductRepository productRepository = new FakeProductRepository(); - FakeCouponIssueRepository couponIssueRepository = new FakeCouponIssueRepository(); FakeStockReservationRedisRepository stockRedisRepository = new FakeStockReservationRedisRepository(); pgClient = new FakePgClient("SIMULATOR"); PgRouter pgRouter = new PgRouter(List.of(pgClient)); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), new FakeCouponIssueRepository(), + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); } @DisplayName("F7-3: PENDING → 콜백 미수신 → 10초 후 Polling → PG SUCCESS → PAID") diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/DbFailureFaultTest.java b/apps/commerce-api/src/test/java/com/loopers/application/payment/DbFailureFaultTest.java index 
b73dd569b..f66ba5102 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/DbFailureFaultTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/DbFailureFaultTest.java @@ -37,7 +37,7 @@ class DbFailureFaultTest { void setUp() { paymentRepository = new FakePaymentRepository(); walWriter = new PaymentWalWriter(tempDir.toString(), new ObjectMapper()); - walRecovery = new WalRecoveryScheduler(walWriter, paymentRepository); + walRecovery = new WalRecoveryScheduler(walWriter, paymentRepository, new com.loopers.fake.FakePaymentStatusHistoryRepository()); } @DisplayName("F7-4: PG SUCCESS → DB 실패 → WAL 기록 → WAL Recovery → PAID") diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/GhostPaymentFaultTest.java b/apps/commerce-api/src/test/java/com/loopers/application/payment/GhostPaymentFaultTest.java index 367ca6d68..fc458da52 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/GhostPaymentFaultTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/GhostPaymentFaultTest.java @@ -1,5 +1,10 @@ package com.loopers.application.payment; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderStatus; import com.loopers.domain.payment.*; @@ -9,12 +14,16 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.lang.reflect.Field; +import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; /** * 
F7-1: 유령 결제 복구 시나리오. @@ -34,6 +43,7 @@ class GhostPaymentFaultTest { private FakePaymentOutboxRepository outboxRepository; private FakePgClient pgClient; + @SuppressWarnings("unchecked") @BeforeEach void setUp() throws Exception { paymentRepository = new FakePaymentRepository(); @@ -41,7 +51,6 @@ void setUp() throws Exception { outboxRepository = new FakePaymentOutboxRepository(); FakeCallbackInboxRepository callbackInboxRepository = new FakeCallbackInboxRepository(); FakeProductRepository productRepository = new FakeProductRepository(); - FakeCouponIssueRepository couponIssueRepository = new FakeCouponIssueRepository(); FakeStockReservationRedisRepository stockRedisRepository = new FakeStockReservationRedisRepository(); pgClient = new FakePgClient("SIMULATOR"); PgRouter pgRouter = new PgRouter(List.of(pgClient)); @@ -52,9 +61,21 @@ void setUp() throws Exception { setField(paymentFacade, "initialWaitMs", 0L); setField(paymentFacade, "backoffMultiplier", 2); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), new FakeCouponIssueRepository(), + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); } @DisplayName("F7-1: 
타임아웃 → UNKNOWN → Polling → PG SUCCESS 발견 → PAID 복구") diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/ManualRecoveryTest.java b/apps/commerce-api/src/test/java/com/loopers/application/payment/ManualRecoveryTest.java index 46d50e490..487decb34 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/ManualRecoveryTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/ManualRecoveryTest.java @@ -1,5 +1,10 @@ package com.loopers.application.payment; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.payment.*; import com.loopers.domain.product.Product; @@ -11,10 +16,14 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; +import java.time.Clock; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; class ManualRecoveryTest { @@ -23,20 +32,32 @@ class ManualRecoveryTest { private FakeOrderRepository orderRepository; private FakePgClient pgClient; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { paymentRepository = new FakePaymentRepository(); FakeCallbackInboxRepository callbackInboxRepository = new FakeCallbackInboxRepository(); orderRepository = new FakeOrderRepository(); FakeProductRepository productRepository = new FakeProductRepository(); - FakeCouponIssueRepository couponIssueRepository = new FakeCouponIssueRepository(); FakeStockReservationRedisRepository stockRedisRepository = new FakeStockReservationRedisRepository(); pgClient = new FakePgClient("SIMULATOR"); PgRouter pgRouter 
= new PgRouter(List.of(pgClient)); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), new FakeCouponIssueRepository(), + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); } @DisplayName("U5-9: confirm API → PG 조회 → PAID 전이") diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentCallbackTest.java b/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentCallbackTest.java index 3ece97d17..2088623e3 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentCallbackTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentCallbackTest.java @@ -1,6 +1,11 @@ package com.loopers.application.payment; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; import com.loopers.domain.coupon.CouponIssue; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.order.OrderStatus; import 
com.loopers.domain.payment.*; @@ -12,11 +17,15 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; +import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; class PaymentCallbackTest { @@ -29,6 +38,7 @@ class PaymentCallbackTest { private FakeStockReservationRedisRepository stockRedisRepository; private FakePgClient pgClient; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { paymentRepository = new FakePaymentRepository(); @@ -41,9 +51,21 @@ void setUp() { PgRouter pgRouter = new PgRouter(List.of(pgClient)); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), couponIssueRepository, + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); } private Order createOrderWithProduct() { diff --git a/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentRecoveryServiceTest.java 
b/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentRecoveryServiceTest.java index 98ef0106a..2c7b517a8 100644 --- a/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentRecoveryServiceTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/application/payment/PaymentRecoveryServiceTest.java @@ -1,6 +1,11 @@ package com.loopers.application.payment; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.application.product.ProductFacade; import com.loopers.domain.BaseEntity; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.payment.*; import com.loopers.domain.product.Product; @@ -12,12 +17,16 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.lang.reflect.Field; +import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; class PaymentRecoveryServiceTest { @@ -26,25 +35,36 @@ class PaymentRecoveryServiceTest { private FakeCallbackInboxRepository callbackInboxRepository; private FakeOrderRepository orderRepository; private FakeProductRepository productRepository; - private FakeCouponIssueRepository couponIssueRepository; private FakeStockReservationRedisRepository stockRedisRepository; private FakePgClient pgClient; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { paymentRepository = new FakePaymentRepository(); callbackInboxRepository = new FakeCallbackInboxRepository(); orderRepository = new FakeOrderRepository(); productRepository = new FakeProductRepository(); - couponIssueRepository = new FakeCouponIssueRepository(); 
stockRedisRepository = new FakeStockReservationRedisRepository(); pgClient = new FakePgClient("SIMULATOR"); PgRouter pgRouter = new PgRouter(List.of(pgClient)); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), new FakeCouponIssueRepository(), + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); } @DisplayName("U4-5: Polling — PG SUCCESS → PAID 전이") diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/scheduler/CallbackDlqSchedulerTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/scheduler/CallbackDlqSchedulerTest.java index 0fd017cdd..e8d428515 100644 --- a/apps/commerce-api/src/test/java/com/loopers/infrastructure/scheduler/CallbackDlqSchedulerTest.java +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/scheduler/CallbackDlqSchedulerTest.java @@ -1,7 +1,12 @@ package com.loopers.infrastructure.scheduler; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; import com.loopers.application.payment.PaymentRecoveryService; +import com.loopers.application.product.ProductFacade; import 
com.loopers.domain.BaseEntity; +import com.loopers.domain.coupon.CouponIssueRequest; +import com.loopers.domain.coupon.CouponIssueRequestRepository; import com.loopers.domain.order.Order; import com.loopers.domain.payment.*; import com.loopers.fake.*; @@ -9,12 +14,16 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.springframework.kafka.core.KafkaTemplate; import java.lang.reflect.Field; +import java.time.Clock; import java.time.ZonedDateTime; import java.util.List; +import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; class CallbackDlqSchedulerTest { @@ -23,20 +32,32 @@ class CallbackDlqSchedulerTest { private FakePaymentRepository paymentRepository; private FakeOrderRepository orderRepository; + @SuppressWarnings("unchecked") @BeforeEach void setUp() { callbackInboxRepository = new FakeCallbackInboxRepository(); paymentRepository = new FakePaymentRepository(); orderRepository = new FakeOrderRepository(); FakeProductRepository productRepository = new FakeProductRepository(); - FakeCouponIssueRepository couponIssueRepository = new FakeCouponIssueRepository(); FakeStockReservationRedisRepository stockRedisRepository = new FakeStockReservationRedisRepository(); FakePgClient pgClient = new FakePgClient("SIMULATOR"); PgRouter pgRouter = new PgRouter(List.of(pgClient)); + ProductFacade productFacade = new ProductFacade( + productRepository, new FakeBrandRepository(), new FakeLikeRepository(), + new FakeProductCachePort(), event -> {}, stockRedisRepository); + + CouponIssueRequestRepository issueRequestRepository = new CouponIssueRequestRepository() { + @Override public CouponIssueRequest save(CouponIssueRequest request) { return request; } + @Override public Optional findById(Long id) { return Optional.empty(); } + }; + CouponFacade couponFacade = new CouponFacade(new FakeCouponRepository(), new 
FakeCouponIssueRepository(), + issueRequestRepository, mock(KafkaTemplate.class), new ObjectMapper(), Clock.systemDefaultZone()); + PaymentRecoveryService recoveryService = new PaymentRecoveryService( - paymentRepository, callbackInboxRepository, orderRepository, - productRepository, couponIssueRepository, stockRedisRepository, pgRouter); + paymentRepository, new FakePaymentStatusHistoryRepository(), + callbackInboxRepository, orderRepository, + productFacade, couponFacade, pgRouter); dlqScheduler = new CallbackDlqScheduler(callbackInboxRepository, recoveryService); } diff --git a/apps/commerce-batch/src/main/java/com/loopers/batch/job/paymentrecovery/step/PaymentRecoveryTasklet.java b/apps/commerce-batch/src/main/java/com/loopers/batch/job/paymentrecovery/step/PaymentRecoveryTasklet.java index 7f8e5e0d0..894b2a27f 100644 --- a/apps/commerce-batch/src/main/java/com/loopers/batch/job/paymentrecovery/step/PaymentRecoveryTasklet.java +++ b/apps/commerce-batch/src/main/java/com/loopers/batch/job/paymentrecovery/step/PaymentRecoveryTasklet.java @@ -13,7 +13,6 @@ import org.springframework.stereotype.Component; import java.util.List; -import java.util.Map; /** * 결제 복구 배치 — REQUESTED/PENDING/UNKNOWN 상태 결제건 복구. @@ -41,50 +40,113 @@ public class PaymentRecoveryTasklet implements Tasklet { public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { log.info("[PaymentRecovery] 배치 복구 시작"); - // 1. 
REQUESTED — 1분 이상 경과 → FAILED - int requestedCount = entityManager.createNativeQuery( + int requestedCount = recoverRequested(); + int pendingCount = recoverPending(); + int unknownCount = recoverUnknown(); + + log.info("[PaymentRecovery] 배치 복구 완료: REQUESTED={}, PENDING={}, UNKNOWN={}", + requestedCount, pendingCount, unknownCount); + + return RepeatStatus.FINISHED; + } + + private int recoverRequested() { + List ids = entityManager.createNativeQuery( + "SELECT id FROM payments WHERE status = 'REQUESTED' " + + "AND created_at < NOW() - INTERVAL 1 MINUTE AND deleted_at IS NULL" + ).getResultList(); + + if (ids.isEmpty()) return 0; + + List targetIds = ids.stream().map(Number::longValue).toList(); + + entityManager.createNativeQuery( + "INSERT INTO payment_status_history (payment_id, from_status, to_status, reason, detail, created_at, updated_at) " + + "SELECT id, 'REQUESTED', 'FAILED', 'BATCH_RECOVERY', '배치 복구: PG 호출 누락 (1분 초과)', NOW(), NOW() " + + "FROM payments WHERE id IN :ids AND status = 'REQUESTED' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); + + int count = entityManager.createNativeQuery( "UPDATE payments SET status = 'FAILED', failure_reason = '배치 복구: PG 호출 누락 (1분 초과)' " + - "WHERE status = 'REQUESTED' AND created_at < NOW() - INTERVAL 1 MINUTE AND deleted_at IS NULL" - ).executeUpdate(); - log.info("[PaymentRecovery] REQUESTED → FAILED: {}건", requestedCount); + "WHERE id IN :ids AND status = 'REQUESTED' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); + + log.info("[PaymentRecovery] REQUESTED → FAILED: {}건", count); + return count; + } + + @SuppressWarnings("unchecked") + private int recoverPending() { + List ids = entityManager.createNativeQuery( + "SELECT id FROM payments WHERE status = 'PENDING' " + + "AND created_at < NOW() - INTERVAL 5 MINUTE AND deleted_at IS NULL" + ).getResultList(); + + if (ids.isEmpty()) return 0; + + List targetIds = ids.stream().map(Number::longValue).toList(); + + 
entityManager.createNativeQuery( + "INSERT INTO payment_status_history (payment_id, from_status, to_status, reason, detail, created_at, updated_at) " + + "SELECT id, 'PENDING', 'FAILED', 'BATCH_RECOVERY', '배치 복구: 콜백 미수신 (5분 초과)', NOW(), NOW() " + + "FROM payments WHERE id IN :ids AND status = 'PENDING' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); - // 2. PENDING — 5분 이상 경과 → FAILED - int pendingCount = entityManager.createNativeQuery( + int count = entityManager.createNativeQuery( "UPDATE payments SET status = 'FAILED', failure_reason = '배치 복구: 콜백 미수신 (5분 초과)' " + - "WHERE status = 'PENDING' AND created_at < NOW() - INTERVAL 5 MINUTE AND deleted_at IS NULL" - ).executeUpdate(); - log.info("[PaymentRecovery] PENDING(5분+) → FAILED: {}건", pendingCount); + "WHERE id IN :ids AND status = 'PENDING' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); - // 3. UNKNOWN — 일괄 FAILED 처리 (PG 확인 불가 상태) - int unknownCount = entityManager.createNativeQuery( - "UPDATE payments SET status = 'FAILED', failure_reason = '배치 복구: UNKNOWN 타임아웃' " + - "WHERE status = 'UNKNOWN' AND created_at < NOW() - INTERVAL 10 MINUTE AND deleted_at IS NULL" - ).executeUpdate(); - log.info("[PaymentRecovery] UNKNOWN(10분+) → FAILED: {}건", unknownCount); - - // 4. 
FAILED 전환된 결제건의 재고 복원 (PENDING → FAILED 건만, order_item 기반) - if (pendingCount > 0) { - List failedPayments = entityManager.createNativeQuery( - "SELECT p.order_id FROM payments p " + - "WHERE p.status = 'FAILED' AND p.failure_reason LIKE '%콜백 미수신%' " + - "AND p.updated_at >= NOW() - INTERVAL 1 MINUTE AND p.deleted_at IS NULL" - ).getResultList(); - - for (Object[] row : failedPayments) { - Long orderId = ((Number) row[0]).longValue(); - int restored = entityManager.createNativeQuery( - "UPDATE product p INNER JOIN order_item oi ON p.id = oi.product_id " + - "INNER JOIN orders o ON oi.order_id = o.id " + - "SET p.stock_quantity = p.stock_quantity + oi.quantity " + - "WHERE o.id = :orderId AND p.deleted_at IS NULL" - ).setParameter("orderId", orderId).executeUpdate(); - log.info("[PaymentRecovery] 재고 복원: orderId={}, items={}", orderId, restored); - } + log.info("[PaymentRecovery] PENDING(5분+) → FAILED: {}건", count); + + // FAILED 전환된 결제건의 재고 복원 + if (count > 0) { + restoreStockForFailedPayments(targetIds); } - log.info("[PaymentRecovery] 배치 복구 완료: REQUESTED={}, PENDING={}, UNKNOWN={}", - requestedCount, pendingCount, unknownCount); + return count; + } - return RepeatStatus.FINISHED; + private int recoverUnknown() { + List ids = entityManager.createNativeQuery( + "SELECT id FROM payments WHERE status = 'UNKNOWN' " + + "AND created_at < NOW() - INTERVAL 10 MINUTE AND deleted_at IS NULL" + ).getResultList(); + + if (ids.isEmpty()) return 0; + + List targetIds = ids.stream().map(Number::longValue).toList(); + + entityManager.createNativeQuery( + "INSERT INTO payment_status_history (payment_id, from_status, to_status, reason, detail, created_at, updated_at) " + + "SELECT id, 'UNKNOWN', 'FAILED', 'BATCH_RECOVERY', '배치 복구: UNKNOWN 타임아웃', NOW(), NOW() " + + "FROM payments WHERE id IN :ids AND status = 'UNKNOWN' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); + + int count = entityManager.createNativeQuery( + "UPDATE payments SET status = 'FAILED', 
failure_reason = '배치 복구: UNKNOWN 타임아웃' " + + "WHERE id IN :ids AND status = 'UNKNOWN' AND deleted_at IS NULL" + ).setParameter("ids", targetIds).executeUpdate(); + + log.info("[PaymentRecovery] UNKNOWN(10분+) → FAILED: {}건", count); + return count; + } + + @SuppressWarnings("unchecked") + private void restoreStockForFailedPayments(List paymentIds) { + List orderIds = entityManager.createNativeQuery( + "SELECT order_id FROM payments WHERE id IN :ids AND deleted_at IS NULL" + ).setParameter("ids", paymentIds).getResultList(); + + for (Number orderIdNum : orderIds) { + Long orderId = orderIdNum.longValue(); + int restored = entityManager.createNativeQuery( + "UPDATE product p INNER JOIN order_item oi ON p.id = oi.product_id " + + "INNER JOIN orders o ON oi.order_id = o.id " + + "SET p.stock_quantity = p.stock_quantity + oi.quantity " + + "WHERE o.id = :orderId AND p.deleted_at IS NULL" + ).setParameter("orderId", orderId).executeUpdate(); + log.info("[PaymentRecovery] 재고 복원: orderId={}, items={}", orderId, restored); + } } } From b9c4b7e8aad37bb93d59c4ebecc75c38365cffaf Mon Sep 17 00:00:00 2001 From: SukheeChoi <95064440+SukheeChoi@users.noreply.github.com> Date: Fri, 27 Mar 2026 18:02:03 +0900 Subject: [PATCH 14/14] =?UTF-8?q?docs:=20Decomposition=20+=20Data=20Manage?= =?UTF-8?q?ment=20=ED=8C=A8=ED=84=B4=20=EB=B6=84=EC=84=9D=20=EB=B8=94?= =?UTF-8?q?=EB=A1=9C=EA=B7=B8=20=EC=B6=94=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - week7-decomposition-analysis.md: 4가지 Decomposition 패턴 점검 + 개선 기록 - week7-data-management-patterns.md: 8가지 Data Management 패턴 점검 + PaymentStatusHistory 도출 과정 --- blog/week7-data-management-patterns.md | 146 +++++++++++++++++++++++++ blog/week7-decomposition-analysis.md | 138 +++++++++++++++++++++++ 2 files changed, 284 insertions(+) create mode 100644 blog/week7-data-management-patterns.md create mode 100644 blog/week7-decomposition-analysis.md diff --git 
a/blog/week7-data-management-patterns.md b/blog/week7-data-management-patterns.md new file mode 100644 index 000000000..d7ed67667 --- /dev/null +++ b/blog/week7-data-management-patterns.md @@ -0,0 +1,146 @@ +# Data Management 패턴으로 이커머스 결제 시스템 점검하기 + +> microservices.io의 8가지 Data Management 패턴으로 현재 프로젝트를 점검하고, Event Sourcing 경량 적용(PaymentStatusHistory)을 도출한 기록. + +--- + +## 1. 점검에 사용한 8가지 패턴 + +| 패턴 | 핵심 질문 | +|------|----------| +| **Database per Service** | 도메인별로 데이터가 격리되어 있는가? | +| **Saga** | 분산 트랜잭션을 어떻게 관리하는가? | +| **CQRS** | 읽기/쓰기 모델이 분리되어 있는가? | +| **Transactional Outbox** | 메시지 발행의 원자성을 어떻게 보장하는가? | +| **Polling Publisher** | Outbox 메시지를 어떻게 전달하는가? | +| **Event Sourcing** | 상태 변경 이력을 추적할 수 있는가? | +| **API Composition** | 여러 도메인의 데이터를 어떻게 조합하는가? | +| **Domain Event** | 도메인 간 통신에 이벤트를 활용하는가? | + +--- + +## 2. 패턴별 적용 현황 + +| 패턴 | 적용 상태 | 핵심 발견 | +|------|----------|----------| +| Database per Service | 부분 적용 | 배치 대사(reconciliation)에서 cross-domain JOIN 존재 — **의도적 설계** | +| Saga | Orchestration 적용 | PaymentFacade가 오케스트레이터, 5-layer recovery 구조 | +| CQRS | 암묵적 적용 | product_metrics(읽기 모델), Product.like_count(비정규화), Redis 재고 캐시 | +| Transactional Outbox | 이중 적용 | PaymentOutbox(상태 기반, Polling) + EventOutbox(무상태, Debezium CDC) | +| Polling Publisher | Polling + CDC 하이브리드 | OutboxPollerScheduler(5초) + Debezium(CDC) | +| Event Sourcing | **미적용** | 결제 상태 전이 이력 없음 → **개선 대상** | +| API Composition | 적용 | ProductFacade(Product + Brand), OrderFacade(Order + Product + Coupon) | +| Domain Event | 적용 | DomainEventPublisher → Outbox + Spring Event 이중 발행 | + +--- + +## 3. 핵심 발견: 5-layer Recovery의 추적 불가 문제 + +현재 결제 시스템은 5-layer recovery 구조로 상태를 복구한다: + +``` +Layer 1: Outbox Poller (5초) — 미처리 결제 재시도 +Layer 2: Callback DLQ (30초) — 미수신 콜백 재처리 +Layer 3: Payment Polling (10초) — PG 상태 직접 확인 +Layer 4: WAL Recovery (10초) — DB 실패 시 WAL 파일 복구 +Layer 5: Batch Recovery (1/5/10분) — 최종 안전망 +``` + +**문제**: "어떤 경로로 최종 상태에 도달했는가?"를 추적할 수 없다. 
+ +- `PaymentModel.updatedAt`만 기록되고, `from_status` 정보가 유실 +- 콜백으로 PAID가 되었는지, Polling으로 PAID가 되었는지, 배치로 FAILED가 되었는지 알 수 없음 +- 결제 분쟁(dispute) 시 상태 변경 증거가 없음 + +--- + +## 4. 설계 선택: Event Sourcing 전체 vs History 테이블 + +### Event Sourcing 전체를 도입하지 않은 이유 + +| 항목 | Event Sourcing | History 테이블 | +|------|---------------|---------------| +| 상태 결정 방식 | 이벤트 재생으로 현재 상태 구성 | 현재 상태 + INSERT-only 로그 | +| 필요 인프라 | 이벤트 스토어 + 스냅샷 + CQRS 프로젝션 | 기존 DB에 테이블 1개 추가 | +| 복잡도 | 높음 (이벤트 버전닝, 스냅샷 전략) | 낮음 (INSERT만) | +| Payment 핵심 | "최종 상태가 무엇인가?" | 동일 | + +Payment의 핵심은 **"최종 상태가 무엇인가"**이지 "모든 이벤트를 재생해서 상태를 구성"하는 것이 아니다. History 테이블은 Event Sourcing의 감사(audit) 측면만 경량 적용한 것. + +--- + +## 5. 3가지 상태 변경 경로와 기록 전략 + +결제 상태가 변경되는 경로가 3가지 존재한다. 세 경로 모두 기록해야 History가 의미 있다. + +### 경로 1: Entity 메서드 (PaymentFacade) + +```java +// PaymentModel — @Transient 전이 리스트로 자동 추적 +public void markPaid() { + PaymentStatus from = this.status; + validateTransition(PaymentStatus.PAID); + this.status = PaymentStatus.PAID; + pendingTransitions.add(new StatusTransition(from, PaymentStatus.PAID, "PG_RESPONSE", null)); +} +``` + +`markPending → markPaid` 연속 호출 시 두 전이 모두 기록된다 (REQUESTED→PENDING, PENDING→PAID). +`PaymentRepositoryImpl.save()`에서 pendingTransitions를 자동으로 History 테이블에 INSERT. + +### 경로 2: JPQL 조건부 UPDATE (PaymentRecoveryService, WalRecoveryScheduler) + +```java +int affected = paymentRepository.updateStatusConditionally( + payment.getId(), PaymentStatus.PAID, allowedStatuses); +if (affected > 0) { + historyRepository.save(PaymentStatusHistory.create( + payment.getId(), payment.getStatus(), PaymentStatus.PAID, "CALLBACK", null)); +} +``` + +Entity 메서드를 우회하는 JPQL UPDATE이므로, 호출부에서 명시적으로 기록한다. + +### 경로 3: Native SQL (PaymentRecoveryTasklet) + +```java +// 복구 대상 ID 조회 → History INSERT → Status UPDATE 순서 +List ids = ... // SELECT id FROM payments WHERE status = 'REQUESTED' ... +entityManager.createNativeQuery( + "INSERT INTO payment_status_history (...) SELECT id, 'REQUESTED', 'FAILED', 'BATCH_RECOVERY', ... 
FROM payments WHERE id IN :ids" +).setParameter("ids", ids).executeUpdate(); +``` + +JPA 엔티티를 완전히 우회하는 배치 복구이므로, companion INSERT로 기록한다. + +--- + +## 6. 스킵한 개선점과 근거 + +분석 결과 도출된 다른 개선점들은 의도적으로 스킵했다: + +| 개선점 | 판단 | 근거 | +|--------|------|------| +| Batch cross-domain JOIN 분리 | SKIP | 대사(reconciliation)는 정확도 최우선. `payments JOIN orders JOIN coupon_issue`를 이벤트 기반 검증으로 바꾸면 오히려 정합성 검증 신뢰도가 낮아짐 | +| Payment CQRS 명시적 분리 | SKIP | 결제 조회 트래픽이 상품 조회 대비 미미. 별도 Read Model의 ROI가 낮음 | +| PaymentFacade 분리 (287 lines) | SKIP | 결제 오케스트레이션은 단일 유스케이스. TX-0/TX-1/TX-2 경계를 분리하면 오히려 흐름 파악이 어려워짐 | +| Multi-instance Outbox Poller | SKIP | PG orderId 멱등성이 중복 처리를 이미 방지. 스케일아웃 시 SELECT FOR UPDATE 추가하면 됨 | + +--- + +## 7. 산술 근거 + +- 피크 TPS 5,000 결제 요청 × 평균 2~3회 상태 전이 = 10,000~15,000 History INSERT/초 +- INSERT-only 테이블, 인덱스 1개 (payment_id) → MySQL 8.0 기준 수만 rows/초 처리 가능 +- 디스크: row당 ~100 bytes × 15,000/초 × 86,400초 ≈ **1.3GB/일** → created_at 기준 파티셔닝으로 관리 +- 기존 payments 테이블 쓰기 성능에 미치는 영향: 별도 테이블이므로 기존 UPDATE 쿼리에 추가 부하 없음 + +--- + +## 8. 라이팅 포인트 + +1. **Event Sourcing은 "전부 아니면 전무"가 아니다** — 감사 로그(audit trail)만 필요하면 History 테이블로 충분하다. "이벤트 재생으로 상태를 구성"하는 풀 Event Sourcing은 요구사항이 정당화할 때 도입한다. + +2. **"3가지 경로 모두 커버해야 한다"는 발견이 핵심** — Entity 메서드만 기록하면 JPQL/Native SQL 경로의 전이가 유실된다. 상태 변경 경로를 빠짐없이 파악하는 것이 History 설계의 출발점. + +3. **"스킵한다"도 설계 판단이다** — 8가지 패턴을 점검했지만 실제로 구현한 개선은 1가지. 나머지 4가지를 스킵한 근거를 기록하는 것이 "왜 이렇게 했는가?"에 답하는 것. diff --git a/blog/week7-decomposition-analysis.md b/blog/week7-decomposition-analysis.md new file mode 100644 index 000000000..03f7e560e --- /dev/null +++ b/blog/week7-decomposition-analysis.md @@ -0,0 +1,138 @@ +# Decomposition 패턴으로 이커머스 프로젝트 점검하기 + +> microservices.io의 Decomposition 패턴 4가지로 현재 프로젝트를 점검하고, 도출된 개선점을 실제로 적용한 기록. + +--- + +## 1. 점검에 사용한 4가지 패턴 + +| 패턴 | 핵심 질문 | +|------|----------| +| **Decompose by Business Capability** | 도메인별로 책임이 분리되어 있는가? | +| **Decompose by Subdomain** | Bounded Context 경계가 코드에 반영되어 있는가? | +| **Self-Contained Service** | 각 서비스가 동기 의존 없이 독립 동작 가능한가? 
| +| **Service per Team** | 팀 단위로 독립 배포·운영이 가능한 구조인가? | + +--- + +## 2. 점검 결과 + +### 잘 되어 있는 부분 + +- **패키지 구조**: `application/{domain}/`, `domain/{domain}/` 패턴으로 Business Capability별 분리 완료. +- **Facade 패턴**: 유스케이스 조율이 Application Layer에서 이루어지고, 도메인 로직은 Entity/VO에 캡슐화. +- **Aggregate 간 ID 참조**: Order → Product, Order → CouponIssue 등 느슨한 결합 유지. + +### 개선이 필요한 부분 + +1. **EventOutbox 보일러플레이트**: LikeFacade, OrderFacade가 각각 `EventOutboxRepository` + `ObjectMapper` + `ApplicationEventPublisher` 3개를 직접 조합. Outbox 저장 + 이벤트 발행 패턴이 중복. +2. **PaymentRecoveryService cross-domain 접근**: 결제 복구 서비스가 `ProductRepository`, `StockReservationRedisRepository`, `CouponIssueRepository`를 직접 사용. Product/Coupon 도메인의 내부 구현에 결합. +3. **Self-Contained Service 관점**: 결제 도메인이 상품 재고의 Redis + DB 이중 쓰기 패턴을 알고 있는 것은 도메인 경계 위반. + +--- + +## 3. 개선 1: DomainEventPublisher 추상화 + +### Before + +```java +// LikeFacade — 3개 인프라 의존 +private final EventOutboxRepository eventOutboxRepository; +private final ApplicationEventPublisher applicationEventPublisher; +private final ObjectMapper objectMapper; + +// Outbox + Event 발행 로직이 Facade에 직접 존재 +EventOutbox outbox = EventOutbox.create("catalog", productId, "LIKE_CREATED", buildPayload(...)); +eventOutboxRepository.save(outbox); +applicationEventPublisher.publishEvent(new LikeCreatedEvent(...)); +``` + +### After + +```java +// LikeFacade — 1개 도메인 인터페이스 의존 +private final DomainEventPublisher domainEventPublisher; + +// 한 줄로 완결 +domainEventPublisher.publish("catalog", productId, "LIKE_CREATED", + Map.of("productId", productId, "memberId", memberId), + new LikeCreatedEvent(productId, memberId)); +``` + +### 설계 포인트 + +- **DomainEventPublisher 인터페이스**는 `domain/event/`에 위치 → Facade가 인프라에 의존하지 않음. +- **DomainEventPublisherImpl**은 `infrastructure/event/`에 위치 → Outbox 저장 + Spring Event 발행을 캡슐화. +- 마이크로서비스 분리 시 구현체만 교체 (Outbox → Kafka 직접 발행)하면 Facade 코드 변경 없음. + +--- + +## 4. 
개선 2: PaymentRecoveryService cross-domain 위임 + +### Before + +```java +// PaymentRecoveryService — Product/Coupon 도메인 직접 접근 +private final ProductRepository productRepository; +private final CouponIssueRepository couponIssueRepository; +private final StockReservationRedisRepository stockRedisRepository; + +private void handlePaymentFailure(PaymentModel payment) { + // Redis INCR + DB increaseStock 이중 쓰기를 직접 수행 + stockRedisRepository.increase(item.getProductId(), item.getQuantity()); + productRepository.findById(item.getProductId()).ifPresent(product -> { + product.increaseStock(item.getQuantity()); + productRepository.save(product); + }); + // 쿠폰 내부 상태 직접 조작 + couponIssueRepository.findById(couponIssueId).ifPresent(couponIssue -> { + couponIssue.cancelUse(ZonedDateTime.now()); + }); +} +``` + +### After + +```java +// PaymentRecoveryService — Facade 위임 +private final ProductFacade productFacade; +private final CouponFacade couponFacade; + +private void handlePaymentFailure(PaymentModel payment) { + for (OrderItem item : order.getItems()) { + productFacade.restoreStock(item.getProductId(), item.getQuantity()); + } + if (order.getCouponIssueId() != null) { + couponFacade.restoreCoupon(order.getCouponIssueId()); + } +} +``` + +### 설계 포인트 + +- **재고 복원 로직(Redis + DB)은 ProductFacade가 소유**: Product 도메인의 내부 구현을 외부에 노출하지 않음. +- **쿠폰 복원은 CouponFacade.restoreCoupon()**: 이미 존재하던 메서드를 활용. +- **OrderRepository는 유지**: 주문 상태 조회는 결제 도메인의 직접 관심사 (주문 → 결제 1:1 관계). + +--- + +## 5. Self-Contained Service 관점 + +결제 도메인을 분석하면: + +| 의존 대상 | 유형 | 판단 | +|----------|------|------| +| OrderRepository | 동기 조회 | 결제-주문 1:1이므로 허용 (같은 BC로 분류 가능) | +| ProductFacade.restoreStock() | Facade 호출 | 도메인 경계를 Facade로 격리 → 분리 시 이벤트 기반으로 전환 가능 | +| CouponFacade.restoreCoupon() | Facade 호출 | 동일 | +| PgRouter | 외부 시스템 | Circuit Breaker + Retry로 보호 완료 | + +Self-Contained Service의 핵심은 "동기 의존을 최소화"하는 것이지 "의존을 제거"하는 것이 아니다. 현재 모놀리스 구조에서는 Facade 위임이 적절하며, 마이크로서비스 분리 시 이벤트 기반(Saga)으로 전환하면 된다. + +--- + +## 6. 
라이팅 포인트 + +1. **Decomposition 패턴은 마이크로서비스 전용이 아니다** — 모놀리스에서도 도메인 경계를 점검하는 체크리스트로 활용 가능. +2. **"추상화해야 하는가?"의 기준은 변경 가능성** — Outbox → Kafka 전환 시 Facade 코드를 건드려야 한다면, 지금 추상화할 근거가 있다. +3. **cross-domain 접근은 "동작하는가?"가 아니라 "분리 가능한가?"로 판단** — PaymentRecoveryService가 Product 도메인의 Redis 이중 쓰기를 알고 있으면, 재고 전략 변경 시 결제 코드도 수정해야 한다.