diff --git a/apps/commerce-api/build.gradle.kts b/apps/commerce-api/build.gradle.kts index b59f0207d..66bce973e 100644 --- a/apps/commerce-api/build.gradle.kts +++ b/apps/commerce-api/build.gradle.kts @@ -2,6 +2,7 @@ dependencies { // add-ons implementation(project(":modules:jpa")) implementation(project(":modules:redis")) + implementation(project(":modules:kafka")) implementation(project(":supports:jackson")) implementation(project(":supports:logging")) implementation(project(":supports:monitoring")) diff --git a/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java index 275705eca..64def1399 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponFacade.java @@ -3,6 +3,9 @@ import com.loopers.domain.coupon.CouponService; import com.loopers.domain.coupon.CouponTemplate; import com.loopers.domain.coupon.IssuedCoupon; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.outbox.OutboxEventService; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; @@ -22,18 +25,57 @@ public class CouponFacade { private final CouponService couponService; + private final CouponIssueRequestJpaRepository couponIssueRequestRepository; + private final OutboxEventService outboxEventService; - public CouponFacade(CouponService couponService) { + public CouponFacade(CouponService couponService, + CouponIssueRequestJpaRepository couponIssueRequestRepository, + OutboxEventService outboxEventService) { this.couponService = couponService; + this.couponIssueRequestRepository = couponIssueRequestRepository; + this.outboxEventService = outboxEventService; } - /** 쿠폰 발급 */ + /** + * 선착순 쿠폰 발급 요청 (비동기 — Kafka 기반) + * + * 1. 발급 요청 이력을 DB에 PENDING으로 저장 + * 2. Outbox에 이벤트 저장 (같은 TX — 원자성) + * 3. 즉시 202 응답 → 유저가 결과를 폴링 + * 4. Relay → Kafka → CouponIssueConsumer가 실제 발급 + * 5. Consumer가 요청 이력을 ISSUED/FAILED로 업데이트 + */ @Transactional - public IssueCouponResult issueCoupon(Long templateId, Long userId) { - IssuedCoupon issued = couponService.issue(templateId, userId); - return new IssueCouponResult(issued.getId(), issued.getStatus().name()); + public CouponIssueRequestResult requestCouponIssue(Long templateId, Long userId) { + String eventId = java.util.UUID.randomUUID().toString(); + + // 발급 요청 이력 저장 — 폴링 대상 + 추적용 + CouponIssueRequestEntity request = CouponIssueRequestEntity.create(templateId, userId, eventId); + couponIssueRequestRepository.save(request); + + // Outbox 저장 — 같은 TX (비즈니스 + Outbox 원자성) + outboxEventService.save( + "COUPON", templateId, + "CouponIssueRequestedEvent", + new CouponIssueRequestPayload(request.getId(), templateId, userId, eventId), + "coupon-issue-requests-v1", + String.valueOf(templateId) // key=couponTemplateId → 같은 쿠폰은 같은 파티션 + ); + + return new CouponIssueRequestResult(request.getId(), eventId, "PENDING"); } + /** 발급 결과 폴링 */ + @Transactional(readOnly = true) + public CouponIssueRequestResult getCouponIssueResult(Long requestId) { + CouponIssueRequestEntity request = couponIssueRequestRepository.findById(requestId) + .orElseThrow(() -> new IllegalArgumentException("발급 요청을 찾을 수 없습니다: " + requestId)); + return new CouponIssueRequestResult(request.getId(), request.getEventId(), request.getStatus().name()); + } + + public record CouponIssueRequestPayload(Long requestId, Long templateId, Long userId, String eventId) {} + public record CouponIssueRequestResult(Long requestId, String eventId, String status) {} + /** 내 쿠폰 목록 조회 */ @Transactional(readOnly = true) public CouponListResult getMyCoupons(Long userId) { @@ -74,8 +116,6 @@ public AvailableCouponListResult getAvailableCoupons() { return new AvailableCouponListResult(details); } - public record IssueCouponResult(Long issuedCouponId, String status) {} - public record IssuedCouponDetail( Long issuedCouponId, Long couponTemplateId, String couponName, String discountType, diff --git a/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponIssueProcessor.java b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponIssueProcessor.java new file mode 100644 index 000000000..1d1612baa --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/application/coupon/CouponIssueProcessor.java @@ -0,0 +1,137 @@ +package com.loopers.application.coupon; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.coupon.CouponService; +import com.loopers.domain.coupon.IssuedCoupon; +import com.loopers.support.error.CoreException; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.event.EventHandledEntity; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +/** + * 쿠폰 발급 처리 — @Transactional 보장 + * + * Consumer(Interfaces)에서 분리된 비즈니스 처리 Bean. + * → 프록시를 통한 호출 → @Transactional 정상 동작 + * → 쿠폰 발급 + 상태 업데이트 + 멱등성 기록이 같은 TX + * + * 비즈니스 실패(재고 소진, 중복 발급)는 BusinessFailureException으로 래핑: + * → Consumer에서 재시도 없이 즉시 처리 (이미 FAILED 기록 완료) + */ +@Service +public class CouponIssueProcessor { + + private static final Logger log = LoggerFactory.getLogger(CouponIssueProcessor.class); + + private final ObjectMapper objectMapper; + private final CouponService couponService; + private final CouponIssueRequestJpaRepository couponIssueRequestRepository; + private final EventHandledJpaRepository eventHandledRepository; + + public CouponIssueProcessor(ObjectMapper objectMapper, + CouponService couponService, + CouponIssueRequestJpaRepository couponIssueRequestRepository, + EventHandledJpaRepository eventHandledRepository) { + this.objectMapper = objectMapper; + this.couponService = couponService; + this.couponIssueRequestRepository = couponIssueRequestRepository; + this.eventHandledRepository = eventHandledRepository; + } + + /** + * 쿠폰 발급 요청 처리 + * + * @throws BusinessFailureException 비즈니스 실패 (재고 소진 등) — 재시도 불필요 + * @throws RuntimeException 인프라 장애 — ErrorHandler가 DLQ로 격리 + */ + @Transactional + public void process(String payload) { + JsonNode node; + try { + node = objectMapper.readTree(payload); + } catch (Exception e) { + log.error("[CouponProcessor] JSON 파싱 실패 — payload={}", payload, e); + return; + } + + String eventId = node.path("eventId").asText(null); + Long requestId = node.path("requestId").asLong(0); + Long templateId = node.path("templateId").asLong(0); + Long userId = node.path("userId").asLong(0); + + if (eventId == null || requestId == 0) { + log.warn("[CouponProcessor] 필수 필드 누락 — payload={}", payload); + return; + } + + // 멱등성 체크 + if (eventHandledRepository.existsByEventId(eventId)) { + log.warn("[CouponProcessor] 중복 스킵 — eventId={}", eventId); + return; + } + + // 발급 요청 조회 + CouponIssueRequestEntity request = couponIssueRequestRepository.findById(requestId) + .orElse(null); + if (request == null) { + log.error("[CouponProcessor] 발급 요청 없음 — requestId={}", requestId); + return; + } + + // 쿠폰 발급 시도 + try { + IssuedCoupon issued = couponService.issue(templateId, userId); + + request.markIssued(issued.getId()); + couponIssueRequestRepository.save(request); + eventHandledRepository.save(EventHandledEntity.of(eventId, "coupon-issue-requests-v1")); + + log.info("[CouponProcessor] 발급 성공 — templateId={}, userId={}, issuedCouponId={}", + templateId, userId, issued.getId()); + + } catch (CoreException e) { + // 비즈니스 실패 (재고 소진, 발급 한도 초과 등) + // CouponService.issue()가 같은 TX에 참여하므로, CoreException throw 시 + // TX가 rollback-only로 마킹된다. 여기서 markFailed()를 해도 커밋 시 롤백된다. + // → Consumer에서 별도 TX로 FAILED 기록을 위임한다. + throw new BusinessFailureException( + e.getMessage(), e, requestId, eventId); + } + } + + /** + * 비즈니스 실패 시 FAILED 기록 — 별도 TX (REQUIRES_NEW) + * + * process()의 TX가 rollback-only 상태이므로, 새 TX에서 FAILED + event_handled를 저장한다. + */ + @Transactional(propagation = org.springframework.transaction.annotation.Propagation.REQUIRES_NEW) + public void markFailedInNewTx(Long requestId, String eventId, String reason) { + CouponIssueRequestEntity request = couponIssueRequestRepository.findById(requestId) + .orElse(null); + if (request != null) { + request.markFailed(reason); + couponIssueRequestRepository.save(request); + } + eventHandledRepository.save(EventHandledEntity.of(eventId, "coupon-issue-requests-v1")); + } + + public static class BusinessFailureException extends RuntimeException { + private final Long requestId; + private final String eventId; + + public BusinessFailureException(String message, Throwable cause, Long requestId, String eventId) { + super(message, cause); + this.requestId = requestId; + this.eventId = eventId; + } + + public Long getRequestId() { return requestId; } + public String getEventId() { return eventId; } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java index 941587a66..c853389e2 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/like/LikeFacade.java @@ -9,6 +9,8 @@ import com.loopers.domain.like.ProductLike; import com.loopers.domain.product.Product; import com.loopers.domain.product.ProductService; +import com.loopers.domain.common.event.ProductLikedEvent; +import com.loopers.infrastructure.outbox.OutboxEventService; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; @@ -32,34 +34,59 @@ public class LikeFacade { private final ProductService productService; private final BrandService brandService; private final ProductCacheManager productCacheManager; + private final OutboxEventService outboxEventService; public LikeFacade(LikeService likeService, BrandLikeService brandLikeService, ProductService productService, BrandService brandService, - ProductCacheManager productCacheManager) { + ProductCacheManager productCacheManager, + OutboxEventService outboxEventService) { this.likeService = likeService; this.brandLikeService = brandLikeService; this.productService = productService; this.brandService = brandService; this.productCacheManager = productCacheManager; + this.outboxEventService = outboxEventService; } - /** 상품 좋아요 (상품 검증 → 좋아요 생성 → likeCount 증가 → 상세 캐시만 삭제) */ + /** + * 상품 좋아요 (상품 검증 → 좋아요 생성 → Outbox 이벤트 발행 → 상세 캐시 삭제) + * + * products.like_count 직접 증분은 하지 않음: + * Outbox → Kafka → CatalogMetricsProcessor가 + * product_metrics.like_count + products.like_count를 같은 TX에서 업데이트. + * 단일 파이프라인으로 정합성을 보장한다. (eventual consistency) + */ @Transactional public LikeResult likeProduct(Long userId, Long productId) { Product product = productService.getDisplayableProduct(productId); likeService.like(userId, productId); - productService.incrementLikeCount(productId); productCacheManager.registerDetailOnlyEvictAfterCommit(productId); + + // Outbox 저장 — 같은 TX (좋아요 집계 → catalog-events-v1) + outboxEventService.save("PRODUCT", productId, + "ProductLikedEvent", new ProductLikedEvent(userId, productId, true), + "catalog-events-v1", String.valueOf(productId)); + return new LikeResult(product.getLikeCount() + 1); } - /** 상품 좋아요 취소 (상품 존재 검증 → 좋아요 삭제 → likeCount 감소 → 상세 캐시만 삭제) */ + /** + * 상품 좋아요 취소 (상품 존재 검증 → 좋아요 삭제 → Outbox 이벤트 발행 → 상세 캐시 삭제) + * + * products.like_count 직접 감소는 하지 않음: + * CatalogMetricsProcessor가 단일 파이프라인으로 처리. + */ @Transactional public LikeResult unlikeProduct(Long userId, Long productId) { Product product = productService.getById(productId); likeService.unlike(userId, productId); - productService.decrementLikeCount(productId); productCacheManager.registerDetailOnlyEvictAfterCommit(productId); + + // Outbox 저장 — 같은 TX (좋아요 취소 집계 → catalog-events-v1) + outboxEventService.save("PRODUCT", productId, + "ProductUnlikedEvent", new ProductLikedEvent(userId, productId, false), + "catalog-events-v1", String.valueOf(productId)); + return new LikeResult(product.getLikeCount() - 1); } diff --git a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java index 9caf4b7cd..6dfac696c 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/order/OrderFacade.java @@ -26,6 +26,7 @@ import com.loopers.support.error.PointErrorType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.loopers.infrastructure.outbox.OutboxEventService; import org.springframework.stereotype.Component; import org.springframework.transaction.PlatformTransactionManager; import org.springframework.transaction.annotation.Transactional; @@ -66,6 +67,7 @@ public class OrderFacade { private final PaymentFacade paymentFacade; private final TransactionTemplate txTemplate; private final OrderCacheManager orderCacheManager; + private final OutboxEventService outboxEventService; public OrderFacade(OrderService orderService, UserAddressService userAddressService, ProductService productService, BrandService brandService, @@ -73,7 +75,8 @@ public OrderFacade(OrderService orderService, UserAddressService userAddressServ CouponService couponService, PointService pointService, PaymentFacade paymentFacade, PlatformTransactionManager txManager, - OrderCacheManager orderCacheManager) { + OrderCacheManager orderCacheManager, + OutboxEventService outboxEventService) { this.orderService = orderService; this.userAddressService = userAddressService; this.productService = productService; @@ -83,6 +86,7 @@ public OrderFacade(OrderService orderService, UserAddressService userAddressServ this.couponService = couponService; this.pointService = pointService; this.paymentFacade = paymentFacade; + this.outboxEventService = outboxEventService; this.txTemplate = new TransactionTemplate(txManager); this.txTemplate.setTimeout(30); this.orderCacheManager = orderCacheManager; diff --git a/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentFacade.java index 0e02a253b..2d566185f 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/payment/PaymentFacade.java @@ -27,6 +27,7 @@ import com.loopers.support.error.OrderErrorType; import com.loopers.support.error.PaymentErrorType; import com.loopers.support.error.PointErrorType; +import com.loopers.infrastructure.outbox.OutboxEventService; import io.github.resilience4j.bulkhead.BulkheadFullException; import io.github.resilience4j.bulkhead.annotation.Bulkhead; import io.github.resilience4j.circuitbreaker.CallNotPermittedException; @@ -63,6 +64,7 @@ public class PaymentFacade { private final CouponService couponService; private final ProductService productService; private final OrderCacheManager orderCacheManager; + private final OutboxEventService outboxEventService; private final CompensationDlqRepository compensationDlqRepository; private final org.springframework.transaction.support.TransactionTemplate txTemplate; @@ -71,6 +73,7 @@ public PaymentFacade(OrderService orderService, PaymentService paymentService, InventoryService inventoryService, PointService pointService, CouponService couponService, ProductService productService, OrderCacheManager orderCacheManager, + OutboxEventService outboxEventService, CompensationDlqRepository compensationDlqRepository, org.springframework.transaction.PlatformTransactionManager txManager) { this.orderService = orderService; @@ -80,6 +83,7 @@ public PaymentFacade(OrderService orderService, PaymentService paymentService, this.couponService = couponService; this.productService = productService; this.orderCacheManager = orderCacheManager; + this.outboxEventService = outboxEventService; this.compensationDlqRepository = compensationDlqRepository; this.txTemplate = new org.springframework.transaction.support.TransactionTemplate(txManager); this.txTemplate.setTimeout(30); @@ -160,6 +164,7 @@ public PaymentRequestResult requestPayment(Long orderId, Long userId, String pay Payment payment = paymentService.create( orderId, order.getTotalAmount(), paymentMethod, generateIdempotencyKey()); + Map productQtyMap = order.getItems().stream() .collect(Collectors.toMap(OrderItem::getProductId, OrderItem::getQuantity)); @@ -288,6 +293,15 @@ public void confirmPayment(Long orderId, String pgTxnId) { orderService.confirm(orderId, payment.getId(), payment.getPaymentMethod()); pointService.earn(order.getUserId(), order.getTotalAmount()); + // Outbox 저장 — 같은 TX (판매량 집계 → catalog-events-v1) + for (var entry : productQtyMap.entrySet()) { + outboxEventService.save("PRODUCT", entry.getKey(), + "OrderItemSoldEvent", + new com.loopers.domain.common.event.OrderItemSoldEvent( + orderId, Map.of(entry.getKey(), entry.getValue())), + "catalog-events-v1", String.valueOf(entry.getKey())); + } + orderCacheManager.registerEvictAfterCommit(order.getUserId()); }); } diff --git a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductEventListener.java b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductEventListener.java new file mode 100644 index 000000000..64295ff46 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductEventListener.java @@ -0,0 +1,72 @@ +package com.loopers.application.product; + +import com.loopers.domain.common.event.ProductViewedEvent; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.header.internals.RecordHeader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.event.EventListener; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.scheduling.annotation.Async; +import org.springframework.stereotype.Component; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.nio.charset.StandardCharsets; + +/** + * 상품 조회 이벤트 리스너 — Kafka fire-and-forget 직접 발행 + * + * 상품 조회(ProductFacade.getProductDetail)는 TX가 없으므로 + * @TransactionalEventListener 대신 @EventListener를 사용한다. + * + * Outbox를 사용하지 않는 이유: + * - 조회에는 비즈니스 TX가 없음 → "같은 TX에 저장" 불가 + * - 조회 수 유실은 서비스 정합성에 영향 없음 (At-Most-Once 충분) + * - 매 요청마다 쓰기 TX를 여는 것은 커넥션 풀 낭비 + * + * Kafka fire-and-forget: + * - kafkaTemplate.send() 호출 후 결과 대기 없음 + * - 발행 실패 시 유실 허용 (조회 수는 보정 배치로 보완 가능) + */ +@Component +public class ProductEventListener { + + private static final Logger log = LoggerFactory.getLogger(ProductEventListener.class); + + private final KafkaTemplate kafkaTemplate; + private final ObjectMapper objectMapper; + + public ProductEventListener(KafkaTemplate kafkaTemplate, + ObjectMapper objectMapper) { + this.kafkaTemplate = kafkaTemplate; + this.objectMapper = objectMapper; + } + + @Async + @EventListener + public void handleProductViewed(ProductViewedEvent event) { + try { + String payload = objectMapper.writeValueAsString(event); + String partitionKey = String.valueOf(event.productId()); + + ProducerRecord record = new ProducerRecord<>( + "catalog-events-v1", null, partitionKey, payload); + record.headers() + .add(new RecordHeader("X-Event-Type", + "ProductViewedEvent".getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Aggregate-Type", + "PRODUCT".getBytes(StandardCharsets.UTF_8))); + + // fire-and-forget — 결과 대기 없음, 유실 허용 + kafkaTemplate.send(record); + + log.debug("[ProductEventListener] 조회 이벤트 발행 — productId={}", event.productId()); + + } catch (Exception e) { + // 발행 실패해도 비즈니스에 영향 없음 — 로깅만 + log.warn("[ProductEventListener] 조회 이벤트 발행 실패 — productId={}, error={}", + event.productId(), e.getMessage()); + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java index 7557eb84d..94d474b98 100644 --- a/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java +++ b/apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java @@ -9,6 +9,8 @@ import com.loopers.domain.product.ProductCursor; import com.loopers.domain.product.ProductService; import com.loopers.domain.product.ProductSortType; +import com.loopers.domain.common.event.ProductViewedEvent; +import org.springframework.context.ApplicationEventPublisher; import org.springframework.stereotype.Component; import java.util.HashMap; @@ -35,12 +37,15 @@ public class ProductFacade { private final ProductService productService; private final BrandService brandService; private final ProductCacheManager productCacheManager; + private final ApplicationEventPublisher eventPublisher; public ProductFacade(ProductService productService, BrandService brandService, - ProductCacheManager productCacheManager) { + ProductCacheManager productCacheManager, + ApplicationEventPublisher eventPublisher) { this.productService = productService; this.brandService = brandService; this.productCacheManager = productCacheManager; + this.eventPublisher = eventPublisher; } /** @@ -58,6 +63,11 @@ public ProductDetailResult getProductDetail(Long productId) { ProductDetailResult result = new ProductDetailResult(ProductInfo.from(product), BrandInfo.from(brand)); productCacheManager.putProductDetail(productId, result); + + // 상품 조회 이벤트 발행 — product_metrics 조회 수 집계 + 유저 행동 로깅 (추후 Kafka 전환) + // userId는 현재 컨텍스트에서 가져올 수 없으므로 null 허용 (비로그인 조회) + eventPublisher.publishEvent(new ProductViewedEvent(null, productId)); + return result; } diff --git a/apps/commerce-api/src/main/java/com/loopers/config/AsyncConfig.java b/apps/commerce-api/src/main/java/com/loopers/config/AsyncConfig.java new file mode 100644 index 000000000..8f007fe28 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/config/AsyncConfig.java @@ -0,0 +1,17 @@ +package com.loopers.config; + +import org.springframework.context.annotation.Configuration; +import org.springframework.scheduling.annotation.EnableAsync; +import org.springframework.scheduling.annotation.EnableScheduling; + +/** + * 비동기 + 스케줄링 설정 + * + * @EnableAsync: @Async 이벤트 리스너용 + * @EnableScheduling: Outbox Relay Polling 스케줄러용 (@Scheduled) + */ +@Configuration +@EnableAsync +@EnableScheduling +public class AsyncConfig { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/config/KafkaTopicConfig.java b/apps/commerce-api/src/main/java/com/loopers/config/KafkaTopicConfig.java new file mode 100644 index 000000000..09805a94f --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/config/KafkaTopicConfig.java @@ -0,0 +1,103 @@ +package com.loopers.config; + +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.config.TopicConfig; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.config.TopicBuilder; + +/** + * Kafka Topic 설계 — 환경별 설정 외부화 + * + * 토픽 분리 기준: + * 1. 발행 방식이 다르면 토픽 분리 (Outbox vs 직접 발행) + * 2. Consumer 처리 로직이 다르면 토픽 분리 + * 3. 운영 모니터링 기준이 다르면 토픽 분리 + * + * 환경별 설정: + * - application-local.yml: replicas=1, min-insync=1 (단일 브로커) + * - application-prd.yml: replicas=3, min-insync=2 (3-브로커 클러스터) + */ +@Configuration +@EnableConfigurationProperties(TopicProperties.class) +public class KafkaTopicConfig { + + private final TopicProperties topicProperties; + + public KafkaTopicConfig(TopicProperties topicProperties) { + this.topicProperties = topicProperties; + } + + /** + * 상품 카탈로그 이벤트 — 좋아요/조회/판매량 집계 + * key=productId → 같은 상품의 이벤트를 순차 처리 → product_metrics upsert + */ + @Bean + public NewTopic catalogEventsTopic() { + var config = topicProperties.catalogEvents(); + return TopicBuilder.name(config.name()) + .partitions(config.partitions()) + .replicas(config.replicas()) + .config(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, String.valueOf(config.minInsyncReplicas())) + .build(); + } + + /** + * 주문 이벤트 — 주문 확정 후 포인트 적립 + * key=orderId → 같은 주문의 이벤트를 순차 처리 + */ + @Bean + public NewTopic orderEventsTopic() { + var config = topicProperties.orderEvents(); + return TopicBuilder.name(config.name()) + .partitions(config.partitions()) + .replicas(config.replicas()) + .config(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, String.valueOf(config.minInsyncReplicas())) + .build(); + } + + /** + * 쿠폰 발급 요청 (Command) — 선착순 쿠폰 발급 + * key=couponId → 같은 쿠폰의 요청을 순차 처리 → Lock 없이 동시성 제어 + */ + @Bean + public NewTopic couponIssueRequestsTopic() { + var config = topicProperties.couponIssueRequests(); + return TopicBuilder.name(config.name()) + .partitions(config.partitions()) + .replicas(config.replicas()) + .config(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, String.valueOf(config.minInsyncReplicas())) + .build(); + } + + /** + * 유저 행동 로깅 — 조회/클릭/좋아요/주문 + * key=userId → 유저별 행동 시간순 추적 + * Outbox 없이 직접 발행 (유실 허용) + */ + @Bean + public NewTopic userActivityEventsTopic() { + var config = topicProperties.userActivityEvents(); + return TopicBuilder.name(config.name()) + .partitions(config.partitions()) + .replicas(config.replicas()) + .config(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, String.valueOf(config.minInsyncReplicas())) + .config(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(config.retentionMs())) + .build(); + } + + /** + * DLQ — 처리 실패 메시지 격리 + */ + @Bean + public NewTopic dlqTopic() { + var config = topicProperties.pipelineDlq(); + return TopicBuilder.name(config.name()) + .partitions(config.partitions()) + .replicas(config.replicas()) + .config(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, String.valueOf(config.minInsyncReplicas())) + .config(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(config.retentionMs())) + .build(); + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/config/TopicProperties.java b/apps/commerce-api/src/main/java/com/loopers/config/TopicProperties.java new file mode 100644 index 000000000..7bc50bd37 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/config/TopicProperties.java @@ -0,0 +1,46 @@ +package com.loopers.config; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +/** + * Kafka 토픽 설정 바인딩 + * + * 환경별 토픽 설정을 외부화: + * - application-local.yml: replicas=1, min-insync=1 + * - application-prd.yml: replicas=3, min-insync=2 + * + * 애플리케이션 코드가 인프라 관심사(브로커 클러스터 구성)를 알 필요 없음 + */ +@ConfigurationProperties(prefix = "kafka.topic") +public record TopicProperties( + TopicConfig catalogEvents, + TopicConfig orderEvents, + TopicConfig couponIssueRequests, + TopicConfig userActivityEvents, + TopicConfig pipelineDlq +) { + public record TopicConfig( + String name, + int partitions, + int replicas, + int minInsyncReplicas, + Long retentionMs + ) { + public TopicConfig { + if (name == null || name.isBlank()) { + throw new IllegalArgumentException("Topic name must not be blank"); + } + if (partitions <= 0) { + throw new IllegalArgumentException("Partitions must be positive"); + } + if (replicas <= 0) { + throw new IllegalArgumentException("Replicas must be positive"); + } + if (minInsyncReplicas <= 0 || minInsyncReplicas > replicas) { + throw new IllegalArgumentException( + "min-insync-replicas must be positive and <= replicas" + ); + } + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderConfirmedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderConfirmedEvent.java new file mode 100644 index 000000000..d22c44cec --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderConfirmedEvent.java @@ -0,0 +1,21 @@ +package com.loopers.domain.common.event; + +/** + * 주문 확정 이벤트 — "주문이 결제 완료되어 확정됐다" + * + * 발행 시점: TX2 커밋 이후 (AFTER_COMMIT) + * 소비자: 포인트 적립 (현재 ApplicationEvent, 추후 Kafka 전환) + * + * 같은 TX에 묶인 것들 (이벤트로 분리 ❌): + * 결제 승인, 재고 커밋, 주문 상태 PAID — TX2에서 원자적 처리 + * + * 이벤트로 분리한 것 (✅): + * 포인트 적립 — 실패해도 CS 보정 가능, 금전 가치이므로 추후 Kafka + */ +public record OrderConfirmedEvent( + Long orderId, + Long userId, + int totalAmount, + Long paymentId +) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderItemSoldEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderItemSoldEvent.java new file mode 100644 index 000000000..2d7b7bef3 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/OrderItemSoldEvent.java @@ -0,0 +1,17 @@ +package com.loopers.domain.common.event; + +import java.util.Map; + +/** + * 상품 판매 이벤트 — "주문 확정으로 상품이 판매됐다" + * + * 발행 시점: TX2 커밋 이후 (AFTER_COMMIT) + * 소비자: product_metrics 집계 (판매량 upsert) + * + * productQtyMap: productId → 판매 수량 + */ +public record OrderItemSoldEvent( + Long orderId, + Map productQtyMap +) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductLikedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductLikedEvent.java new file mode 100644 index 000000000..1054246ee --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductLikedEvent.java @@ -0,0 +1,21 @@ +package com.loopers.domain.common.event; + +/** + * 상품 좋아요 이벤트 — "유저가 상품을 좋아요했다" + * + * 발행 시점: 좋아요 TX 커밋 이후 (AFTER_COMMIT) + * 소비자: product_metrics 집계 (현재 ApplicationEvent, 추후 Kafka 전환) + * + * 같은 TX에 묶인 것 (이벤트로 분리 ❌): + * 좋아요 저장 (ProductLike) + 좋아요 수 증가 (atomic UPDATE) + * → 좋아요 수가 즉시 반영돼야 하므로 같은 TX + * + * 이벤트로 분리한 것 (✅): + * product_metrics 집계 — eventual consistency 허용 + */ +public record ProductLikedEvent( + Long userId, + Long productId, + boolean liked // true=좋아요, false=좋아요 취소 +) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductViewedEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductViewedEvent.java new file mode 100644 index 000000000..ff9a291cd --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/ProductViewedEvent.java @@ -0,0 +1,15 @@ +package com.loopers.domain.common.event; + +/** + * 상품 조회 이벤트 — "유저가 상품 상세를 조회했다" + * + * 발행 시점: 상품 조회 API 호출 시 (TX 없이 발행 가능) + * 소비자: + * - product_metrics 집계 (조회 수 upsert) + * - 유저 행동 로깅 (분석용) + */ +public record ProductViewedEvent( + Long userId, + Long productId +) { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/domain/common/event/UserActivityEvent.java b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/UserActivityEvent.java new file mode 100644 index 000000000..e27e2e69a --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/domain/common/event/UserActivityEvent.java @@ -0,0 +1,36 @@ +package com.loopers.domain.common.event; + +import java.time.LocalDateTime; + +/** + * 유저 행동 이벤트 — "유저가 어떤 행동을 했다" + * + * 발행 시점: 행동 발생 즉시 + * 소비자: 유저 행동 로깅 (분석 저장소 append) + * + * 유실 허용 — 서비스 정합성에 영향 없음. + * 추후 Kafka로 전환 시 Outbox 없이 직접 발행. + */ +public record UserActivityEvent( + Long userId, + String activityType, // VIEW, CLICK, LIKE, ORDER, PAYMENT + String targetType, // PRODUCT, ORDER, COUPON + Long targetId, + LocalDateTime occurredAt +) { + public static UserActivityEvent view(Long userId, Long productId) { + return new UserActivityEvent(userId, "VIEW", "PRODUCT", productId, LocalDateTime.now()); + } + + public static UserActivityEvent like(Long userId, Long productId) { + return new UserActivityEvent(userId, "LIKE", "PRODUCT", productId, LocalDateTime.now()); + } + + public static UserActivityEvent order(Long userId, Long orderId) { + return new UserActivityEvent(userId, "ORDER", "ORDER", orderId, LocalDateTime.now()); + } + + public static UserActivityEvent payment(Long userId, Long orderId) { + return new UserActivityEvent(userId, "PAYMENT", "ORDER", orderId, LocalDateTime.now()); + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestEntity.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestEntity.java new file mode 100644 index 000000000..d0b0a0435 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestEntity.java @@ -0,0 +1,97 @@ +package com.loopers.infrastructure.coupon; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.EnumType; +import jakarta.persistence.Enumerated; +import jakarta.persistence.GeneratedValue; +import jakarta.persistence.GenerationType; +import jakarta.persistence.Id; +import jakarta.persistence.Index; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * 쿠폰 발급 요청 이력 — 비동기 발급의 추적/폴링용 + * + * API가 요청을 받으면 PENDING으로 생성하고 즉시 202 응답. + * Consumer가 처리 완료 후 ISSUED 또는 FAILED로 업데이트. + * 유저가 이 테이블을 폴링해서 발급 결과를 확인한다. + */ +@Entity +@Table(name = "coupon_issue_requests", indexes = { + @Index(name = "idx_coupon_issue_req_user_template", columnList = "user_id, coupon_template_id"), + @Index(name = "idx_coupon_issue_req_event_id", columnList = "event_id", unique = true) +}) +public class CouponIssueRequestEntity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "coupon_template_id", nullable = false) + private Long couponTemplateId; + + @Column(name = "user_id", nullable = false) + private Long userId; + + @Column(name = "event_id", nullable = false, unique = true, length = 100) + private String eventId; + + @Enumerated(EnumType.STRING) + @Column(nullable = false, length = 20) + private CouponIssueRequestStatus status; + + @Column(name = "issued_coupon_id") + private Long issuedCouponId; + + @Column(name = "failure_reason", length = 200) + private String failureReason; + + @Column(name = "requested_at", nullable = false) + private ZonedDateTime requestedAt; + + @Column(name = "processed_at") + private ZonedDateTime processedAt; + + protected CouponIssueRequestEntity() {} + + public static CouponIssueRequestEntity create(Long couponTemplateId, Long userId, String eventId) { + CouponIssueRequestEntity entity = new CouponIssueRequestEntity(); + entity.couponTemplateId = couponTemplateId; + entity.userId = userId; + entity.eventId = eventId; + entity.status = CouponIssueRequestStatus.PENDING; + entity.requestedAt = ZonedDateTime.now(); + return entity; + } + + public void markIssued(Long issuedCouponId) { + if (this.status != CouponIssueRequestStatus.PENDING) { + return; + } + this.status = CouponIssueRequestStatus.ISSUED; + this.issuedCouponId = issuedCouponId; + this.processedAt = ZonedDateTime.now(); + } + + public void markFailed(String reason) { + if (this.status != CouponIssueRequestStatus.PENDING) { + return; + } + this.status = CouponIssueRequestStatus.FAILED; + this.failureReason = reason; + this.processedAt = ZonedDateTime.now(); + } + + public Long getId() { return id; } + public Long getCouponTemplateId() { return couponTemplateId; } + public Long getUserId() { return userId; } + public String getEventId() { return eventId; } + public CouponIssueRequestStatus getStatus() { return status; } + public Long getIssuedCouponId() { return issuedCouponId; } + public String getFailureReason() { return failureReason; } + public ZonedDateTime getRequestedAt() { return requestedAt; } + public ZonedDateTime getProcessedAt() { return processedAt; } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java new file mode 100644 index 000000000..c23c36c08 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestJpaRepository.java @@ -0,0 +1,13 @@ +package com.loopers.infrastructure.coupon; + +import org.springframework.data.jpa.repository.JpaRepository; + +import java.util.Optional; + +public interface CouponIssueRequestJpaRepository extends JpaRepository { + + Optional findByEventId(String eventId); + + Optional findByUserIdAndCouponTemplateIdAndStatus( + Long userId, Long couponTemplateId, CouponIssueRequestStatus status); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestStatus.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestStatus.java new file mode 100644 index 000000000..be858b00a --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/coupon/CouponIssueRequestStatus.java @@ -0,0 +1,7 @@ +package com.loopers.infrastructure.coupon; + +public enum CouponIssueRequestStatus { + PENDING, // 요청 접수, Kafka 발행 대기 + ISSUED, // 발급 완료 + FAILED // 발급 실패 (재고 소진, 중복 발급 등) +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java new file mode 100644 index 000000000..cc5898c1c --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java @@ -0,0 +1,87 @@ +package com.loopers.infrastructure.dlq; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.header.internals.RecordHeader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.support.SendResult; +import org.springframework.stereotype.Component; + +import java.nio.charset.StandardCharsets; +import java.time.ZonedDateTime; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * DLQ Publisher — 처리 실패한 메시지를 DLQ 토픽으로 격리 (commerce-api용) + * + * 동기 전송 (.get()): + * DLQ 전송도 실패하면 메시지가 영원히 유실된다. + * 비동기(fire-and-forget)로 하면 DLQ 전송 실패를 감지 못 함. + * 금전 가치 이벤트(쿠폰)는 DLQ 전송도 반드시 성공해야 함. + * + * 헤더: + * X-Original-Topic, X-Original-Partition, X-Original-Offset + * X-Error-Message, X-Error-Timestamp, X-Retry-Count + */ +@Component +public class DlqPublisher { + + private static final Logger log = LoggerFactory.getLogger(DlqPublisher.class); + private static final String DLQ_TOPIC = "pipeline-dlq-v1"; + + private final KafkaTemplate kafkaTemplate; + + public DlqPublisher(KafkaTemplate kafkaTemplate) { + this.kafkaTemplate = kafkaTemplate; + } + + public void sendToDlq(ConsumerRecord record, Exception exception) { + sendToDlq(record, exception, 0); + } + + public void sendToDlq(ConsumerRecord record, Exception exception, int retryCount) { + try { + String errorMsg = exception.getMessage() != null + ? exception.getMessage() + : exception.getClass().getSimpleName(); + + ProducerRecord dlqRecord = new ProducerRecord<>( + DLQ_TOPIC, null, record.key(), record.value()); + + dlqRecord.headers() + .add(new RecordHeader("X-Original-Topic", + record.topic().getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Original-Partition", + String.valueOf(record.partition()).getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Original-Offset", + String.valueOf(record.offset()).getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Error-Message", + errorMsg.getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Error-Timestamp", + ZonedDateTime.now().toString().getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Retry-Count", + String.valueOf(retryCount).getBytes(StandardCharsets.UTF_8))); + + // 동기 전송 — DLQ 유실 방지 + SendResult result = kafkaTemplate.send(dlqRecord).get(10, TimeUnit.SECONDS); + + var metadata = result.getRecordMetadata(); + log.warn("[DLQ] 메시지 격리 완료 — dlqPartition={}, dlqOffset={}, " + + "originalTopic={}, originalPartition={}, originalOffset={}, retryCount={}, error={}", + metadata.partition(), metadata.offset(), + record.topic(), record.partition(), record.offset(), retryCount, errorMsg); + + } catch (ExecutionException | TimeoutException e) { + log.error("[DLQ] DLQ 전송 실패! 메시지 유실 위험 — topic={}, partition={}, offset={}, error={}", + record.topic(), record.partition(), record.offset(), e.getMessage(), e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.error("[DLQ] DLQ 전송 중단 — topic={}, partition={}, offset={}", + record.topic(), record.partition(), record.offset()); + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqReprocessingService.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqReprocessingService.java new file mode 100644 index 000000000..62ccb2713 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/dlq/DlqReprocessingService.java @@ -0,0 +1,76 @@ +package com.loopers.infrastructure.dlq; + +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * DLQ 재처리 서비스 + * + * DLQ에 격리된 메시지를 원본 토픽으로 재발행하는 프로세스: + * 1. event_handled에서 해당 eventId 삭제 (멱등성 레코드 초기화) + * 2. 원본 토픽으로 메시지 재발행 (동기) + * 3. 기존 Consumer가 정상 파이프라인으로 재처리 + * + * 실무 패턴: Alen 멘토 — "eventId 기반 event_handled 레코드 삭제 → 원본 토픽 재발행" + */ +@Service +public class DlqReprocessingService { + + private static final Logger log = LoggerFactory.getLogger(DlqReprocessingService.class); + + private final EventHandledJpaRepository eventHandledRepository; + private final KafkaTemplate kafkaTemplate; + + public DlqReprocessingService(EventHandledJpaRepository eventHandledRepository, + KafkaTemplate kafkaTemplate) { + this.eventHandledRepository = eventHandledRepository; + this.kafkaTemplate = kafkaTemplate; + } + + /** + * 멱등성 레코드 삭제 + 원본 토픽 재발행 + */ + @Transactional + public void reprocess(String eventId, String originalTopic, String partitionKey, String payload) { + // 1. 멱등성 레코드 삭제 + deleteEventHandled(eventId); + + // 2. 원본 토픽으로 재발행 (동기 — 재발행 실패 시 예외) + try { + ProducerRecord record = new ProducerRecord<>( + originalTopic, null, partitionKey, payload); + + kafkaTemplate.send(record).get(10, TimeUnit.SECONDS); + + log.info("[DlqReprocess] 재발행 완료 — eventId={}, topic={}, key={}", + eventId, originalTopic, partitionKey); + + } catch (ExecutionException | TimeoutException e) { + log.error("[DlqReprocess] 재발행 실패 — eventId={}, topic={}, error={}", + eventId, originalTopic, e.getMessage()); + throw new RuntimeException("DLQ 재발행 실패: " + e.getMessage(), e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("DLQ 재발행 중단", e); + } + } + + /** + * 멱등성 레코드만 삭제 (재발행은 별도) + * + * @return true = 삭제됨, false = 레코드 없었음 + */ + @Transactional + public boolean deleteEventHandled(String eventId) { + return eventHandledRepository.deleteByEventId(eventId) > 0; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java new file mode 100644 index 000000000..8f299211b --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java @@ -0,0 +1,57 @@ +package com.loopers.infrastructure.event; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.GeneratedValue; +import jakarta.persistence.GenerationType; +import jakarta.persistence.Id; +import jakarta.persistence.Index; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * 멱등성 처리 테이블 — 이미 처리한 이벤트를 기록 (인프라 전용) + * + * Consumer가 메시지 처리 시: + * 1. event_id로 조회 → 이미 있으면 중복 스킵 + * 2. 비즈니스 로직 + INSERT를 같은 TX → 롤백 시 재처리 가능 + * + * UNIQUE 제약 조건으로 동시 처리 방어 (DB 레벨 동시성 제어) + */ +@Entity +@Table(name = "event_handled", indexes = { + @Index(name = "idx_event_handled_event_id", columnList = "event_id", unique = true), + @Index(name = "idx_event_handled_topic_handled", columnList = "topic, handled_at") +}) +public class EventHandledEntity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "event_id", nullable = false, unique = true, length = 100) + private String eventId; + + @Column(nullable = false, length = 100) + private String topic; + + @Column(name = "handled_at", nullable = false) + private ZonedDateTime handledAt; + + protected EventHandledEntity() { + } + + public static EventHandledEntity of(String eventId, String topic) { + EventHandledEntity entity = new EventHandledEntity(); + entity.eventId = eventId; + entity.topic = topic; + entity.handledAt = ZonedDateTime.now(); + return entity; + } + + public Long getId() { return id; } + public String getEventId() { return eventId; } + public String getTopic() { return topic; } + public ZonedDateTime getHandledAt() { return handledAt; } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java new file mode 100644 index 000000000..0cc1b4966 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java @@ -0,0 +1,10 @@ +package com.loopers.infrastructure.event; + +import org.springframework.data.jpa.repository.JpaRepository; + +public interface EventHandledJpaRepository extends JpaRepository { + + boolean existsByEventId(String eventId); + + int deleteByEventId(String eventId); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/metrics/MetricsReconciliationScheduler.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/metrics/MetricsReconciliationScheduler.java new file mode 100644 index 000000000..b4f48272b --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/metrics/MetricsReconciliationScheduler.java @@ -0,0 +1,92 @@ +package com.loopers.infrastructure.metrics; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +/** + * product_metrics 보정 스케줄러 + * + * 이벤트 기반 집계(CatalogMetricsProcessor)는 이벤트 유실/중복 시 + * product_metrics 값이 실제 DB 원본과 drift할 수 있다. + * + * DB 원본(product_likes, order_items)을 기준으로 + * product_metrics와 products.like_count를 주기적으로 보정한다. + * + * 보정 주기: 매일 00시 + * 보정 방식: DB 원본 COUNT → UPDATE + * + * 주의: + * 이벤트가 아직 미도착 상태에서 보정하면 delta가 오히려 틀어질 수 있음. + * 따라서 하루 1회 수준이 적절. 긴급 시 수동 보정 API 별도 제공 가능. + */ +@Component +public class MetricsReconciliationScheduler { + + private static final Logger log = LoggerFactory.getLogger(MetricsReconciliationScheduler.class); + + private final JdbcTemplate jdbcTemplate; + + public MetricsReconciliationScheduler(JdbcTemplate jdbcTemplate) { + this.jdbcTemplate = jdbcTemplate; + } + + /** + * like_count 보정 — product_likes 테이블 기준 + * + * product_metrics.like_count + products.like_count 모두 보정. + */ + @Scheduled(cron = "0 0 0 * * *") + public void reconcileLikeCount() { + log.info("[MetricsReconciliation] like_count 보정 시작"); + + int metricsUpdated = jdbcTemplate.update(""" + UPDATE product_metrics pm + SET pm.like_count = ( + SELECT COUNT(*) + FROM product_likes pl + WHERE pl.product_id = pm.product_id + AND pl.deleted_at IS NULL + ), + pm.updated_at = NOW() + """); + + int productsUpdated = jdbcTemplate.update(""" + UPDATE products p + SET p.like_count = ( + SELECT COUNT(*) + FROM product_likes pl + WHERE pl.product_id = p.id + AND pl.deleted_at IS NULL + ), + p.updated_at = NOW() + """); + + log.info("[MetricsReconciliation] like_count 보정 완료 — metrics={}건, products={}건", + metricsUpdated, productsUpdated); + } + + /** + * sales_count 보정 — order_items 테이블 기준 (확정 주문만) + */ + @Scheduled(cron = "0 10 0 * * *") + public void reconcileSalesCount() { + log.info("[MetricsReconciliation] sales_count 보정 시작"); + + int updated = jdbcTemplate.update(""" + UPDATE product_metrics pm + SET pm.sales_count = ( + SELECT COALESCE(SUM(oi.quantity), 0) + FROM order_items oi + JOIN orders o ON o.id = oi.order_id + WHERE oi.product_id = pm.product_id + AND o.status = 'CONFIRMED' + ), + pm.updated_at = NOW() + """); + + log.info("[MetricsReconciliation] sales_count 보정 완료 — {}건", updated); + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventEntity.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventEntity.java new file mode 100644 index 000000000..5bae81350 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventEntity.java @@ -0,0 +1,144 @@ +package com.loopers.infrastructure.outbox; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.EnumType; +import jakarta.persistence.Enumerated; +import jakarta.persistence.GeneratedValue; +import jakarta.persistence.GenerationType; +import jakarta.persistence.Id; +import jakarta.persistence.Index; +import jakarta.persistence.PrePersist; +import jakarta.persistence.PreUpdate; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * Outbox 이벤트 Entity (인프라 전용 — 도메인 객체 아님) + * + * 비즈니스 TX 안에서 이벤트를 저장하고, + * Relay가 주기적으로 PENDING 이벤트를 Kafka로 발행한다. + */ +@Entity +@Table(name = "outbox_event", indexes = { + @Index(name = "idx_outbox_status_created", columnList = "status, created_at"), + @Index(name = "idx_outbox_status_updated", columnList = "status, updated_at"), + // PENDING 조회 최적화 (Phase 1) + @Index(name = "idx_outbox_pending_created", columnList = "created_at"), + // PROCESSING 조회 최적화 (Phase 2) + @Index(name = "idx_outbox_processing_created", columnList = "created_at"), + // PROCESSING 복구 최적화 (updated_at 기준) + @Index(name = "idx_outbox_processing_updated", columnList = "updated_at") +}) +public class OutboxEventEntity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "aggregate_type", nullable = false, length = 50) + private String aggregateType; + + @Column(name = "aggregate_id", nullable = false) + private Long aggregateId; + + @Column(name = "event_type", nullable = false, length = 100) + private String eventType; + + @Column(nullable = false, columnDefinition = "JSON") + private String payload; + + @Column(nullable = false, length = 100) + private String topic; + + @Column(name = "partition_key", length = 100) + private String partitionKey; + + @Enumerated(EnumType.STRING) + @Column(nullable = false, length = 20) + private OutboxStatus status; + + @Column(name = "retry_count", nullable = false) + private int retryCount; + + @Column(name = "created_at", nullable = false, updatable = false) + private ZonedDateTime createdAt; + + @Column(name = "updated_at", nullable = false) + private ZonedDateTime updatedAt; + + @Column(name = "published_at") + private ZonedDateTime publishedAt; + + @Column(name = "error_message", length = 500) + private String errorMessage; + + protected OutboxEventEntity() { + } + + @PrePersist + protected void onCreate() { + ZonedDateTime now = ZonedDateTime.now(); + if (createdAt == null) { + createdAt = now; + } + updatedAt = now; + } + + @PreUpdate + protected void onUpdate() { + updatedAt = ZonedDateTime.now(); + } + + public static OutboxEventEntity create(String aggregateType, Long aggregateId, + String eventType, String payload, + String topic, String partitionKey) { + OutboxEventEntity entity = new OutboxEventEntity(); + entity.aggregateType = aggregateType; + entity.aggregateId = aggregateId; + entity.eventType = eventType; + entity.payload = payload; + entity.topic = topic; + entity.partitionKey = partitionKey; + entity.status = OutboxStatus.PENDING; + entity.retryCount = 0; + entity.createdAt = ZonedDateTime.now(); + entity.updatedAt = ZonedDateTime.now(); + return entity; + } + + public void markProcessing() { + this.status = OutboxStatus.PROCESSING; + } + + public void markPublished() { + this.status = OutboxStatus.PUBLISHED; + this.publishedAt = ZonedDateTime.now(); + } + + public void markFailed(String errorMessage) { + this.status = OutboxStatus.FAILED; + this.retryCount++; + this.errorMessage = errorMessage; + } + + public void markRetry() { + this.status = OutboxStatus.PENDING; + this.retryCount++; + } + + public Long getId() { return id; } + public String getAggregateType() { return aggregateType; } + public Long getAggregateId() { return aggregateId; } + public String getEventType() { return eventType; } + public String getPayload() { return payload; } + public String getTopic() { return topic; } + public String getPartitionKey() { return partitionKey; } + public OutboxStatus getStatus() { return status; } + public int getRetryCount() { return retryCount; } + public ZonedDateTime getCreatedAt() { return createdAt; } + public ZonedDateTime getUpdatedAt() { return updatedAt; } + public ZonedDateTime getPublishedAt() { return publishedAt; } + public String getErrorMessage() { return errorMessage; } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventJpaRepository.java new file mode 100644 index 000000000..b4d543c40 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventJpaRepository.java @@ -0,0 +1,47 @@ +package com.loopers.infrastructure.outbox; + +import jakarta.persistence.LockModeType; +import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Lock; +import org.springframework.data.jpa.repository.Query; +import org.springframework.data.repository.query.Param; + +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Optional; + +public interface OutboxEventJpaRepository extends JpaRepository { + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'PENDING' ORDER BY o.createdAt ASC LIMIT :limit") + List findPendingEvents(@Param("limit") int limit); + + /** + * PENDING 이벤트를 조회하고 즉시 락을 획득한다 (FOR UPDATE SKIP LOCKED) + * 멀티 인스턴스 환경에서 중복 발행을 방지한다. + */ + @Query(value = """ + SELECT * FROM outbox_event + WHERE status = 'PENDING' + ORDER BY created_at ASC + LIMIT :limit + FOR UPDATE SKIP LOCKED + """, nativeQuery = true) + List findPendingEventsForUpdate(@Param("limit") int limit); + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'PROCESSING' ORDER BY o.createdAt ASC LIMIT :limit") + List findProcessingEvents(@Param("limit") int limit); + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'FAILED' AND o.retryCount < :maxRetry ORDER BY o.createdAt ASC LIMIT :limit") + List findRetryableEvents(@Param("maxRetry") int maxRetry, @Param("limit") int limit); + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'PUBLISHED' AND o.publishedAt < :before") + List findPublishedBefore(@Param("before") ZonedDateTime before); + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'PENDING' ORDER BY o.createdAt ASC LIMIT 1") + Optional findOldestPendingEvent(); + + @Query("SELECT o FROM OutboxEventEntity o WHERE o.status = 'PROCESSING' AND o.updatedAt < :threshold ORDER BY o.updatedAt ASC") + List findStalledProcessingEvents(@Param("threshold") ZonedDateTime threshold); + + long countByStatus(OutboxStatus status); +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventService.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventService.java new file mode 100644 index 000000000..7288abfd3 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxEventService.java @@ -0,0 +1,50 @@ +package com.loopers.infrastructure.outbox; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +/** + * Outbox 이벤트 저장 서비스 + * + * 비즈니스 TX 안에서 호출된다. + * @Transactional(REQUIRED)이므로 호출자의 TX에 참여한다. + */ +@Service +public class OutboxEventService { + + private static final Logger log = LoggerFactory.getLogger(OutboxEventService.class); + + private final OutboxEventJpaRepository outboxEventJpaRepository; + private final ObjectMapper objectMapper; + + public OutboxEventService(OutboxEventJpaRepository outboxEventJpaRepository, + ObjectMapper objectMapper) { + this.outboxEventJpaRepository = outboxEventJpaRepository; + this.objectMapper = objectMapper; + } + + @Transactional + public OutboxEventEntity save(String aggregateType, Long aggregateId, + String eventType, Object payload, + String topic, String partitionKey) { + String jsonPayload; + try { + jsonPayload = objectMapper.writeValueAsString(payload); + } catch (JsonProcessingException e) { + throw new RuntimeException("Outbox 페이로드 직렬화 실패", e); + } + + OutboxEventEntity entity = OutboxEventEntity.create( + aggregateType, aggregateId, eventType, jsonPayload, topic, partitionKey); + OutboxEventEntity saved = outboxEventJpaRepository.save(entity); + + log.info("[Outbox] 이벤트 저장 — id={}, topic={}, key={}, type={}", + saved.getId(), topic, partitionKey, eventType); + + return saved; + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxMetrics.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxMetrics.java new file mode 100644 index 000000000..624c9bf72 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxMetrics.java @@ -0,0 +1,133 @@ +package com.loopers.infrastructure.outbox; + +import io.micrometer.core.instrument.Counter; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Timer; +import org.springframework.stereotype.Component; + +import java.time.Duration; +import java.time.ZonedDateTime; +import java.util.concurrent.TimeUnit; + +/** + * Outbox 이벤트 메트릭 수집 + * + * Prometheus로 노출되는 메트릭: + * - outbox.pending.count: PENDING 이벤트 수 + * - outbox.processing.count: PROCESSING 이벤트 수 + * - outbox.published.count: PUBLISHED 이벤트 수 + * - outbox.failed.count: FAILED 이벤트 수 + * - outbox.oldest.pending.age.seconds: 가장 오래된 PENDING 이벤트 나이 (초) + * - outbox.phase1.duration: Phase 1 처리 시간 + * - outbox.phase2.duration: Phase 2 처리 시간 + * - outbox.publish.success: 발행 성공 카운터 + * - outbox.publish.failed: 발행 실패 카운터 + */ +@Component +public class OutboxMetrics { + + private final MeterRegistry registry; + private final OutboxEventJpaRepository repository; + + // Counters + private final Counter publishSuccessCounter; + private final Counter publishFailedCounter; + + // Timers + private final Timer phase1Timer; + private final Timer phase2Timer; + + public OutboxMetrics(MeterRegistry registry, OutboxEventJpaRepository repository) { + this.registry = registry; + this.repository = repository; + + // Counters 초기화 + this.publishSuccessCounter = Counter.builder("outbox.publish.success") + .description("Kafka 발행 성공 횟수") + .register(registry); + + this.publishFailedCounter = Counter.builder("outbox.publish.failed") + .description("Kafka 발행 실패 횟수") + .register(registry); + + // Timers 초기화 + this.phase1Timer = Timer.builder("outbox.phase1.duration") + .description("Phase 1 (PENDING → PROCESSING) 처리 시간") + .register(registry); + + this.phase2Timer = Timer.builder("outbox.phase2.duration") + .description("Phase 2 (PROCESSING → Kafka) 처리 시간") + .register(registry); + + // Gauges 등록 (실시간 조회) + registerGauges(); + } + + /** + * Gauge 메트릭 등록 + * - 호출 시마다 DB 조회하여 실시간 값 반환 + */ + private void registerGauges() { + // PENDING 이벤트 수 + registry.gauge("outbox.pending.count", this, + metrics -> repository.countByStatus(OutboxStatus.PENDING)); + + // PROCESSING 이벤트 수 + registry.gauge("outbox.processing.count", this, + metrics -> repository.countByStatus(OutboxStatus.PROCESSING)); + + // PUBLISHED 이벤트 수 (최근 1시간) + registry.gauge("outbox.published.count", this, + metrics -> repository.countByStatus(OutboxStatus.PUBLISHED)); + + // FAILED 이벤트 수 + registry.gauge("outbox.failed.count", this, + metrics -> repository.countByStatus(OutboxStatus.FAILED)); + + // 가장 오래된 PENDING 이벤트 나이 (초) + registry.gauge("outbox.oldest.pending.age.seconds", this, + metrics -> calculateOldestPendingAge()); + } + + /** + * 가장 오래된 PENDING 이벤트의 나이(초) 계산 + * - 0: PENDING 없음 + * - N: 가장 오래된 이벤트가 N초 전에 생성됨 + */ + private double calculateOldestPendingAge() { + return repository.findOldestPendingEvent() + .map(event -> { + Duration age = Duration.between(event.getCreatedAt(), ZonedDateTime.now()); + return (double) age.getSeconds(); + }) + .orElse(0.0); + } + + /** + * Phase 1 처리 시간 기록 + */ + public void recordPhase1Duration(long durationMillis) { + phase1Timer.record(durationMillis, TimeUnit.MILLISECONDS); + } + + /** + * Phase 2 처리 시간 기록 + */ + public void recordPhase2Duration(long durationMillis) { + phase2Timer.record(durationMillis, TimeUnit.MILLISECONDS); + } + + /** + * 발행 성공 기록 + */ + public void recordPublishSuccess() { + publishSuccessCounter.increment(); + } + + /** + * 발행 실패 기록 + */ + public void recordPublishFailure() { + publishFailedCounter.increment(); + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxRelayService.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxRelayService.java new file mode 100644 index 000000000..552feb217 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxRelayService.java @@ -0,0 +1,296 @@ +package com.loopers.infrastructure.outbox; + +import jakarta.annotation.PreDestroy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.support.SendResult; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +import java.time.ZonedDateTime; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Outbox Relay — 2단계 Polling 방식 (PROCESSING 상태 + 동기 .get()) + * + * Phase 1: PENDING → PROCESSING (빠름, 락 보유) + * - FOR UPDATE SKIP LOCKED로 멀티 인스턴스 중복 방지 + * - 상태만 변경하고 빠르게 커밋 (락 최소 보유) + * + * Phase 2: PROCESSING → Kafka 발행 (느림, 병렬 가능) + * - Partition Key별 순차 처리 (순서 보장) + * - 병렬 스트림으로 처리량 극대화 + * + * 동기 방식 근거: + * - 비동기 whenComplete + @Transactional 충돌 → markPublished() DB 미반영 (kafka-pipeline-lab에서 실증) + * - Outbox는 "안전한 발행"이 목적. 속도보다 정확성 우선. + * - 실무 표준: 동기 Polling → CDC 전환 경로 (케브님 멘토링) + * + * 예상 효과: + * - 처리량: 10건/초 → 500~5,000건/초 (50~500배) + * - 중복 발행: 완전 방지 (PROCESSING + FOR UPDATE SKIP LOCKED) + * - 지연: 최대 1초 (fixedDelay=1000) + */ +@Service +public class OutboxRelayService { + + private static final Logger log = LoggerFactory.getLogger(OutboxRelayService.class); + private static final int BATCH_SIZE = 500; // 50 → 500 (10배 증가) + private static final int MAX_RETRY = 5; + private static final int SHUTDOWN_TIMEOUT_SECONDS = 30; + + private final OutboxEventJpaRepository outboxEventJpaRepository; + private final KafkaTemplate kafkaTemplate; + private final org.springframework.transaction.support.TransactionTemplate transactionTemplate; + private final OutboxMetrics metrics; + + private final AtomicBoolean shuttingDown = new AtomicBoolean(false); + private volatile CountDownLatch phase2Latch; + + public OutboxRelayService(OutboxEventJpaRepository outboxEventJpaRepository, + KafkaTemplate kafkaTemplate, + org.springframework.transaction.PlatformTransactionManager transactionManager, + OutboxMetrics metrics) { + this.outboxEventJpaRepository = outboxEventJpaRepository; + this.kafkaTemplate = kafkaTemplate; + this.transactionTemplate = new org.springframework.transaction.support.TransactionTemplate(transactionManager); + this.metrics = metrics; + } + + /** + * Phase 1: PENDING → PROCESSING 전환 (빠름, 락 최소 보유) + * + * FOR UPDATE SKIP LOCKED로 멀티 인스턴스 환경에서 중복 조회 방지 + * - 인스턴스 A가 row 1~500 락 획득 → 인스턴스 B는 row 501~1000 조회 + * - 상태만 변경하고 빠르게 커밋 → 락 해제 + */ + @Scheduled(fixedDelay = 1000) // 5초 → 1초 (5배 빠른 폴링) + @Transactional + public void markPendingAsProcessing() { + long startTime = System.currentTimeMillis(); + + List pendingEvents = outboxEventJpaRepository.findPendingEventsForUpdate(BATCH_SIZE); + + if (pendingEvents.isEmpty()) { + return; + } + + for (OutboxEventEntity event : pendingEvents) { + event.markProcessing(); + } + outboxEventJpaRepository.saveAll(pendingEvents); + + long duration = System.currentTimeMillis() - startTime; + metrics.recordPhase1Duration(duration); + + log.info("[Relay Phase 1] PROCESSING 전환 {}건 ({}ms)", pendingEvents.size(), duration); + } + + /** + * Phase 2: PROCESSING → Kafka 발행 (느림, 병렬 처리) + * + * Partition Key별로 그룹핑하여 순차 처리 (순서 보장) + * - 같은 Partition Key는 순차 발행 + * - 다른 Partition Key는 병렬 발행 + */ + @Scheduled(fixedDelay = 1000) + public void publishProcessingEvents() { + if (shuttingDown.get()) { + log.info("[Relay Phase 2] Shutting down, skipping this cycle"); + return; + } + + phase2Latch = new CountDownLatch(1); + try { + executePhase2(); + } finally { + phase2Latch.countDown(); + } + } + + private void executePhase2() { + long startTime = System.currentTimeMillis(); + + List processingEvents = outboxEventJpaRepository.findProcessingEvents(BATCH_SIZE); + + if (processingEvents.isEmpty()) { + return; + } + + log.info("[Relay Phase 2] PROCESSING 이벤트 {}건 발행 시작", processingEvents.size()); + + // Partition Key별 그룹핑 (순서 보장) + var groupedByPartitionKey = processingEvents.stream() + .collect(java.util.stream.Collectors.groupingBy( + OutboxEventEntity::getPartitionKey, + java.util.LinkedHashMap::new, + java.util.stream.Collectors.toList() + )); + + // Partition Key별 병렬 발행 (다른 Key는 병렬, 같은 Key는 순차) + groupedByPartitionKey.values().parallelStream().forEach(events -> { + for (OutboxEventEntity event : events) { + try { + publishToKafka(event); + } catch (Exception e) { + // 개별 이벤트 실패해도 다른 이벤트 계속 처리 + log.error("[Relay Phase 2] Unexpected error processing event {}: {}", + event.getId(), e.getMessage(), e); + event.markFailed("Unexpected error: " + e.getMessage()); + metrics.recordPublishFailure(); + } + } + }); + + // 상태 업데이트 (PUBLISHED or FAILED) + transactionTemplate.executeWithoutResult(status -> { + outboxEventJpaRepository.saveAll(processingEvents); + }); + + long duration = System.currentTimeMillis() - startTime; + metrics.recordPhase2Duration(duration); + + log.info("[Relay Phase 2] 상태 업데이트 완료 {}건 ({}ms)", processingEvents.size(), duration); + } + + /** + * PROCESSING 복구 로직 (5분 간격) + * + * 5분 이상 PROCESSING 상태인 이벤트를 PENDING으로 복원 + * - Phase 2 실패 시 복구 + * - 앱 크래시 후 재시작 시 복구 + */ + @Scheduled(fixedDelay = 300000) // 5분 + @Transactional + public void recoverStalledProcessingEvents() { + ZonedDateTime threshold = ZonedDateTime.now().minusMinutes(5); + List stalledEvents = outboxEventJpaRepository.findStalledProcessingEvents(threshold); + + if (stalledEvents.isEmpty()) { + return; + } + + log.warn("[Relay] PROCESSING 5분 이상 경과 {}건 → PENDING 복원", stalledEvents.size()); + stalledEvents.forEach(OutboxEventEntity::markRetry); + outboxEventJpaRepository.saveAll(stalledEvents); + } + + @Scheduled(fixedDelay = 30000) + @Transactional + public void retryFailedEvents() { + List retryableEvents = outboxEventJpaRepository.findRetryableEvents(MAX_RETRY, BATCH_SIZE); + if (retryableEvents.isEmpty()) { + return; + } + + log.info("[Relay] FAILED 이벤트 {}건 재시도", retryableEvents.size()); + for (OutboxEventEntity event : retryableEvents) { + event.markRetry(); + publishToKafka(event); + } + } + + @Scheduled(fixedDelay = 3600000) + @Transactional + public void cleanupPublishedEvents() { + ZonedDateTime cutoff = ZonedDateTime.now().minusDays(7); + List oldEvents = outboxEventJpaRepository.findPublishedBefore(cutoff); + if (!oldEvents.isEmpty()) { + outboxEventJpaRepository.deleteAll(oldEvents); + log.info("[Relay] PUBLISHED 이벤트 {}건 정리 (7일 이전)", oldEvents.size()); + } + } + + private void publishToKafka(OutboxEventEntity event) { + try { + // Kafka 헤더에 eventType, aggregateType, outboxId를 포함 + // → Consumer가 payload 파싱 없이 이벤트 타입을 판별할 수 있음 + var producerRecord = new org.apache.kafka.clients.producer.ProducerRecord( + event.getTopic(), null, event.getPartitionKey(), event.getPayload()); + producerRecord.headers() + .add("X-Event-Type", event.getEventType().getBytes(java.nio.charset.StandardCharsets.UTF_8)) + .add("X-Aggregate-Type", event.getAggregateType().getBytes(java.nio.charset.StandardCharsets.UTF_8)) + .add("X-Outbox-Id", String.valueOf(event.getId()).getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + SendResult result = kafkaTemplate + .send(producerRecord) + .get(10, TimeUnit.SECONDS); + + var metadata = result.getRecordMetadata(); + log.info("[Relay] 발행 성공 — outboxId={}, topic={}, partition={}, offset={}", + event.getId(), event.getTopic(), metadata.partition(), metadata.offset()); + event.markPublished(); + metrics.recordPublishSuccess(); + + } catch (ExecutionException e) { + log.error("[Relay] 발행 실패 — outboxId={}, error={}", event.getId(), e.getCause().getMessage()); + event.markFailed(e.getCause().getMessage()); + metrics.recordPublishFailure(); + } catch (TimeoutException e) { + log.error("[Relay] 발행 타임아웃 — outboxId={}", event.getId()); + event.markFailed("Kafka send timeout (10s)"); + metrics.recordPublishFailure(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + event.markFailed("Interrupted"); + metrics.recordPublishFailure(); + } + } + + /** + * Graceful Shutdown + * + * 앱 종료 시: + * 1. 새로운 Phase 2 실행 중단 + * 2. 현재 실행 중인 Phase 2 완료 대기 (최대 30초) + * 3. 미완료 PROCESSING → PENDING 복원 + */ + @PreDestroy + public void onShutdown() { + log.info("[Relay] Graceful shutdown started"); + shuttingDown.set(true); + + // 현재 실행 중인 Phase 2 완료 대기 + CountDownLatch currentLatch = phase2Latch; + if (currentLatch != null) { + try { + boolean completed = currentLatch.await(SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (completed) { + log.info("[Relay] Phase 2 completed gracefully"); + } else { + log.warn("[Relay] Phase 2 did not complete within {}s, proceeding with recovery", + SHUTDOWN_TIMEOUT_SECONDS); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("[Relay] Shutdown interrupted"); + } + } + + // 미완료 PROCESSING → PENDING 복원 + recoverProcessingOnShutdown(); + + log.info("[Relay] Graceful shutdown completed"); + } + + @Transactional + protected void recoverProcessingOnShutdown() { + List processingEvents = outboxEventJpaRepository + .findProcessingEvents(Integer.MAX_VALUE); + + if (!processingEvents.isEmpty()) { + log.info("[Relay] Recovering {} PROCESSING events to PENDING on shutdown", + processingEvents.size()); + + processingEvents.forEach(event -> event.markRetry()); + outboxEventJpaRepository.saveAll(processingEvents); + } + } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxStatus.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxStatus.java new file mode 100644 index 000000000..93a68fdac --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/outbox/OutboxStatus.java @@ -0,0 +1,8 @@ +package com.loopers.infrastructure.outbox; + +public enum OutboxStatus { + PENDING, // 발행 대기 + PROCESSING, // 발행 중 (멀티 인스턴스 중복 방지) + PUBLISHED, // 발행 완료 + FAILED // 발행 실패 +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java new file mode 100644 index 000000000..458738b06 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java @@ -0,0 +1,80 @@ +package com.loopers.infrastructure.product; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * 상품 메트릭 집계 테이블 (인프라 전용) + * + * Consumer가 이벤트를 수신하면 이 테이블에 upsert: + * - 조회 수 (view_count) + * - 좋아요 수 (like_count) + * - 판매량 (sales_count) + * + * PK = product_id → 상품당 1 row → upsert로 집계 + */ +@Entity +@Table(name = "product_metrics") +public class ProductMetricsEntity { + + @Id + @Column(name = "product_id") + private Long productId; + + @Column(name = "view_count", nullable = false) + private long viewCount; + + @Column(name = "like_count", nullable = false) + private long likeCount; + + @Column(name = "sales_count", nullable = false) + private long salesCount; + + @Column(name = "updated_at", nullable = false) + private ZonedDateTime updatedAt; + + protected ProductMetricsEntity() { + } + + public static ProductMetricsEntity create(Long productId) { + ProductMetricsEntity entity = new ProductMetricsEntity(); + entity.productId = productId; + entity.viewCount = 0; + entity.likeCount = 0; + entity.salesCount = 0; + entity.updatedAt = ZonedDateTime.now(); + return entity; + } + + public void incrementViewCount() { + this.viewCount++; + this.updatedAt = ZonedDateTime.now(); + } + + public void incrementLikeCount() { + this.likeCount++; + this.updatedAt = ZonedDateTime.now(); + } + + public void decrementLikeCount() { + if (this.likeCount > 0) { + this.likeCount--; + } + this.updatedAt = ZonedDateTime.now(); + } + + public void addSalesCount(int quantity) { + this.salesCount += quantity; + this.updatedAt = ZonedDateTime.now(); + } + + public Long getProductId() { return productId; } + public long getViewCount() { return viewCount; } + public long getLikeCount() { return likeCount; } + public long getSalesCount() { return salesCount; } + public ZonedDateTime getUpdatedAt() { return updatedAt; } +} diff --git a/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java new file mode 100644 index 000000000..4f8a260dd --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java @@ -0,0 +1,6 @@ +package com.loopers.infrastructure.product; + +import org.springframework.data.jpa.repository.JpaRepository; + +public interface ProductMetricsJpaRepository extends JpaRepository { +} diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/admin/DlqAdminController.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/admin/DlqAdminController.java new file mode 100644 index 000000000..2a47dc379 --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/admin/DlqAdminController.java @@ -0,0 +1,66 @@ +package com.loopers.interfaces.api.admin; + +import com.loopers.infrastructure.dlq.DlqReprocessingService; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Map; + +/** + * DLQ 재처리 어드민 API + * + * DLQ(pipeline-dlq-v1)에 쌓인 메시지를 수동으로 재처리하기 위한 API. + * + * 재처리 프로세스: + * 1. event_handled에서 해당 eventId 삭제 (멱등성 레코드 초기화) + * 2. 원본 토픽으로 메시지 재발행 + * 3. 기존 Consumer가 정상 파이프라인으로 재처리 + */ +@RestController +@RequestMapping("/admin/dlq") +public class DlqAdminController { + + private final DlqReprocessingService dlqReprocessingService; + + public DlqAdminController(DlqReprocessingService dlqReprocessingService) { + this.dlqReprocessingService = dlqReprocessingService; + } + + /** + * 특정 eventId의 멱등성 레코드 삭제 + 원본 토픽 재발행 + */ + @PostMapping("/reprocess") + public ResponseEntity> reprocess(@RequestBody DlqReprocessRequest request) { + dlqReprocessingService.reprocess(request.eventId(), request.originalTopic(), + request.partitionKey(), request.payload()); + return ResponseEntity.ok(Map.of( + "status", "REPROCESSED", + "eventId", request.eventId(), + "originalTopic", request.originalTopic() + )); + } + + /** + * 특정 eventId의 멱등성 레코드만 삭제 (재발행은 별도 수행) + */ + @DeleteMapping("/event-handled/{eventId}") + public ResponseEntity> deleteEventHandled(@PathVariable String eventId) { + boolean deleted = dlqReprocessingService.deleteEventHandled(eventId); + return ResponseEntity.ok(Map.of( + "eventId", eventId, + "deleted", deleted + )); + } + + public record DlqReprocessRequest( + String eventId, + String originalTopic, + String partitionKey, + String payload + ) {} +} diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponApiSpec.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponApiSpec.java index 4b504e787..415ba1bf8 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponApiSpec.java +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponApiSpec.java @@ -9,10 +9,14 @@ @Tag(name = "Coupon API", description = "쿠폰 API") public interface CouponApiSpec { - @Operation(summary = "쿠폰 발급", description = "쿠폰 템플릿 기반으로 쿠폰을 발급합니다.") - ApiResponse issueCoupon( + @Operation(summary = "선착순 쿠폰 발급 요청", description = "비동기 FCFS 발급 요청. 202 응답 후 결과를 polling합니다.") + ApiResponse requestCouponIssue( @AuthUser User user, Long couponId); + @Operation(summary = "쿠폰 발급 결과 조회", description = "비동기 발급 요청의 처리 결과를 polling합니다.") + ApiResponse getCouponIssueResult( + @AuthUser User user, Long requestId); + @Operation(summary = "내 쿠폰 목록 조회", description = "본인이 보유한 쿠폰 목록을 조회합니다.") ApiResponse getMyCoupons(@AuthUser User user); diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java index 94c56f72a..37c675298 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponController.java @@ -4,9 +4,11 @@ import com.loopers.domain.user.User; import com.loopers.interfaces.api.ApiResponse; import com.loopers.support.auth.AuthUser; +import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import java.util.List; @@ -20,14 +22,33 @@ public CouponController(CouponFacade couponFacade) { this.couponFacade = couponFacade; } + /** + * 선착순 쿠폰 발급 요청 (비동기 FCFS) + * + * 모든 쿠폰 발급은 Kafka 파이프라인을 통해 처리된다. + * 202 Accepted 응답 후 클라이언트가 결과를 polling. + */ @PostMapping("/api/v1/coupons/{couponId}/issue") + @ResponseStatus(HttpStatus.ACCEPTED) @Override - public ApiResponse issueCoupon( + public ApiResponse requestCouponIssue( @AuthUser User user, @PathVariable Long couponId) { - CouponFacade.IssueCouponResult result = couponFacade.issueCoupon(couponId, user.getId()); - return ApiResponse.success(new CouponResponse.IssueCouponResponse( - result.issuedCouponId(), result.status())); + CouponFacade.CouponIssueRequestResult result = + couponFacade.requestCouponIssue(couponId, user.getId()); + return ApiResponse.success(new CouponResponse.CouponIssueRequestResponse( + result.requestId(), result.eventId(), result.status())); + } + + @GetMapping("/api/v1/coupons/issue-requests/{requestId}") + @Override + public ApiResponse getCouponIssueResult( + @AuthUser User user, + @PathVariable Long requestId) { + CouponFacade.CouponIssueRequestResult result = + couponFacade.getCouponIssueResult(requestId); + return ApiResponse.success(new CouponResponse.CouponIssueRequestResponse( + result.requestId(), result.eventId(), result.status())); } @GetMapping("/api/v1/users/me/coupons") diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponResponse.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponResponse.java index 2e47ee144..34c1ad55c 100644 --- a/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponResponse.java +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/api/coupon/CouponResponse.java @@ -19,7 +19,7 @@ public record IssuedCouponDetail( public record CouponListResponse(List coupons) {} - public record IssueCouponResponse(Long issuedCouponId, String status) {} + public record CouponIssueRequestResponse(Long requestId, String eventId, String status) {} public record AvailableCouponDetail( Long couponTemplateId, String name, String description, diff --git a/apps/commerce-api/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java b/apps/commerce-api/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java new file mode 100644 index 000000000..459ad6a7c --- /dev/null +++ b/apps/commerce-api/src/main/java/com/loopers/interfaces/consumer/CouponIssueConsumer.java @@ -0,0 +1,69 @@ +package com.loopers.interfaces.consumer; + +import com.loopers.application.coupon.CouponIssueProcessor; +import com.loopers.application.coupon.CouponIssueProcessor.BusinessFailureException; +import com.loopers.infrastructure.dlq.DlqPublisher; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.stereotype.Component; + +import java.util.List; + +/** + * 선착순 쿠폰 발급 Consumer — 메시지 수신 + ACK + DLQ만 담당 + * + * Interfaces 레이어의 책임: "요청 수신" + * Controller가 HTTP 요청을 받아서 Facade에 위임하듯이, + * Consumer가 Kafka 메시지를 받아서 Processor에 위임한다. + * + * 비즈니스 처리는 CouponIssueProcessor(@Service)에 위임: + * → 프록시를 통한 호출 → @Transactional 정상 동작 + * → self-invocation 방지 → 발급 + 상태 업데이트 + event_handled 같은 TX + * + * 예외 전략 (건별 격리 — 배치 내 1건 실패가 나머지 건을 중단시키지 않음): + * BusinessFailureException → 별도 TX로 FAILED 기록 (재시도 불필요) + * 그 외 Exception → DLQ로 수동 발행 후 나머지 건 계속 처리 + */ +@Component +public class CouponIssueConsumer { + + private static final Logger log = LoggerFactory.getLogger(CouponIssueConsumer.class); + + private final CouponIssueProcessor processor; + private final DlqPublisher dlqPublisher; + + public CouponIssueConsumer(CouponIssueProcessor processor, DlqPublisher dlqPublisher) { + this.processor = processor; + this.dlqPublisher = dlqPublisher; + } + + @KafkaListener( + topics = "coupon-issue-requests-v1", + groupId = "coupon-issue-group", + containerFactory = "BATCH_LISTENER_DEFAULT" + ) + public void consume(List> records, Acknowledgment ack) { + for (ConsumerRecord record : records) { + try { + String payload = record.value().toString(); + processor.process(payload); + + } catch (BusinessFailureException e) { + // 비즈니스 실패 → process()의 TX는 rollback-only 상태. + // 별도 TX(REQUIRES_NEW)로 FAILED + event_handled 기록. + processor.markFailedInNewTx(e.getRequestId(), e.getEventId(), e.getMessage()); + log.warn("[CouponIssue] 비즈니스 실패 — error={}", e.getMessage()); + + } catch (Exception e) { + // 인프라 장애 → DLQ로 수동 발행 후 나머지 건 계속 처리 + log.error("[CouponIssue] 인프라 실패 → DLQ — partition={}, offset={}, error={}", + record.partition(), record.offset(), e.getMessage(), e); + dlqPublisher.sendToDlq(record, e); + } + } + ack.acknowledge(); + } +} diff --git a/apps/commerce-api/src/main/resources/application-local.yml b/apps/commerce-api/src/main/resources/application-local.yml new file mode 100644 index 000000000..d42f49c0c --- /dev/null +++ b/apps/commerce-api/src/main/resources/application-local.yml @@ -0,0 +1,36 @@ +# 로컬 환경 Kafka 토픽 설정 +# 단일 브로커 환경 (docker/infra-compose.yml) + +kafka: + topic: + catalog-events: + name: catalog-events-v1 + partitions: 3 + replicas: 1 # 로컬 단일 브로커 + min-insync-replicas: 1 # ISR 최소 1개 (Leader만) + + order-events: + name: order-events-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + + coupon-issue-requests: + name: coupon-issue-requests-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + + user-activity-events: + name: user-activity-events-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + retention-ms: 259200000 # 3일 (3 * 24 * 60 * 60 * 1000) + + pipeline-dlq: + name: pipeline-dlq-v1 + partitions: 1 + replicas: 1 + min-insync-replicas: 1 + retention-ms: 2592000000 # 30일 (30 * 24 * 60 * 60 * 1000) diff --git a/apps/commerce-api/src/main/resources/application-prd.yml b/apps/commerce-api/src/main/resources/application-prd.yml new file mode 100644 index 000000000..dbf820419 --- /dev/null +++ b/apps/commerce-api/src/main/resources/application-prd.yml @@ -0,0 +1,36 @@ +# 프로덕션 환경 Kafka 토픽 설정 +# 3-브로커 클러스터 환경 + +kafka: + topic: + catalog-events: + name: catalog-events-v1 + partitions: 3 + replicas: 3 # 3-브로커 클러스터 + min-insync-replicas: 2 # ISR 최소 2개 (안전성) + + order-events: + name: order-events-v1 + partitions: 3 + replicas: 3 + min-insync-replicas: 2 + + coupon-issue-requests: + name: coupon-issue-requests-v1 + partitions: 3 + replicas: 3 + min-insync-replicas: 2 + + user-activity-events: + name: user-activity-events-v1 + partitions: 3 + replicas: 3 + min-insync-replicas: 1 # 로깅용이므로 낮은 안전성 허용 + retention-ms: 259200000 # 3일 + + pipeline-dlq: + name: pipeline-dlq-v1 + partitions: 1 + replicas: 3 + min-insync-replicas: 2 + retention-ms: 2592000000 # 30일 diff --git a/apps/commerce-api/src/main/resources/application.yml b/apps/commerce-api/src/main/resources/application.yml index cec712c10..1dc8bcd90 100644 --- a/apps/commerce-api/src/main/resources/application.yml +++ b/apps/commerce-api/src/main/resources/application.yml @@ -21,6 +21,7 @@ spring: import: - jpa.yml - redis.yml + - kafka.yml - logging.yml - monitoring.yml diff --git a/apps/commerce-api/src/main/resources/db/migration/V1__Add_updated_at_and_indexes.sql b/apps/commerce-api/src/main/resources/db/migration/V1__Add_updated_at_and_indexes.sql new file mode 100644 index 000000000..f26f3e84b --- /dev/null +++ b/apps/commerce-api/src/main/resources/db/migration/V1__Add_updated_at_and_indexes.sql @@ -0,0 +1,19 @@ +-- Outbox 개선: updated_at 컬럼 및 인덱스 추가 + +-- updated_at 컬럼 추가 +ALTER TABLE outbox_event +ADD COLUMN updated_at TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6); + +-- 기존 데이터 updated_at 초기화 (created_at과 동일) +UPDATE outbox_event +SET updated_at = created_at +WHERE updated_at IS NULL; + +-- 인덱스 최적화 (MySQL 호환 — Partial Index는 PostgreSQL 전용이므로 복합 인덱스로 대체) +-- status가 선두 컬럼이면 PENDING/PROCESSING 조회 모두 커버 +CREATE INDEX IF NOT EXISTS idx_outbox_status_updated ON outbox_event (status, updated_at); +CREATE INDEX IF NOT EXISTS idx_outbox_status_created ON outbox_event (status, created_at); + +-- 참고: PostgreSQL이면 WHERE 조건부 Partial Index가 더 효율적이지만 +-- 이 프로젝트는 MySQL 사용이므로 복합 인덱스로 대체. +-- JPA @Table 어노테이션의 인덱스와 중복될 수 있으나, Flyway 도입 시 정리 예정. diff --git a/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeAsyncIssueTest.java b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeAsyncIssueTest.java new file mode 100644 index 000000000..416d77d52 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponFacadeAsyncIssueTest.java @@ -0,0 +1,127 @@ +package com.loopers.application.coupon; + +import com.loopers.domain.coupon.CouponTemplate; +import com.loopers.domain.coupon.CouponTemplateRepository; +import com.loopers.domain.coupon.DiscountType; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestStatus; +import com.loopers.infrastructure.outbox.OutboxEventJpaRepository; +import com.loopers.infrastructure.outbox.OutboxStatus; +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.time.ZonedDateTime; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * 선착순 쿠폰 비동기 발급 요청 테스트 + * + * 검증 대상: + * 1. requestCouponIssue() 호출 시 요청 이력(PENDING) + Outbox가 같은 TX에 저장되는가 + * 2. 폴링으로 발급 결과를 조회할 수 있는가 + * 3. Outbox에 올바른 topic, partitionKey가 저장되는가 + */ +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class CouponFacadeAsyncIssueTest { + + @Autowired + private CouponFacade couponFacade; + + @Autowired + private CouponTemplateRepository couponTemplateRepository; + + @Autowired + private CouponIssueRequestJpaRepository couponIssueRequestRepository; + + @Autowired + private OutboxEventJpaRepository outboxEventRepository; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("비동기 발급 요청 시 요청 이력(PENDING)과 Outbox가 같은 TX에 저장된다") + void 비동기_발급_요청_원자성() { + // arrange + CouponTemplate template = createTemplate(100); + + // act + CouponFacade.CouponIssueRequestResult result = couponFacade.requestCouponIssue(template.getId(), 1L); + + // assert — 요청 이력 확인 + assertThat(result.status()).isEqualTo("PENDING"); + assertThat(result.requestId()).isNotNull(); + assertThat(result.eventId()).isNotNull(); + + CouponIssueRequestEntity request = couponIssueRequestRepository.findById(result.requestId()).orElseThrow(); + assertThat(request.getStatus()).isEqualTo(CouponIssueRequestStatus.PENDING); + assertThat(request.getCouponTemplateId()).isEqualTo(template.getId()); + assertThat(request.getUserId()).isEqualTo(1L); + + // assert — Outbox 확인 + var pendingOutbox = outboxEventRepository.findPendingEvents(50); + assertThat(pendingOutbox).hasSize(1); + assertThat(pendingOutbox.get(0).getTopic()).isEqualTo("coupon-issue-requests-v1"); + assertThat(pendingOutbox.get(0).getPartitionKey()).isEqualTo(String.valueOf(template.getId())); + assertThat(pendingOutbox.get(0).getEventType()).isEqualTo("CouponIssueRequestedEvent"); + assertThat(pendingOutbox.get(0).getStatus()).isEqualTo(OutboxStatus.PENDING); + } + + @Test + @DisplayName("폴링으로 발급 결과를 조회할 수 있다 — 초기 상태는 PENDING") + void 폴링_결과_조회() { + // arrange + CouponTemplate template = createTemplate(100); + CouponFacade.CouponIssueRequestResult requested = couponFacade.requestCouponIssue(template.getId(), 1L); + + // act + CouponFacade.CouponIssueRequestResult polled = couponFacade.getCouponIssueResult(requested.requestId()); + + // assert + assertThat(polled.status()).isEqualTo("PENDING"); + assertThat(polled.requestId()).isEqualTo(requested.requestId()); + } + + @Test + @DisplayName("여러 유저의 발급 요청이 각각 별도 Outbox 이벤트로 저장된다") + void 다건_발급_요청() { + // arrange + CouponTemplate template = createTemplate(100); + + // act + couponFacade.requestCouponIssue(template.getId(), 1L); + couponFacade.requestCouponIssue(template.getId(), 2L); + couponFacade.requestCouponIssue(template.getId(), 3L); + + // assert + assertThat(couponIssueRequestRepository.count()).isEqualTo(3); + assertThat(outboxEventRepository.findPendingEvents(50)).hasSize(3); + + // 모든 Outbox의 partitionKey가 같은 templateId → 같은 Kafka 파티션 + var outboxEvents = outboxEventRepository.findPendingEvents(50); + assertThat(outboxEvents).allMatch(e -> + e.getPartitionKey().equals(String.valueOf(template.getId()))); + } + + private CouponTemplate createTemplate(int maxIssueCount) { + return couponTemplateRepository.save( + CouponTemplate.define("선착순쿠폰", "테스트용", DiscountType.FIXED, 1000, null, + 0, maxIssueCount, 1, + ZonedDateTime.now().minusDays(1), ZonedDateTime.now().plusDays(30)) + ); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponIssueProcessorTest.java b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponIssueProcessorTest.java new file mode 100644 index 000000000..bfc4059c6 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/application/coupon/CouponIssueProcessorTest.java @@ -0,0 +1,165 @@ +package com.loopers.application.coupon; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.coupon.CouponTemplate; +import com.loopers.domain.coupon.CouponTemplateRepository; +import com.loopers.domain.coupon.DiscountType; +import com.loopers.domain.coupon.IssuedCouponRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestStatus; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.time.ZonedDateTime; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * CouponIssueProcessor 테스트 — @Transactional 원자성 검증 + * + * self-invocation 수정 후: + * Consumer → Processor 위임 → 프록시를 통한 호출 → @Transactional 정상 동작 + * + * 검증 포인트: + * 1. 발급 성공 시 issue + markIssued + event_handled가 모두 반영 + * 2. 비즈니스 실패 시 markFailed + event_handled가 같은 TX에서 반영 + * 3. 멱등성 — 같은 eventId 2번 처리해도 1번만 발급 + * 4. @Transactional 롤백 — 중간 실패 시 전체 롤백 + */ +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class CouponIssueProcessorTest { + + @Autowired + private CouponIssueProcessor processor; + + @Autowired + private CouponTemplateRepository couponTemplateRepository; + + @Autowired + private IssuedCouponRepository issuedCouponRepository; + + @Autowired + private CouponIssueRequestJpaRepository couponIssueRequestRepository; + + @Autowired + private EventHandledJpaRepository eventHandledRepository; + + @Autowired + private ObjectMapper objectMapper; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("발급 성공 시 쿠폰 발급 + 요청 ISSUED + event_handled가 모두 반영된다") + void 발급_성공_원자성() throws Exception { + // arrange + CouponTemplate template = createTemplate(100); + String eventId = UUID.randomUUID().toString(); + CouponIssueRequestEntity request = createRequest(template.getId(), 1L, eventId); + + String payload = buildPayload(request.getId(), template.getId(), 1L, eventId); + + // act + processor.process(payload); + + // assert — 3가지 모두 반영됨 (같은 TX) + assertThat(issuedCouponRepository.countByCouponTemplateId(template.getId())).isEqualTo(1); + + CouponIssueRequestEntity updated = couponIssueRequestRepository.findById(request.getId()).orElseThrow(); + assertThat(updated.getStatus()).isEqualTo(CouponIssueRequestStatus.ISSUED); + assertThat(updated.getIssuedCouponId()).isNotNull(); + + assertThat(eventHandledRepository.existsByEventId(eventId)).isTrue(); + } + + @Test + @DisplayName("재고 소진 시 요청 FAILED + event_handled가 같은 TX에서 반영된다") + void 재고_소진_실패_원자성() throws Exception { + // arrange — 최대 1장 + CouponTemplate template = createTemplate(1); + + // 1장 먼저 발급 + String eventId1 = UUID.randomUUID().toString(); + CouponIssueRequestEntity request1 = createRequest(template.getId(), 1L, eventId1); + processor.process(buildPayload(request1.getId(), template.getId(), 1L, eventId1)); + + // 2번째 요청 — 재고 소진 + String eventId2 = UUID.randomUUID().toString(); + CouponIssueRequestEntity request2 = createRequest(template.getId(), 2L, eventId2); + + // act — 비즈니스 실패 → BusinessFailureException throw (TX rollback-only) + // Consumer에서 catch → markFailedInNewTx() 호출 흐름을 테스트에서 재현 + try { + processor.process(buildPayload(request2.getId(), template.getId(), 2L, eventId2)); + } catch (CouponIssueProcessor.BusinessFailureException e) { + // Consumer가 하는 것과 동일: 별도 TX로 FAILED 기록 + processor.markFailedInNewTx(e.getRequestId(), e.getEventId(), e.getMessage()); + } + + // assert — FAILED + event_handled 모두 반영 (별도 TX에서 커밋) + CouponIssueRequestEntity updated = couponIssueRequestRepository.findById(request2.getId()).orElseThrow(); + assertThat(updated.getStatus()).isEqualTo(CouponIssueRequestStatus.FAILED); + assertThat(updated.getFailureReason()).isNotNull(); + + // 실패해도 event_handled에 기록 → 재처리 방지 + assertThat(eventHandledRepository.existsByEventId(eventId2)).isTrue(); + + // 총 발급 수 = 1장 (초과 발급 없음) + assertThat(issuedCouponRepository.countByCouponTemplateId(template.getId())).isEqualTo(1); + } + + @Test + @DisplayName("같은 eventId를 2번 처리해도 1번만 발급된다 — 멱등성") + void 멱등성_중복_방지() throws Exception { + // arrange + CouponTemplate template = createTemplate(100); + String eventId = UUID.randomUUID().toString(); + CouponIssueRequestEntity request = createRequest(template.getId(), 1L, eventId); + String payload = buildPayload(request.getId(), template.getId(), 1L, eventId); + + // act — 같은 payload 2번 처리 + processor.process(payload); + processor.process(payload); // 2번째는 중복 스킵 + + // assert — 1번만 발급 + assertThat(issuedCouponRepository.countByCouponTemplateId(template.getId())).isEqualTo(1); + } + + private CouponTemplate createTemplate(int maxIssueCount) { + return couponTemplateRepository.save( + CouponTemplate.define("선착순쿠폰", "테스트", DiscountType.FIXED, 1000, null, + 0, maxIssueCount, 1, + ZonedDateTime.now().minusDays(1), ZonedDateTime.now().plusDays(30)) + ); + } + + private CouponIssueRequestEntity createRequest(Long templateId, Long userId, String eventId) { + return couponIssueRequestRepository.save( + CouponIssueRequestEntity.create(templateId, userId, eventId)); + } + + private String buildPayload(Long requestId, Long templateId, Long userId, String eventId) + throws Exception { + return objectMapper.writeValueAsString( + new PayloadRecord(requestId, templateId, userId, eventId)); + } + + record PayloadRecord(Long requestId, Long templateId, Long userId, String eventId) {} +} diff --git a/apps/commerce-api/src/test/java/com/loopers/concurrency/KafkaCouponConcurrencyTest.java b/apps/commerce-api/src/test/java/com/loopers/concurrency/KafkaCouponConcurrencyTest.java new file mode 100644 index 000000000..affa87333 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/concurrency/KafkaCouponConcurrencyTest.java @@ -0,0 +1,253 @@ +package com.loopers.concurrency; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.application.coupon.CouponFacade; +import com.loopers.domain.coupon.CouponTemplate; +import com.loopers.domain.coupon.CouponTemplateRepository; +import com.loopers.domain.coupon.DiscountType; +import com.loopers.domain.coupon.IssuedCouponRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestStatus; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import com.loopers.infrastructure.outbox.OutboxEventJpaRepository; +import com.loopers.interfaces.consumer.CouponIssueConsumer; +import com.loopers.utils.DatabaseCleanUp; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.internals.RecordHeader; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.nio.charset.StandardCharsets; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Kafka 기반 선착순 쿠폰 발급 동시성 테스트 + * + * 검증 대상: + * 1. N명이 동시에 발급 요청 → Outbox에 N건 저장 (Producer 동시성) + * 2. Consumer가 순차 처리 → 정확히 maxIssueCount만 발급 (Consumer 동시성) + * 3. 초과 발급 0건 + * 4. 발급 성공 수 + 실패 수 = 요청 수 + * + * Kafka를 거치지 않고 Consumer를 직접 호출하여 테스트: + * → Kafka 파티션 순차 처리를 시뮬레이션 + * → 실제 Kafka E2E는 별도 통합 테스트에서 검증 + */ +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class KafkaCouponConcurrencyTest { + + @Autowired + private CouponFacade couponFacade; + + @Autowired + private CouponIssueConsumer couponIssueConsumer; + + @Autowired + private CouponTemplateRepository couponTemplateRepository; + + @Autowired + private IssuedCouponRepository issuedCouponRepository; + + @Autowired + private CouponIssueRequestJpaRepository couponIssueRequestRepository; + + @Autowired + private OutboxEventJpaRepository outboxEventRepository; + + @Autowired + private EventHandledJpaRepository eventHandledRepository; + + @Autowired + private ObjectMapper objectMapper; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("100명이 동시에 선착순 쿠폰(10장)을 요청하면, 정확히 10명만 발급된다") + void 선착순_쿠폰_동시_발급_요청_100명_10장() throws InterruptedException { + // arrange — 최대 10장 발급 가능한 쿠폰 + CouponTemplate template = createTemplate(10); + int userCount = 100; + + // act 1: 100명이 동시에 발급 요청 (Producer 동시성) + List requestResults = + concurrentIssueRequests(template.getId(), userCount); + + // assert 1: 100건 모두 PENDING으로 생성됨 + assertThat(requestResults).hasSize(userCount); + assertThat(couponIssueRequestRepository.count()).isEqualTo(userCount); + assertThat(outboxEventRepository.count()).isEqualTo(userCount); + + // act 2: Consumer가 순차 처리 (Kafka 파티션 순차 처리 시뮬레이션) + processAllRequestsSequentially(requestResults); + + // assert 2: 정확히 10명만 발급 + long issuedCount = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(issuedCount).isEqualTo(10); + + // assert 3: 발급 성공 10건 + 실패 90건 = 100건 + long successCount = couponIssueRequestRepository.findAll().stream() + .filter(r -> r.getStatus() == CouponIssueRequestStatus.ISSUED) + .count(); + long failCount = couponIssueRequestRepository.findAll().stream() + .filter(r -> r.getStatus() == CouponIssueRequestStatus.FAILED) + .count(); + + assertThat(successCount).isEqualTo(10); + assertThat(failCount).isEqualTo(90); + assertThat(successCount + failCount).isEqualTo(userCount); + + // assert 4: 멱등성 기록도 100건 + assertThat(eventHandledRepository.count()).isEqualTo(userCount); + } + + @Test + @DisplayName("200명이 동시에 선착순 쿠폰(50장)을 요청하면, 정확히 50명만 발급된다") + void 선착순_쿠폰_동시_발급_요청_200명_50장() throws InterruptedException { + // arrange + CouponTemplate template = createTemplate(50); + int userCount = 200; + + // act + List requestResults = + concurrentIssueRequests(template.getId(), userCount); + processAllRequestsSequentially(requestResults); + + // assert + long issuedCount = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(issuedCount).isEqualTo(50); + + long successCount = couponIssueRequestRepository.findAll().stream() + .filter(r -> r.getStatus() == CouponIssueRequestStatus.ISSUED) + .count(); + long failCount = couponIssueRequestRepository.findAll().stream() + .filter(r -> r.getStatus() == CouponIssueRequestStatus.FAILED) + .count(); + + assertThat(successCount).isEqualTo(50); + assertThat(failCount).isEqualTo(150); + } + + @Test + @DisplayName("같은 유저가 같은 쿠폰을 2번 요청해도 1번만 발급된다 — Consumer 멱등성") + void 동일_유저_중복_요청_멱등성() throws Exception { + // arrange + CouponTemplate template = createTemplate(100); + + // 같은 유저가 2번 요청 + CouponFacade.CouponIssueRequestResult request1 = couponFacade.requestCouponIssue(template.getId(), 1L); + CouponFacade.CouponIssueRequestResult request2 = couponFacade.requestCouponIssue(template.getId(), 1L); + + // act — Consumer 순차 처리 + processRequest(request1); + processRequest(request2); + + // assert — CouponService.issue()의 유저별 발급 수 체크로 2번째 실패 + // 첫 번째: ISSUED, 두 번째: FAILED (maxIssueCountPerUser=1 초과) + long issuedCount = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(issuedCount).isEqualTo(1); + } + + // ────────────────────────────────────────────── + + private CouponTemplate createTemplate(int maxIssueCount) { + return couponTemplateRepository.save( + CouponTemplate.define("선착순쿠폰", "동시성테스트용", DiscountType.FIXED, 1000, null, + 0, maxIssueCount, 1, + ZonedDateTime.now().minusDays(1), ZonedDateTime.now().plusDays(30)) + ); + } + + /** + * N명이 동시에 requestCouponIssue()를 호출 + * startLatch로 모든 스레드가 동시에 출발하도록 보장 + */ + private List concurrentIssueRequests(Long templateId, int userCount) + throws InterruptedException { + ExecutorService executor = Executors.newFixedThreadPool(Math.min(userCount, 50)); + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch doneLatch = new CountDownLatch(userCount); + List results = new ArrayList<>(); + AtomicInteger errorCount = new AtomicInteger(0); + + for (int i = 0; i < userCount; i++) { + long userId = i + 1L; + executor.submit(() -> { + try { + startLatch.await(); + CouponFacade.CouponIssueRequestResult result = + couponFacade.requestCouponIssue(templateId, userId); + synchronized (results) { + results.add(result); + } + } catch (Exception e) { + errorCount.incrementAndGet(); + } finally { + doneLatch.countDown(); + } + }); + } + + startLatch.countDown(); // 모든 스레드 동시 출발 + doneLatch.await(); + executor.shutdown(); + + assertThat(errorCount.get()).isZero(); // 요청 자체는 전부 성공해야 함 + return results; + } + + /** + * 모든 발급 요청을 Consumer가 순차 처리 (Kafka 파티션 순차 처리 시뮬레이션) + */ + private void processAllRequestsSequentially(List requests) + throws InterruptedException { + for (CouponFacade.CouponIssueRequestResult request : requests) { + processRequest(request); + } + } + + private void processRequest(CouponFacade.CouponIssueRequestResult request) throws InterruptedException { + CouponIssueRequestEntity entity = couponIssueRequestRepository.findById(request.requestId()).orElseThrow(); + + try { + String payload = objectMapper.writeValueAsString( + new CouponPayload(entity.getId(), entity.getCouponTemplateId(), + entity.getUserId(), entity.getEventId())); + + ConsumerRecord record = new ConsumerRecord<>( + "coupon-issue-requests-v1", 0, 0L, + String.valueOf(entity.getCouponTemplateId()), payload); + record.headers().add(new RecordHeader("X-Event-Type", + "CouponIssueRequestedEvent".getBytes(StandardCharsets.UTF_8))); + + couponIssueConsumer.consume(List.of(record), () -> {}); + + } catch (Exception e) { + // Consumer 내부에서 처리되므로 여기까지 오면 안 됨 + throw new RuntimeException("Consumer 호출 실패", e); + } + } + + record CouponPayload(Long requestId, Long templateId, Long userId, String eventId) {} +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/EventHandledEntityTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/EventHandledEntityTest.java new file mode 100644 index 000000000..c3544ff58 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/event/EventHandledEntityTest.java @@ -0,0 +1,69 @@ +package com.loopers.infrastructure.event; + +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.dao.DataIntegrityViolationException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class EventHandledEntityTest { + + @Autowired + private EventHandledJpaRepository eventHandledJpaRepository; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("이벤트 처리 기록을 저장할 수 있다") + void save_event_handled() { + // given + EventHandledEntity entity = EventHandledEntity.of("event-123", "order-events-v1"); + + // when + EventHandledEntity saved = eventHandledJpaRepository.save(entity); + + // then + assertThat(saved.getId()).isNotNull(); + assertThat(saved.getEventId()).isEqualTo("event-123"); + assertThat(saved.getTopic()).isEqualTo("order-events-v1"); + assertThat(saved.getHandledAt()).isNotNull(); + } + + @Test + @DisplayName("이미 처리한 eventId를 조회할 수 있다") + void exists_by_event_id() { + // given + eventHandledJpaRepository.save(EventHandledEntity.of("event-123", "order-events-v1")); + + // when & then + assertThat(eventHandledJpaRepository.existsByEventId("event-123")).isTrue(); + assertThat(eventHandledJpaRepository.existsByEventId("event-999")).isFalse(); + } + + @Test + @DisplayName("같은 eventId를 중복 저장하면 UNIQUE 제약 조건 위반 예외가 발생한다") + void duplicate_event_id_throws_exception() { + // given + eventHandledJpaRepository.save(EventHandledEntity.of("event-123", "order-events-v1")); + + // when & then + assertThatThrownBy(() -> + eventHandledJpaRepository.saveAndFlush(EventHandledEntity.of("event-123", "order-events-v1")) + ).isInstanceOf(DataIntegrityViolationException.class); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxEventServiceTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxEventServiceTest.java new file mode 100644 index 000000000..fe6448cea --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxEventServiceTest.java @@ -0,0 +1,242 @@ +package com.loopers.infrastructure.outbox; + +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class OutboxEventServiceTest { + + @Autowired + private OutboxEventService outboxEventService; + + @Autowired + private OutboxEventJpaRepository outboxEventJpaRepository; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("Outbox에 이벤트를 저장하면 PENDING 상태로 생성된다") + void save_creates_pending_event() { + // given + Map payload = Map.of("orderId", 1L, "userId", 1L, "totalAmount", 29900); + + // when + OutboxEventEntity saved = outboxEventService.save( + "ORDER", 1L, "OrderConfirmedEvent", payload, + "order-events-v1", "1" + ); + + // then + assertThat(saved.getId()).isNotNull(); + assertThat(saved.getAggregateType()).isEqualTo("ORDER"); + assertThat(saved.getAggregateId()).isEqualTo(1L); + assertThat(saved.getEventType()).isEqualTo("OrderConfirmedEvent"); + assertThat(saved.getTopic()).isEqualTo("order-events-v1"); + assertThat(saved.getPartitionKey()).isEqualTo("1"); + assertThat(saved.getStatus()).isEqualTo(OutboxStatus.PENDING); + assertThat(saved.getRetryCount()).isEqualTo(0); + assertThat(saved.getCreatedAt()).isNotNull(); + assertThat(saved.getPublishedAt()).isNull(); + } + + @Test + @DisplayName("PENDING 이벤트를 조회할 수 있다") + void find_pending_events() { + // given + outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + outboxEventService.save("PRODUCT", 100L, "ProductLikedEvent", + Map.of("productId", 100L), "catalog-events-v1", "100"); + + // when + List pendingEvents = outboxEventJpaRepository.findPendingEvents(50); + + // then + assertThat(pendingEvents).hasSize(2); + assertThat(pendingEvents).allMatch(e -> e.getStatus() == OutboxStatus.PENDING); + } + + @Test + @DisplayName("PUBLISHED 상태로 변경하면 PENDING 조회에서 제외된다") + void published_event_excluded_from_pending_query() { + // given + OutboxEventEntity event = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + event.markPublished(); + outboxEventJpaRepository.save(event); + + // when + List pendingEvents = outboxEventJpaRepository.findPendingEvents(50); + + // then + assertThat(pendingEvents).isEmpty(); + } + + @Test + @DisplayName("FAILED 상태로 변경하면 retryCount가 증가한다") + void failed_event_increments_retry_count() { + // given + OutboxEventEntity event = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + + // when + event.markFailed("Connection refused"); + outboxEventJpaRepository.save(event); + + // then + OutboxEventEntity found = outboxEventJpaRepository.findById(event.getId()).orElseThrow(); + assertThat(found.getStatus()).isEqualTo(OutboxStatus.FAILED); + assertThat(found.getRetryCount()).isEqualTo(1); + assertThat(found.getErrorMessage()).isEqualTo("Connection refused"); + } + + @Test + @DisplayName("재시도 대상 이벤트를 조회할 수 있다") + void find_retryable_events() { + // given + OutboxEventEntity event = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + event.markFailed("timeout"); + outboxEventJpaRepository.save(event); + + // when + List retryable = outboxEventJpaRepository.findRetryableEvents(5, 50); + + // then + assertThat(retryable).hasSize(1); + assertThat(retryable.get(0).getRetryCount()).isEqualTo(1); + } + + @Test + @DisplayName("maxRetry 초과한 이벤트는 재시도 대상에서 제외된다") + void exceeded_max_retry_excluded() { + // given + OutboxEventEntity event = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + // 5회 실패 + for (int i = 0; i < 5; i++) { + event.markFailed("fail " + i); + } + outboxEventJpaRepository.save(event); + + // when + List retryable = outboxEventJpaRepository.findRetryableEvents(5, 50); + + // then + assertThat(retryable).isEmpty(); + } + + @Test + @DisplayName("payload가 JSON으로 직렬화되어 저장된다") + void payload_serialized_as_json() { + // given + record TestPayload(Long orderId, Long userId, int totalAmount) {} + TestPayload payload = new TestPayload(1L, 1L, 29900); + + // when + OutboxEventEntity saved = outboxEventService.save( + "ORDER", 1L, "OrderConfirmedEvent", payload, + "order-events-v1", "1" + ); + + // then + assertThat(saved.getPayload()).contains("\"orderId\":1"); + assertThat(saved.getPayload()).contains("\"totalAmount\":29900"); + } + + @Test + @DisplayName("markProcessing 호출 시 상태가 PROCESSING으로 변경된다") + void mark_processing_changes_status() { + // given + OutboxEventEntity event = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + assertThat(event.getStatus()).isEqualTo(OutboxStatus.PENDING); + + // when + event.markProcessing(); + outboxEventJpaRepository.save(event); + + // then + OutboxEventEntity found = outboxEventJpaRepository.findById(event.getId()).orElseThrow(); + assertThat(found.getStatus()).isEqualTo(OutboxStatus.PROCESSING); + } + + @Test + @DisplayName("PROCESSING 상태 이벤트를 조회할 수 있다") + void find_processing_events() { + // given + OutboxEventEntity event1 = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + OutboxEventEntity event2 = outboxEventService.save("PRODUCT", 100L, "ProductLikedEvent", + Map.of("productId", 100L), "catalog-events-v1", "100"); + + event1.markProcessing(); + outboxEventJpaRepository.save(event1); + + // when + List processingEvents = outboxEventJpaRepository.findProcessingEvents(50); + + // then + assertThat(processingEvents).hasSize(1); + assertThat(processingEvents.get(0).getId()).isEqualTo(event1.getId()); + assertThat(processingEvents.get(0).getStatus()).isEqualTo(OutboxStatus.PROCESSING); + } + + @Test + @DisplayName("PROCESSING 상태는 PENDING 조회에서 제외된다") + void processing_excluded_from_pending_query() { + // given + OutboxEventEntity event1 = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + OutboxEventEntity event2 = outboxEventService.save("PRODUCT", 100L, "ProductLikedEvent", + Map.of("productId", 100L), "catalog-events-v1", "100"); + + event1.markProcessing(); + outboxEventJpaRepository.save(event1); + + // when + List pendingEvents = outboxEventJpaRepository.findPendingEvents(50); + + // then + assertThat(pendingEvents).hasSize(1); + assertThat(pendingEvents.get(0).getId()).isEqualTo(event2.getId()); + } + + @Test + @DisplayName("findPendingEventsForUpdate는 PENDING 상태만 조회한다") + void find_pending_events_for_update() { + // given + OutboxEventEntity pending = outboxEventService.save("ORDER", 1L, "OrderConfirmedEvent", + Map.of("orderId", 1L), "order-events-v1", "1"); + OutboxEventEntity processing = outboxEventService.save("ORDER", 2L, "OrderConfirmedEvent", + Map.of("orderId", 2L), "order-events-v1", "2"); + processing.markProcessing(); + outboxEventJpaRepository.save(processing); + + // when + List result = outboxEventJpaRepository.findPendingEventsForUpdate(50); + + // then + assertThat(result).hasSize(1); + assertThat(result.get(0).getId()).isEqualTo(pending.getId()); + assertThat(result.get(0).getStatus()).isEqualTo(OutboxStatus.PENDING); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxGracefulShutdownTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxGracefulShutdownTest.java new file mode 100644 index 000000000..ba29ab14a --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxGracefulShutdownTest.java @@ -0,0 +1,109 @@ +package com.loopers.infrastructure.outbox; + +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.transaction.support.TransactionTemplate; + +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Graceful Shutdown 및 복구 로직 통합 테스트 + */ +@SpringBootTest +@DisplayName("OutboxRelayService — Graceful Shutdown 및 복구 테스트") +class OutboxGracefulShutdownTest { + + @Autowired + private OutboxEventService outboxEventService; + + @Autowired + private OutboxEventJpaRepository repository; + + @Autowired + private OutboxRelayService relayService; + + @Autowired + private TransactionTemplate transactionTemplate; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("Graceful Shutdown 시 PROCESSING → PENDING 복원") + void gracefulShutdownRecoversPROCESSINGtoPENDING() { + // given: 10개 이벤트 생성 + for (int i = 1; i <= 10; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: PENDING → PROCESSING 전환 + transactionTemplate.executeWithoutResult(status -> { + List pending = repository.findPendingEventsForUpdate(50); + pending.forEach(OutboxEventEntity::markProcessing); + repository.saveAll(pending); + }); + + // then: PROCESSING 10개 + assertThat(repository.countByStatus(OutboxStatus.PROCESSING)).isEqualTo(10); + + // when: Graceful Shutdown 호출 + relayService.onShutdown(); + + // then: PROCESSING → PENDING 복원됨 + assertThat(repository.countByStatus(OutboxStatus.PROCESSING)).isEqualTo(0); + assertThat(repository.countByStatus(OutboxStatus.PENDING)).isEqualTo(10); + } + + @Test + @DisplayName("5분 이상 PROCESSING 이벤트 복구") + void recoverStalledProcessingEvents() { + // given: PROCESSING 이벤트 생성 (5분 이상 경과 시뮬레이션 어려움) + // 실제 운영에서는 updated_at이 5분 이전인 이벤트를 찾아서 복구 + // 이 테스트는 복구 메서드 호출만 검증 + + // when + relayService.recoverStalledProcessingEvents(); + + // then: 에러 없이 실행됨 + assertThat(repository.countByStatus(OutboxStatus.PENDING)).isEqualTo(0); + } + + @Test + @DisplayName("Shutdown 플래그 설정 시 Phase 2 건너뜀") + void phase2SkipsWhenShuttingDown() { + // given: PROCESSING 이벤트 생성 + for (int i = 1; i <= 5; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + transactionTemplate.executeWithoutResult(status -> { + List pending = repository.findPendingEventsForUpdate(50); + pending.forEach(OutboxEventEntity::markProcessing); + repository.saveAll(pending); + }); + + // when: Shutdown 플래그 설정 + relayService.onShutdown(); + + // when: Phase 2 호출 시도 (shuttingDown=true이므로 스킵) + relayService.publishProcessingEvents(); + + // then: PROCESSING 그대로 유지 (Phase 2 스킵됨) + assertThat(repository.countByStatus(OutboxStatus.PROCESSING)).isEqualTo(0); // 이미 복원됨 + assertThat(repository.countByStatus(OutboxStatus.PENDING)).isEqualTo(5); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxMetricsTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxMetricsTest.java new file mode 100644 index 000000000..af6925096 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxMetricsTest.java @@ -0,0 +1,109 @@ +package com.loopers.infrastructure.outbox; + +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.time.ZonedDateTime; +import java.util.Optional; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +@DisplayName("OutboxMetrics — 메트릭 수집 테스트") +class OutboxMetricsTest { + + private MeterRegistry registry; + + @Mock + private OutboxEventJpaRepository repository; + + private OutboxMetrics metrics; + + @BeforeEach + void setUp() { + MockitoAnnotations.openMocks(this); + registry = new SimpleMeterRegistry(); + metrics = new OutboxMetrics(registry, repository); + } + + @Test + @DisplayName("발행 성공 카운터가 증가한다") + void recordPublishSuccess_increments_counter() { + // when + metrics.recordPublishSuccess(); + metrics.recordPublishSuccess(); + metrics.recordPublishSuccess(); + + // then + double count = registry.counter("outbox.publish.success").count(); + assertThat(count).isEqualTo(3.0); + } + + @Test + @DisplayName("발행 실패 카운터가 증가한다") + void recordPublishFailure_increments_counter() { + // when + metrics.recordPublishFailure(); + metrics.recordPublishFailure(); + + // then + double count = registry.counter("outbox.publish.failed").count(); + assertThat(count).isEqualTo(2.0); + } + + @Test + @DisplayName("Phase 1 처리 시간을 기록한다") + void recordPhase1Duration() { + // when + metrics.recordPhase1Duration(150); + metrics.recordPhase1Duration(200); + + // then + double totalTime = registry.timer("outbox.phase1.duration").totalTime(java.util.concurrent.TimeUnit.MILLISECONDS); + assertThat(totalTime).isEqualTo(350.0); + } + + @Test + @DisplayName("Phase 2 처리 시간을 기록한다") + void recordPhase2Duration() { + // when + metrics.recordPhase2Duration(500); + metrics.recordPhase2Duration(300); + + // then + double totalTime = registry.timer("outbox.phase2.duration").totalTime(java.util.concurrent.TimeUnit.MILLISECONDS); + assertThat(totalTime).isEqualTo(800.0); + } + + @Test + @DisplayName("PENDING 개수 Gauge가 등록되어 있다") + void pendingCountGaugeIsRegistered() { + // when + when(repository.countByStatus(OutboxStatus.PENDING)).thenReturn(5L); + + // then + assertThat(registry.find("outbox.pending.count").gauge()).isNotNull(); + } + + @Test + @DisplayName("PROCESSING 개수 Gauge가 등록되어 있다") + void processingCountGaugeIsRegistered() { + // when + when(repository.countByStatus(OutboxStatus.PROCESSING)).thenReturn(3L); + + // then + assertThat(registry.find("outbox.processing.count").gauge()).isNotNull(); + } + + @Test + @DisplayName("모든 타이머가 등록되어 있다") + void timersAreRegistered() { + assertThat(registry.find("outbox.phase1.duration").timer()).isNotNull(); + assertThat(registry.find("outbox.phase2.duration").timer()).isNotNull(); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxPerformanceTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxPerformanceTest.java new file mode 100644 index 000000000..c7dce0ef2 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxPerformanceTest.java @@ -0,0 +1,178 @@ +package com.loopers.infrastructure.outbox; + +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Outbox Relay 성능 테스트 + * + * 목적: + * 1. 실제 처리량 측정 (건/초) + * 2. Phase 1/2 처리 시간 측정 + * 3. 이론값 vs 실측값 비교 + */ +@SpringBootTest +@DisplayName("OutboxRelayService — 성능 테스트") +class OutboxPerformanceTest { + + private static final Logger log = LoggerFactory.getLogger(OutboxPerformanceTest.class); + + @Autowired + private OutboxEventService outboxEventService; + + @Autowired + private OutboxEventJpaRepository repository; + + @Autowired + private OutboxRelayService relayService; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("1000건 이벤트 처리 성능 측정") + void measure_throughput_for_1000_events() throws InterruptedException { + // given: 1000개 이벤트 생성 + int totalEvents = 1000; + log.info("=== 1000건 이벤트 생성 시작 ==="); + long createStart = System.currentTimeMillis(); + + for (int i = 1; i <= totalEvents; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i % 10)); // 10개 파티션 + } + + long createDuration = System.currentTimeMillis() - createStart; + log.info("이벤트 생성 완료: {}ms", createDuration); + + // when: Relay 실행 (Phase 1 + Phase 2 반복) + log.info("=== Relay 시작 ==="); + long relayStart = System.currentTimeMillis(); + + int maxIterations = 100; // 최대 100번 (100초) + int iteration = 0; + + while (iteration < maxIterations) { + // Phase 1: PENDING → PROCESSING + long phase1Start = System.currentTimeMillis(); + relayService.markPendingAsProcessing(); + long phase1Duration = System.currentTimeMillis() - phase1Start; + + // Phase 2: PROCESSING → Kafka (실제로는 Kafka 없이 상태만 변경) + long phase2Start = System.currentTimeMillis(); + // relayService.publishProcessingEvents(); // Kafka 없으면 주석 처리 + long phase2Duration = System.currentTimeMillis() - phase2Start; + + long pendingCount = repository.countByStatus(OutboxStatus.PENDING); + long processingCount = repository.countByStatus(OutboxStatus.PROCESSING); + + if (iteration % 10 == 0) { + log.info("Iteration {}: Phase1={}ms, Phase2={}ms, PENDING={}, PROCESSING={}", + iteration, phase1Duration, phase2Duration, pendingCount, processingCount); + } + + // PENDING이 0이면 종료 + if (pendingCount == 0 && processingCount >= totalEvents) { + break; + } + + iteration++; + TimeUnit.MILLISECONDS.sleep(100); // 0.1초 대기 (실제는 1초) + } + + long relayEnd = System.currentTimeMillis(); + long totalDuration = relayEnd - relayStart; + + // then: 결과 측정 + long processingCount = repository.countByStatus(OutboxStatus.PROCESSING); + + double throughputPerSecond = (double) processingCount / (totalDuration / 1000.0); + double avgIterationTime = (double) totalDuration / iteration; + + log.info("=== 성능 측정 결과 ==="); + log.info("총 이벤트: {}", totalEvents); + log.info("처리된 이벤트: {}", processingCount); + log.info("총 소요 시간: {}ms ({}초)", totalDuration, totalDuration / 1000.0); + log.info("총 반복 횟수: {}", iteration); + log.info("평균 반복 시간: {}ms", avgIterationTime); + log.info("실측 처리량: {} 건/초", throughputPerSecond); + log.info("이론 처리량: 500건/초 (1초 × 500건 배치)"); + + assertThat(processingCount).isGreaterThanOrEqualTo((long) (totalEvents * 0.9)); // 90% 이상 처리 + } + + @Test + @DisplayName("Phase 1 처리 시간 측정 (배치 500건)") + void measure_phase1_duration() { + // given: 500개 이벤트 생성 + for (int i = 1; i <= 500; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: Phase 1 실행 10회 + long totalDuration = 0; + int iterations = 10; + + for (int i = 0; i < iterations; i++) { + long start = System.currentTimeMillis(); + relayService.markPendingAsProcessing(); + long duration = System.currentTimeMillis() - start; + totalDuration += duration; + + log.info("Phase 1 - Iteration {}: {}ms", i + 1, duration); + } + + // then: 평균 시간 계산 + double avgDuration = (double) totalDuration / iterations; + log.info("=== Phase 1 평균 처리 시간 ==="); + log.info("평균: {}ms", avgDuration); + log.info("최대: {}ms (단일 측정)", totalDuration / iterations); + + assertThat(avgDuration).isLessThan(1000); // 1초 이내 + } + + @Test + @DisplayName("배치 크기별 처리 시간 비교") + void compare_batch_sizes() { + int[] batchSizes = {50, 100, 200, 500}; + + log.info("=== 배치 크기별 처리 시간 비교 ==="); + + for (int batchSize : batchSizes) { + databaseCleanUp.truncateAllTables(); + + // given: batchSize만큼 이벤트 생성 + for (int i = 1; i <= batchSize; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: Phase 1 실행 + long start = System.currentTimeMillis(); + relayService.markPendingAsProcessing(); + long duration = System.currentTimeMillis() - start; + + long processingCount = repository.countByStatus(OutboxStatus.PROCESSING); + + log.info("배치 크기: {}, 처리 시간: {}ms, 처리된 이벤트: {}", + batchSize, duration, processingCount); + } + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayPerformanceE2ETest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayPerformanceE2ETest.java new file mode 100644 index 000000000..a0b4b46d9 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayPerformanceE2ETest.java @@ -0,0 +1,350 @@ +package com.loopers.infrastructure.outbox; + +import com.loopers.infrastructure.event.EventHandledEntity; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.mockito.Mockito; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.scheduling.annotation.ScheduledAnnotationBeanPostProcessor; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; + +/** + * Outbox Relay E2E 성능 테스트 — Phase 1 + Phase 2 (실제 Kafka 발행) + * + * 전제: + * - Docker Kafka가 localhost:19092에서 실행 중이어야 함 + * - docker compose -f ./docker/infra-compose.yml up -d + */ +@SpringBootTest +@TestPropertySource(properties = { + "kafka.topic.catalog-events.name=catalog-events-v1", + "kafka.topic.catalog-events.partitions=3", + "kafka.topic.catalog-events.replicas=1", + "kafka.topic.catalog-events.min-insync-replicas=1", + "kafka.topic.order-events.name=order-events-v1", + "kafka.topic.order-events.partitions=3", + "kafka.topic.order-events.replicas=1", + "kafka.topic.order-events.min-insync-replicas=1", + "kafka.topic.coupon-issue-requests.name=coupon-issue-requests-v1", + "kafka.topic.coupon-issue-requests.partitions=3", + "kafka.topic.coupon-issue-requests.replicas=1", + "kafka.topic.coupon-issue-requests.min-insync-replicas=1", + "kafka.topic.user-activity-events.name=user-activity-events-v1", + "kafka.topic.user-activity-events.partitions=3", + "kafka.topic.user-activity-events.replicas=1", + "kafka.topic.user-activity-events.min-insync-replicas=1", + "kafka.topic.user-activity-events.retention-ms=259200000", + "kafka.topic.pipeline-dlq.name=pipeline-dlq-v1", + "kafka.topic.pipeline-dlq.partitions=1", + "kafka.topic.pipeline-dlq.replicas=1", + "kafka.topic.pipeline-dlq.min-insync-replicas=1", + "kafka.topic.pipeline-dlq.retention-ms=2592000000" +}) +@DisplayName("Outbox Relay — E2E 성능 테스트") +class OutboxRelayPerformanceE2ETest { + + private static final Logger log = LoggerFactory.getLogger(OutboxRelayPerformanceE2ETest.class); + + @Autowired + private OutboxEventService outboxEventService; + + @Autowired + private OutboxEventJpaRepository repository; + + @Autowired + private OutboxRelayService relayService; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @Autowired + private EventHandledJpaRepository eventHandledRepository; + + @Autowired + private ScheduledAnnotationBeanPostProcessor scheduledProcessor; + + @BeforeEach + void setUp() { + // 스케줄러 비활성화 — 테스트와 @Scheduled 메서드 충돌 방지 + scheduledProcessor.destroy(); + databaseCleanUp.truncateAllTables(); + } + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("시나리오 1: Relay 기준선 — 1000건 Phase 1 + Phase 2") + void scenario1_relay_baseline_1000_events() { + int totalEvents = 1000; + + // Given: 1000건 INSERT (10 partition key x 100건) + long insertStart = System.nanoTime(); + for (int pk = 0; pk < 10; pk++) { + for (int i = 0; i < 100; i++) { + outboxEventService.save( + "PRODUCT", (long) pk, + "TestEvent", Map.of("pk", pk, "seq", i), + "catalog-events-v1", String.valueOf(pk) + ); + } + } + long insertDuration = (System.nanoTime() - insertStart) / 1_000_000; + + assertThat(repository.countByStatus(OutboxStatus.PENDING)).isEqualTo(totalEvents); + log.info("=== 시나리오 1: Relay 기준선 ==="); + log.info("1000건 INSERT: {}ms", insertDuration); + + // Phase 1: PENDING → PROCESSING + List phase1Times = new ArrayList<>(); + int phase1Batch = 0; + while (repository.countByStatus(OutboxStatus.PENDING) > 0) { + phase1Batch++; + long start = System.nanoTime(); + relayService.markPendingAsProcessing(); + long duration = (System.nanoTime() - start) / 1_000_000; + phase1Times.add(duration); + } + long phase1Total = phase1Times.stream().mapToLong(Long::longValue).sum(); + + // Phase 2: PROCESSING → Kafka 발행 → PUBLISHED + List phase2Times = new ArrayList<>(); + int phase2Batch = 0; + while (repository.countByStatus(OutboxStatus.PROCESSING) > 0) { + phase2Batch++; + long start = System.nanoTime(); + relayService.publishProcessingEvents(); + long duration = (System.nanoTime() - start) / 1_000_000; + phase2Times.add(duration); + } + long phase2Total = phase2Times.stream().mapToLong(Long::longValue).sum(); + + // 결과 출력 + log.info("--- Phase 1 ---"); + for (int i = 0; i < phase1Times.size(); i++) { + log.info("Phase 1 ({}차): {}ms (500건)", i + 1, phase1Times.get(i)); + } + log.info("Phase 1 합계: {}ms", phase1Total); + + log.info("--- Phase 2 ---"); + for (int i = 0; i < phase2Times.size(); i++) { + log.info("Phase 2 ({}차): {}ms (500건)", i + 1, phase2Times.get(i)); + } + log.info("Phase 2 합계: {}ms", phase2Total); + + long total = phase1Total + phase2Total; + double throughput = (double) totalEvents / total * 1000; + long published = repository.countByStatus(OutboxStatus.PUBLISHED); + + log.info("--- 종합 ---"); + log.info("전체: {}ms", total); + log.info("실제 처리량: {} events/sec", String.format("%.1f", throughput)); + log.info("PUBLISHED: {}건", published); + + // 검증 + assertThat(published).isEqualTo(totalEvents); + } + + @Test + @DisplayName("시나리오 2: burst 5000건 — Relay 소화 시간") + void scenario2_burst_5000_events() { + int totalEvents = 5000; + + // Given: 5000건 한꺼번에 INSERT + long insertStart = System.nanoTime(); + for (int i = 0; i < totalEvents; i++) { + outboxEventService.save( + "COUPON", (long) (i % 10), + "CouponIssueRequestedEvent", Map.of("couponId", i), + "coupon-issue-requests-v1", String.valueOf(i % 10) + ); + } + long insertDuration = (System.nanoTime() - insertStart) / 1_000_000; + + log.info("=== 시나리오 2: burst 5000건 ==="); + log.info("5000건 INSERT: {}ms", insertDuration); + + // When: Phase 1 + Phase 2 반복 + long relayStart = System.nanoTime(); + int cycles = 0; + + while (repository.countByStatus(OutboxStatus.PENDING) > 0 + || repository.countByStatus(OutboxStatus.PROCESSING) > 0) { + relayService.markPendingAsProcessing(); + relayService.publishProcessingEvents(); + cycles++; + + long pending = repository.countByStatus(OutboxStatus.PENDING); + long processing = repository.countByStatus(OutboxStatus.PROCESSING); + log.info("Cycle {}: PENDING={}, PROCESSING={}", cycles, pending, processing); + } + + long totalDuration = (System.nanoTime() - relayStart) / 1_000_000; + long published = repository.countByStatus(OutboxStatus.PUBLISHED); + double throughput = (double) totalEvents / totalDuration * 1000; + + log.info("--- 종합 ---"); + log.info("전체 소화: {}ms, {} cycles", totalDuration, cycles); + log.info("실제 처리량: {} events/sec", String.format("%.1f", throughput)); + log.info("PUBLISHED: {}건", published); + + // 검증 + assertThat(published).isEqualTo(totalEvents); + } + + @Test + @DisplayName("시나리오 3: Kafka 지연 시 Phase 2 영향 — 5분 threshold 도달 조건") + void scenario3_kafka_latency_phase2_impact() { + // KafkaTemplate을 spy로 감싸서 send()에 지연 주입 + @SuppressWarnings("unchecked") + KafkaTemplate originalTemplate = + (KafkaTemplate) ReflectionTestUtils.getField(relayService, "kafkaTemplate"); + KafkaTemplate spyTemplate = Mockito.spy(originalTemplate); + + // 건당 지연 시뮬레이션: Case별로 실행 + int[] delaysMs = {0, 50, 200}; + for (int delayMs : delaysMs) { + databaseCleanUp.truncateAllTables(); + + // 500건 INSERT (1배치) + for (int pk = 0; pk < 10; pk++) { + for (int i = 0; i < 50; i++) { + outboxEventService.save( + "PRODUCT", (long) pk, + "TestEvent", Map.of("pk", pk, "seq", i), + "catalog-events-v1", String.valueOf(pk) + ); + } + } + + // 지연 주입 + if (delayMs > 0) { + doAnswer(invocation -> { + Thread.sleep(delayMs); + return invocation.callRealMethod(); + }).when(spyTemplate).send(any(ProducerRecord.class)); + ReflectionTestUtils.setField(relayService, "kafkaTemplate", spyTemplate); + } else { + ReflectionTestUtils.setField(relayService, "kafkaTemplate", originalTemplate); + } + + // Phase 1 + relayService.markPendingAsProcessing(); + + // Phase 2 측정 + long phase2Start = System.nanoTime(); + relayService.publishProcessingEvents(); + long phase2Duration = (System.nanoTime() - phase2Start) / 1_000_000; + + long published = repository.countByStatus(OutboxStatus.PUBLISHED); + long failed = repository.countByStatus(OutboxStatus.FAILED); + + log.info("=== 시나리오 3: 건당 {}ms 지연 ===", delayMs); + log.info("Phase 2 소요: {}ms (500건)", phase2Duration); + log.info("Phase 2 소요: {}초", String.format("%.1f", phase2Duration / 1000.0)); + log.info("PUBLISHED: {}건, FAILED: {}건", published, failed); + log.info("5분(300초) 대비: {}%", String.format("%.1f", phase2Duration / 3000.0)); + + // 500건 burst에서 건당 200ms면 → parallelStream 병렬도에 따라 달라짐 + // commonPool 크기 = CPU-1. 10개 partition key면 병렬도 ~10이지만 commonPool 제한 + } + + // 원본 복원 + ReflectionTestUtils.setField(relayService, "kafkaTemplate", originalTemplate); + } + + @Test + @DisplayName("시나리오 4: Consumer 멱등성 오버헤드 — event_handled SELECT + INSERT 비용") + void scenario4_consumer_idempotency_overhead() { + int totalEvents = 1000; + + log.info("=== 시나리오 4: Consumer 멱등성 오버헤드 측정 ==="); + + // Case A: event_handled가 비어있을 때 existsByEventId (miss) + save + long checkMissTotal = 0; + long saveTotal = 0; + + for (int i = 0; i < totalEvents; i++) { + String eventId = "perf-test-event-" + i; + + // existsByEventId (MISS — 존재하지 않음) + long checkStart = System.nanoTime(); + boolean exists = eventHandledRepository.existsByEventId(eventId); + long checkDuration = System.nanoTime() - checkStart; + checkMissTotal += checkDuration; + + assertThat(exists).isFalse(); + + // save (INSERT) + long saveStart = System.nanoTime(); + eventHandledRepository.save(EventHandledEntity.of(eventId, "catalog-events-v1")); + long saveDuration = System.nanoTime() - saveStart; + saveTotal += saveDuration; + } + + double avgCheckMissMs = (checkMissTotal / 1_000_000.0) / totalEvents; + double avgSaveMs = (saveTotal / 1_000_000.0) / totalEvents; + + log.info("--- Case A: 신규 이벤트 (MISS → INSERT) ---"); + log.info("existsByEventId (MISS) 평균: {}ms/건", String.format("%.3f", avgCheckMissMs)); + log.info("save (INSERT) 평균: {}ms/건", String.format("%.3f", avgSaveMs)); + log.info("멱등성 오버헤드 합계 (MISS+INSERT): {}ms/건", String.format("%.3f", avgCheckMissMs + avgSaveMs)); + log.info("1000건 기준 총 오버헤드: {}ms", String.format("%.1f", (checkMissTotal + saveTotal) / 1_000_000.0)); + + // Case B: event_handled가 가득 찬 상태에서 existsByEventId (HIT) + long checkHitTotal = 0; + + for (int i = 0; i < totalEvents; i++) { + String eventId = "perf-test-event-" + i; + + long checkStart = System.nanoTime(); + boolean exists = eventHandledRepository.existsByEventId(eventId); + long checkDuration = System.nanoTime() - checkStart; + checkHitTotal += checkDuration; + + assertThat(exists).isTrue(); + } + + double avgCheckHitMs = (checkHitTotal / 1_000_000.0) / totalEvents; + + log.info("--- Case B: 중복 이벤트 (HIT → SKIP) ---"); + log.info("existsByEventId (HIT) 평균: {}ms/건", String.format("%.3f", avgCheckHitMs)); + log.info("1000건 기준 총 오버헤드: {}ms", String.format("%.1f", checkHitTotal / 1_000_000.0)); + + // Case C: 테이블에 대량 레코드가 있을 때 성능 (UNIQUE 인덱스 효과) + log.info("--- Case C: UNIQUE 인덱스 효과 ---"); + log.info("event_handled 레코드 수: {}건", eventHandledRepository.count()); + log.info("HIT 시 SKIP 비용(INSERT 없음): {}ms/건 — MISS 대비 {}% 절감", + String.format("%.3f", avgCheckHitMs), + String.format("%.1f", (1 - avgCheckHitMs / (avgCheckMissMs + avgSaveMs)) * 100)); + + // 종합: Relay 처리량 대비 Consumer 멱등성 오버헤드 비율 + double relay95ThroughputMs = 1000.0 / 95.3; // 시나리오 1 기준 ~10.5ms/건 + double idempotencyOverheadMs = avgCheckMissMs + avgSaveMs; + log.info("--- 종합 ---"); + log.info("Relay 처리 시간: {}ms/건 (95.3건/초 기준)", String.format("%.3f", relay95ThroughputMs)); + log.info("멱등성 오버헤드: {}ms/건", String.format("%.3f", idempotencyOverheadMs)); + log.info("오버헤드 비율: {}%", String.format("%.1f", idempotencyOverheadMs / relay95ThroughputMs * 100)); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayServiceIntegrationTest.java b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayServiceIntegrationTest.java new file mode 100644 index 000000000..989569c13 --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/infrastructure/outbox/OutboxRelayServiceIntegrationTest.java @@ -0,0 +1,237 @@ +package com.loopers.infrastructure.outbox; + +import com.loopers.utils.DatabaseCleanUp; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.transaction.support.TransactionTemplate; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * OutboxRelayService 통합 테스트 — 멀티 스레드 중복 방지 검증 + * + * 핵심 검증 사항: + * 1. FOR UPDATE SKIP LOCKED가 멀티 스레드 환경에서 중복 조회를 방지하는가? + * 2. 2단계 Relay가 순차적으로 동작하는가? + * 3. Partition Key별 순서가 보장되는가? + */ +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class OutboxRelayServiceIntegrationTest { + + @Autowired + private OutboxEventService outboxEventService; + + @Autowired + private OutboxEventJpaRepository outboxEventJpaRepository; + + @Autowired + private TransactionTemplate transactionTemplate; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("멀티 스레드 환경에서 FOR UPDATE SKIP LOCKED가 중복 조회를 방지한다") + void for_update_skip_locked_prevents_duplicate_acquisition() throws InterruptedException { + // given: 100개의 PENDING 이벤트 생성 + for (int i = 1; i <= 100; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: 4개의 스레드가 동시에 PENDING → PROCESSING 전환 시도 + int threadCount = 4; + int batchSize = 50; + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + CountDownLatch latch = new CountDownLatch(threadCount); + Map> acquiredEventsByThread = new ConcurrentHashMap<>(); + + for (int threadId = 0; threadId < threadCount; threadId++) { + int finalThreadId = threadId; + executor.submit(() -> { + try { + List acquired = transactionTemplate.execute(status -> { + List events = + outboxEventJpaRepository.findPendingEventsForUpdate(batchSize); + events.forEach(OutboxEventEntity::markProcessing); + outboxEventJpaRepository.saveAll(events); + return events.stream().map(OutboxEventEntity::getId).toList(); + }); + acquiredEventsByThread.put(finalThreadId, acquired != null ? acquired : List.of()); + } finally { + latch.countDown(); + } + }); + } + + latch.await(10, TimeUnit.SECONDS); + executor.shutdown(); + + // then: 모든 이벤트가 정확히 1번씩만 조회되었는지 검증 + List allAcquiredIds = acquiredEventsByThread.values().stream() + .flatMap(List::stream) + .toList(); + + assertThat(allAcquiredIds).hasSize(100); // 100개 모두 조회됨 + assertThat(allAcquiredIds).doesNotHaveDuplicates(); // 중복 없음 + + // PROCESSING 상태로 전환된 이벤트 수 확인 + long processingCount = outboxEventJpaRepository.countByStatus(OutboxStatus.PROCESSING); + assertThat(processingCount).isEqualTo(100); + } + + @Test + @DisplayName("Phase 1과 Phase 2가 순차적으로 동작한다") + void two_phase_relay_works_sequentially() { + // given: 10개의 PENDING 이벤트 생성 + for (int i = 1; i <= 10; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: Phase 1 실행 (PENDING → PROCESSING) + transactionTemplate.executeWithoutResult(status -> { + List pending = outboxEventJpaRepository.findPendingEventsForUpdate(50); + pending.forEach(OutboxEventEntity::markProcessing); + outboxEventJpaRepository.saveAll(pending); + }); + + // then: PROCESSING 상태 확인 + long processingCount = outboxEventJpaRepository.countByStatus(OutboxStatus.PROCESSING); + assertThat(processingCount).isEqualTo(10); + + // when: Phase 2 실행 (PROCESSING → PUBLISHED 시뮬레이션) + transactionTemplate.executeWithoutResult(status -> { + List processing = outboxEventJpaRepository.findProcessingEvents(50); + processing.forEach(OutboxEventEntity::markPublished); + outboxEventJpaRepository.saveAll(processing); + }); + + // then: PUBLISHED 상태 확인 + long publishedCount = outboxEventJpaRepository.countByStatus(OutboxStatus.PUBLISHED); + assertThat(publishedCount).isEqualTo(10); + } + + @Test + @DisplayName("동일 Partition Key는 순차 처리되어야 한다") + void same_partition_key_processed_sequentially() { + // given: 같은 Partition Key로 5개 이벤트 생성 + String partitionKey = "order-123"; + List eventIds = new ArrayList<>(); + for (int i = 1; i <= 5; i++) { + OutboxEventEntity event = outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i, "seq", i), "order-events-v1", partitionKey); + eventIds.add(event.getId()); + } + + // when: PENDING → PROCESSING 전환 + transactionTemplate.executeWithoutResult(status -> { + List pending = outboxEventJpaRepository.findPendingEventsForUpdate(50); + pending.forEach(OutboxEventEntity::markProcessing); + outboxEventJpaRepository.saveAll(pending); + }); + + // then: PROCESSING 이벤트를 조회하면 생성 순서대로 나와야 함 + List processing = outboxEventJpaRepository.findProcessingEvents(50); + assertThat(processing).hasSize(5); + + for (int i = 0; i < 5; i++) { + assertThat(processing.get(i).getId()).isEqualTo(eventIds.get(i)); + assertThat(processing.get(i).getPartitionKey()).isEqualTo(partitionKey); + } + } + + @Test + @DisplayName("LIMIT보다 많은 이벤트가 있을 때 LIMIT만큼만 조회된다") + void respects_limit_when_more_events_exist() { + // given: 100개의 PENDING 이벤트 생성 + for (int i = 1; i <= 100; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: LIMIT 30으로 조회 + List result = transactionTemplate.execute(status -> + outboxEventJpaRepository.findPendingEventsForUpdate(30) + ); + + // then: 정확히 30개만 조회됨 + assertThat(result).hasSize(30); + } + + @Test + @DisplayName("멀티 스레드 환경에서 각 스레드는 서로 다른 이벤트를 획득한다") + void concurrent_threads_acquire_different_events() throws InterruptedException { + // given: 200개의 PENDING 이벤트 생성 + for (int i = 1; i <= 200; i++) { + outboxEventService.save("ORDER", (long) i, "OrderConfirmedEvent", + Map.of("orderId", i), "order-events-v1", String.valueOf(i)); + } + + // when: 5개의 스레드가 동시에 50개씩 조회 + int threadCount = 5; + int batchSize = 50; + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch endLatch = new CountDownLatch(threadCount); + Map acquiredCountByThread = new ConcurrentHashMap<>(); + AtomicInteger totalAcquired = new AtomicInteger(0); + + for (int threadId = 0; threadId < threadCount; threadId++) { + int finalThreadId = threadId; + executor.submit(() -> { + try { + startLatch.await(); // 모든 스레드 동시 시작 + Integer count = transactionTemplate.execute(status -> { + List events = + outboxEventJpaRepository.findPendingEventsForUpdate(batchSize); + events.forEach(OutboxEventEntity::markProcessing); + outboxEventJpaRepository.saveAll(events); + return events.size(); + }); + acquiredCountByThread.put(finalThreadId, count != null ? count : 0); + totalAcquired.addAndGet(count != null ? count : 0); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + endLatch.countDown(); + } + }); + } + + startLatch.countDown(); // 모든 스레드 시작 + endLatch.await(10, TimeUnit.SECONDS); + executor.shutdown(); + + // then: 총 200개가 중복 없이 조회됨 + assertThat(totalAcquired.get()).isEqualTo(200); + + // 각 스레드가 최소 1개 이상은 획득했는지 확인 (공정성) + assertThat(acquiredCountByThread.values()).allMatch(count -> count > 0); + + // PROCESSING 상태 이벤트 수 확인 + long processingCount = outboxEventJpaRepository.countByStatus(OutboxStatus.PROCESSING); + assertThat(processingCount).isEqualTo(200); + } +} diff --git a/apps/commerce-api/src/test/java/com/loopers/interfaces/consumer/CouponIssueConsumerTest.java b/apps/commerce-api/src/test/java/com/loopers/interfaces/consumer/CouponIssueConsumerTest.java new file mode 100644 index 000000000..08a77c25b --- /dev/null +++ b/apps/commerce-api/src/test/java/com/loopers/interfaces/consumer/CouponIssueConsumerTest.java @@ -0,0 +1,170 @@ +package com.loopers.interfaces.consumer; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.domain.coupon.CouponTemplate; +import com.loopers.domain.coupon.CouponTemplateRepository; +import com.loopers.domain.coupon.DiscountType; +import com.loopers.domain.coupon.IssuedCouponRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestEntity; +import com.loopers.infrastructure.coupon.CouponIssueRequestJpaRepository; +import com.loopers.infrastructure.coupon.CouponIssueRequestStatus; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import com.loopers.utils.DatabaseCleanUp; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.internals.RecordHeader; +import org.apache.kafka.common.record.TimestampType; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DisplayNameGeneration; +import org.junit.jupiter.api.DisplayNameGenerator; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; + +import java.nio.charset.StandardCharsets; +import java.time.ZonedDateTime; +import java.util.List; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * CouponIssueConsumer 통합 테스트 + * + * Consumer의 processRecord()를 직접 호출하여 검증: + * 1. 발급 성공 시 요청 이력이 ISSUED로 업데이트되는가 + * 2. 재고 소진 시 요청 이력이 FAILED로 업데이트되는가 + * 3. 멱등성 — 같은 이벤트를 2번 처리해도 1번만 발급되는가 + */ +@SpringBootTest +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class) +class CouponIssueConsumerTest { + + @Autowired + private CouponIssueConsumer couponIssueConsumer; + + @Autowired + private CouponTemplateRepository couponTemplateRepository; + + @Autowired + private IssuedCouponRepository issuedCouponRepository; + + @Autowired + private CouponIssueRequestJpaRepository couponIssueRequestRepository; + + @Autowired + private EventHandledJpaRepository eventHandledRepository; + + @Autowired + private ObjectMapper objectMapper; + + @Autowired + private DatabaseCleanUp databaseCleanUp; + + @AfterEach + void tearDown() { + databaseCleanUp.truncateAllTables(); + } + + @Test + @DisplayName("Consumer가 발급 요청을 처리하면 쿠폰이 발급되고 요청 이력이 ISSUED로 변경된다") + void 발급_성공() throws Exception { + // arrange + CouponTemplate template = createTemplate(100); + String eventId = UUID.randomUUID().toString(); + CouponIssueRequestEntity request = createRequest(template.getId(), 1L, eventId); + + ConsumerRecord record = buildRecord(request.getId(), template.getId(), 1L, eventId); + + // act + couponIssueConsumer.consume(List.of(record), () -> {}); + + // assert + CouponIssueRequestEntity updated = couponIssueRequestRepository.findById(request.getId()).orElseThrow(); + assertThat(updated.getStatus()).isEqualTo(CouponIssueRequestStatus.ISSUED); + assertThat(updated.getIssuedCouponId()).isNotNull(); + assertThat(updated.getProcessedAt()).isNotNull(); + + long issuedCount = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(issuedCount).isEqualTo(1); + + assertThat(eventHandledRepository.existsByEventId(eventId)).isTrue(); + } + + @Test + @DisplayName("재고 소진 시 요청 이력이 FAILED로 변경된다") + void 재고_소진_실패() throws Exception { + // arrange — 최대 1장 + CouponTemplate template = createTemplate(1); + + // 1장 먼저 발급 + String eventId1 = UUID.randomUUID().toString(); + CouponIssueRequestEntity request1 = createRequest(template.getId(), 1L, eventId1); + couponIssueConsumer.consume(List.of(buildRecord(request1.getId(), template.getId(), 1L, eventId1)), () -> {}); + + // 2번째 요청 — 재고 소진 + String eventId2 = UUID.randomUUID().toString(); + CouponIssueRequestEntity request2 = createRequest(template.getId(), 2L, eventId2); + + // act + couponIssueConsumer.consume(List.of(buildRecord(request2.getId(), template.getId(), 2L, eventId2)), () -> {}); + + // assert + CouponIssueRequestEntity updated = couponIssueRequestRepository.findById(request2.getId()).orElseThrow(); + assertThat(updated.getStatus()).isEqualTo(CouponIssueRequestStatus.FAILED); + assertThat(updated.getFailureReason()).isNotNull(); + assertThat(updated.getProcessedAt()).isNotNull(); + + long totalIssued = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(totalIssued).isEqualTo(1); + } + + @Test + @DisplayName("같은 eventId를 2번 처리해도 1번만 발급된다 — 멱등성") + void 멱등성_중복_방지() throws Exception { + // arrange + CouponTemplate template = createTemplate(100); + String eventId = UUID.randomUUID().toString(); + CouponIssueRequestEntity request = createRequest(template.getId(), 1L, eventId); + + ConsumerRecord record = buildRecord(request.getId(), template.getId(), 1L, eventId); + + // act — 같은 이벤트 2번 처리 + couponIssueConsumer.consume(List.of(record), () -> {}); + couponIssueConsumer.consume(List.of(record), () -> {}); + + // assert — 1번만 발급 + long issuedCount = issuedCouponRepository.countByCouponTemplateId(template.getId()); + assertThat(issuedCount).isEqualTo(1); + } + + private CouponTemplate createTemplate(int maxIssueCount) { + return couponTemplateRepository.save( + CouponTemplate.define("선착순쿠폰", "테스트용", DiscountType.FIXED, 1000, null, + 0, maxIssueCount, 1, + ZonedDateTime.now().minusDays(1), ZonedDateTime.now().plusDays(30)) + ); + } + + private CouponIssueRequestEntity createRequest(Long templateId, Long userId, String eventId) { + return couponIssueRequestRepository.save( + CouponIssueRequestEntity.create(templateId, userId, eventId)); + } + + private ConsumerRecord buildRecord(Long requestId, Long templateId, Long userId, String eventId) + throws Exception { + String payload = objectMapper.writeValueAsString( + new CouponIssuePayload(requestId, templateId, userId, eventId)); + + ConsumerRecord record = new ConsumerRecord<>( + "coupon-issue-requests-v1", 0, 0L, + String.valueOf(templateId), payload); + + record.headers().add(new RecordHeader("X-Event-Type", + "CouponIssueRequestedEvent".getBytes(StandardCharsets.UTF_8))); + + return record; + } + + record CouponIssuePayload(Long requestId, Long templateId, Long userId, String eventId) {} +} diff --git a/apps/commerce-api/src/test/resources/application-test.yml b/apps/commerce-api/src/test/resources/application-test.yml new file mode 100644 index 000000000..69992736c --- /dev/null +++ b/apps/commerce-api/src/test/resources/application-test.yml @@ -0,0 +1,36 @@ +# 테스트 환경 Kafka 토픽 설정 +# Docker Kafka 단일 브로커 (docker/infra-compose.yml) + +kafka: + topic: + catalog-events: + name: catalog-events-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + + order-events: + name: order-events-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + + coupon-issue-requests: + name: coupon-issue-requests-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + + user-activity-events: + name: user-activity-events-v1 + partitions: 3 + replicas: 1 + min-insync-replicas: 1 + retention-ms: 259200000 + + pipeline-dlq: + name: pipeline-dlq-v1 + partitions: 1 + replicas: 1 + min-insync-replicas: 1 + retention-ms: 2592000000 diff --git a/apps/commerce-streamer/src/main/java/com/loopers/application/metrics/CatalogMetricsProcessor.java b/apps/commerce-streamer/src/main/java/com/loopers/application/metrics/CatalogMetricsProcessor.java new file mode 100644 index 000000000..93b7b170c --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/application/metrics/CatalogMetricsProcessor.java @@ -0,0 +1,150 @@ +package com.loopers.application.metrics; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.loopers.infrastructure.event.EventHandledEntity; +import com.loopers.infrastructure.event.EventHandledJpaRepository; +import com.loopers.infrastructure.product.ProductMetricsEntity; +import com.loopers.infrastructure.product.ProductLikeCountJpaRepository; +import com.loopers.infrastructure.product.ProductMetricsJpaRepository; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +/** + * 카탈로그 메트릭 처리 — @Transactional 보장 + * + * Consumer(Interfaces)에서 분리된 비즈니스 처리 Bean. + * Consumer → Processor 위임으로 self-invocation 방지. + * + * Consumer가 this.processRecord()를 호출하면 @Transactional이 무시되지만, + * 별도 Bean인 Processor를 프록시를 통해 호출하면 @Transactional이 정상 동작. + * + * increment + event_handled INSERT가 같은 TX: + * → 하나 실패 → 전체 롤백 → 재처리 시 정합성 유지 + * → increment만 커밋되고 event_handled가 실패하는 시나리오 방지 + * + * 좋아요 파이프라인 단일화: + * product_metrics.like_count + products.like_count를 같은 TX에서 업데이트. + * LikeFacade에서 products.like_count 직접 증분을 제거하고, + * 이 Processor가 단일 파이프라인으로 두 테이블을 동기화한다. + */ +@Service +public class CatalogMetricsProcessor { + + private static final Logger log = LoggerFactory.getLogger(CatalogMetricsProcessor.class); + + private final ObjectMapper objectMapper; + private final ProductMetricsJpaRepository productMetricsRepository; + private final ProductLikeCountJpaRepository productLikeCountRepository; + private final EventHandledJpaRepository eventHandledRepository; + + public CatalogMetricsProcessor(ObjectMapper objectMapper, + ProductMetricsJpaRepository productMetricsRepository, + ProductLikeCountJpaRepository productLikeCountRepository, + EventHandledJpaRepository eventHandledRepository) { + this.objectMapper = objectMapper; + this.productMetricsRepository = productMetricsRepository; + this.productLikeCountRepository = productLikeCountRepository; + this.eventHandledRepository = eventHandledRepository; + } + + /** + * 메트릭 이벤트 처리 — 같은 TX에서 increment + 멱등성 기록 + * + * @return true = 처리됨, false = 중복 스킵 + */ + @Transactional + public boolean process(String eventType, String outboxId, String payload) { + // 멱등성 체크 — increment는 멱등하지 않으므로 반드시 중복 방지 + if (outboxId != null && eventHandledRepository.existsByEventId(outboxId)) { + log.warn("[MetricsProcessor] 중복 스킵 — outboxId={}", outboxId); + return false; + } + + JsonNode node; + try { + node = objectMapper.readTree(payload); + } catch (Exception e) { + log.error("[MetricsProcessor] JSON 파싱 실패 — payload={}", payload, e); + return false; + } + + switch (eventType) { + case "ProductViewedEvent" -> handleProductViewed(node); + case "ProductLikedEvent" -> handleProductLiked(node); + case "ProductUnlikedEvent" -> handleProductUnliked(node); + case "OrderItemSoldEvent" -> handleOrderItemSold(node); + default -> { + log.warn("[MetricsProcessor] 알 수 없는 eventType={}", eventType); + return false; + } + } + + // 멱등성 기록 — increment와 같은 TX (핵심!) + if (outboxId != null) { + eventHandledRepository.save(EventHandledEntity.of(outboxId, "catalog-events-v1")); + } + + return true; + } + + private void handleProductViewed(JsonNode node) { + Long productId = node.path("productId").asLong(); + ProductMetricsEntity metrics = getOrCreateMetrics(productId); + metrics.incrementViewCount(); + productMetricsRepository.save(metrics); + log.debug("[MetricsProcessor] 조회 수 집계 완료 — productId={}, viewCount={}", + productId, metrics.getViewCount()); + } + + private void handleProductLiked(JsonNode node) { + Long productId = node.path("productId").asLong(); + ProductMetricsEntity metrics = getOrCreateMetrics(productId); + metrics.incrementLikeCount(); + productMetricsRepository.save(metrics); + + // products.like_count도 같은 TX에서 업데이트 (파이프라인 단일화) + productLikeCountRepository.incrementLikeCount(productId); + + log.info("[MetricsProcessor] 좋아요 집계 완료 — productId={}, metricsLikeCount={}", + productId, metrics.getLikeCount()); + } + + private void handleProductUnliked(JsonNode node) { + Long productId = node.path("productId").asLong(); + ProductMetricsEntity metrics = getOrCreateMetrics(productId); + metrics.decrementLikeCount(); + productMetricsRepository.save(metrics); + + // products.like_count도 같은 TX에서 업데이트 (파이프라인 단일화) + productLikeCountRepository.decrementLikeCount(productId); + + log.info("[MetricsProcessor] 좋아요 취소 집계 완료 — productId={}, metricsLikeCount={}", + productId, metrics.getLikeCount()); + } + + private void handleOrderItemSold(JsonNode node) { + JsonNode productQtyMap = node.path("productQtyMap"); + if (productQtyMap.isMissingNode() || !productQtyMap.isObject()) { + log.warn("[MetricsProcessor] OrderItemSoldEvent에 productQtyMap 없음 — node={}", node); + return; + } + + productQtyMap.fields().forEachRemaining(entry -> { + Long productId = Long.parseLong(entry.getKey()); + int quantity = entry.getValue().asInt(1); + ProductMetricsEntity metrics = getOrCreateMetrics(productId); + metrics.addSalesCount(quantity); + productMetricsRepository.save(metrics); + log.info("[MetricsProcessor] 판매량 집계 완료 — productId={}, quantity={}, salesCount={}", + productId, quantity, metrics.getSalesCount()); + }); + } + + private ProductMetricsEntity getOrCreateMetrics(Long productId) { + return productMetricsRepository.findById(productId) + .orElseGet(() -> productMetricsRepository.save(ProductMetricsEntity.create(productId))); + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java new file mode 100644 index 000000000..a30604629 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/dlq/DlqPublisher.java @@ -0,0 +1,82 @@ +package com.loopers.infrastructure.dlq; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.header.internals.RecordHeader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.kafka.support.SendResult; +import org.springframework.stereotype.Component; + +import java.nio.charset.StandardCharsets; +import java.time.ZonedDateTime; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * DLQ Publisher — 처리 실패한 메시지를 DLQ 토픽으로 격리 + * + * 동기 전송 (.get()) — DLQ 유실 방지 + * X-Retry-Count 헤더 포함 — 재시도 횟수 추적 + * 전송 결과 메타데이터 로깅 — DLQ 메시지 위치 추적 + */ +@Component +public class DlqPublisher { + + private static final Logger log = LoggerFactory.getLogger(DlqPublisher.class); + private static final String DLQ_TOPIC = "pipeline-dlq-v1"; + + private final KafkaTemplate kafkaTemplate; + + public DlqPublisher(KafkaTemplate kafkaTemplate) { + this.kafkaTemplate = kafkaTemplate; + } + + public void sendToDlq(ConsumerRecord record, Exception exception) { + sendToDlq(record, exception, 0); + } + + public void sendToDlq(ConsumerRecord record, Exception exception, int retryCount) { + try { + String errorMsg = exception.getMessage() != null + ? exception.getMessage() + : exception.getClass().getSimpleName(); + + ProducerRecord dlqRecord = new ProducerRecord<>( + DLQ_TOPIC, null, record.key(), record.value()); + + dlqRecord.headers() + .add(new RecordHeader("X-Original-Topic", + record.topic().getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Original-Partition", + String.valueOf(record.partition()).getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Original-Offset", + String.valueOf(record.offset()).getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Error-Message", + errorMsg.getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Error-Timestamp", + ZonedDateTime.now().toString().getBytes(StandardCharsets.UTF_8))) + .add(new RecordHeader("X-Retry-Count", + String.valueOf(retryCount).getBytes(StandardCharsets.UTF_8))); + + // 동기 전송 — DLQ 유실 방지 + SendResult result = kafkaTemplate.send(dlqRecord).get(10, TimeUnit.SECONDS); + + var metadata = result.getRecordMetadata(); + log.warn("[DLQ] 메시지 격리 완료 — dlqPartition={}, dlqOffset={}, " + + "originalTopic={}, partition={}, offset={}, retryCount={}, error={}", + metadata.partition(), metadata.offset(), + record.topic(), record.partition(), record.offset(), retryCount, errorMsg); + + } catch (ExecutionException | TimeoutException e) { + log.error("[DLQ] DLQ 전송 실패! 메시지 유실 위험 — topic={}, partition={}, offset={}, error={}", + record.topic(), record.partition(), record.offset(), e.getMessage(), e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.error("[DLQ] DLQ 전송 중단 — topic={}, partition={}, offset={}", + record.topic(), record.partition(), record.offset()); + } + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java new file mode 100644 index 000000000..46256b195 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledEntity.java @@ -0,0 +1,51 @@ +package com.loopers.infrastructure.event; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.GeneratedValue; +import jakarta.persistence.GenerationType; +import jakarta.persistence.Id; +import jakarta.persistence.Index; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * 멱등성 테이블 — Consumer가 이미 처리한 이벤트를 기록 + * UNIQUE(event_id)로 DB 레벨 동시성 방어 + */ +@Entity +@Table(name = "event_handled", indexes = { + @Index(name = "idx_event_handled_event_id", columnList = "event_id", unique = true), + @Index(name = "idx_event_handled_topic_handled", columnList = "topic, handled_at") +}) +public class EventHandledEntity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "event_id", nullable = false, unique = true, length = 100) + private String eventId; + + @Column(nullable = false, length = 100) + private String topic; + + @Column(name = "handled_at", nullable = false) + private ZonedDateTime handledAt; + + protected EventHandledEntity() {} + + public static EventHandledEntity of(String eventId, String topic) { + EventHandledEntity entity = new EventHandledEntity(); + entity.eventId = eventId; + entity.topic = topic; + entity.handledAt = ZonedDateTime.now(); + return entity; + } + + public Long getId() { return id; } + public String getEventId() { return eventId; } + public String getTopic() { return topic; } + public ZonedDateTime getHandledAt() { return handledAt; } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java new file mode 100644 index 000000000..648543b5a --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/EventHandledJpaRepository.java @@ -0,0 +1,7 @@ +package com.loopers.infrastructure.event; + +import org.springframework.data.jpa.repository.JpaRepository; + +public interface EventHandledJpaRepository extends JpaRepository { + boolean existsByEventId(String eventId); +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/IdempotencyService.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/IdempotencyService.java new file mode 100644 index 000000000..d083f11bc --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/event/IdempotencyService.java @@ -0,0 +1,40 @@ +package com.loopers.infrastructure.event; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.dao.DataIntegrityViolationException; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +/** + * 멱등성 서비스 — Consumer에서 중복 메시지 방지 + * + * 비즈니스 로직과 같은 TX에서 호출해야 한다. + * → 비즈니스 실패 시 멱등 기록도 롤백 → 재처리 가능 + */ +@Service +public class IdempotencyService { + + private static final Logger log = LoggerFactory.getLogger(IdempotencyService.class); + + private final EventHandledJpaRepository eventHandledJpaRepository; + + public IdempotencyService(EventHandledJpaRepository eventHandledJpaRepository) { + this.eventHandledJpaRepository = eventHandledJpaRepository; + } + + public boolean isAlreadyHandled(String eventId) { + return eventHandledJpaRepository.existsByEventId(eventId); + } + + @Transactional + public boolean markHandled(String eventId, String topic) { + try { + eventHandledJpaRepository.save(EventHandledEntity.of(eventId, topic)); + return true; + } catch (DataIntegrityViolationException e) { + log.warn("[Idempotency] 중복 감지 (UNIQUE 위반) — eventId={}", eventId); + return false; + } + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountEntity.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountEntity.java new file mode 100644 index 000000000..543f42e86 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountEntity.java @@ -0,0 +1,31 @@ +package com.loopers.infrastructure.product; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.GeneratedValue; +import jakarta.persistence.GenerationType; +import jakarta.persistence.Id; +import jakarta.persistence.Table; + +/** + * products 테이블의 like_count 업데이트 전용 경량 Entity (Streamer용) + * + * ProductEntity(commerce-api)의 전체 필드를 매핑하지 않고, + * like_count 원자적 UPDATE에 필요한 최소 필드만 매핑한다. + */ +@Entity +@Table(name = "products") +public class ProductLikeCountEntity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + private Long id; + + @Column(name = "like_count", nullable = false) + private Integer likeCount; + + protected ProductLikeCountEntity() {} + + public Long getId() { return id; } + public Integer getLikeCount() { return likeCount; } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountJpaRepository.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountJpaRepository.java new file mode 100644 index 000000000..c41c01a47 --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductLikeCountJpaRepository.java @@ -0,0 +1,24 @@ +package com.loopers.infrastructure.product; + +import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Modifying; +import org.springframework.data.jpa.repository.Query; + +/** + * 상품 좋아요 수 동기화 전용 Repository (Streamer용) + * + * product_metrics와 products.like_count를 같은 TX에서 업데이트하여 + * 이중 파이프라인 문제를 해소한다. + * + * 원자적 UPDATE — 비관적 락 불필요, 증감 연산만 수행. + */ +public interface ProductLikeCountJpaRepository extends JpaRepository { + + @Modifying + @Query("UPDATE ProductLikeCountEntity p SET p.likeCount = p.likeCount + 1 WHERE p.id = :productId") + int incrementLikeCount(Long productId); + + @Modifying + @Query("UPDATE ProductLikeCountEntity p SET p.likeCount = p.likeCount - 1 WHERE p.id = :productId AND p.likeCount > 0") + int decrementLikeCount(Long productId); +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java new file mode 100644 index 000000000..d424dfdac --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsEntity.java @@ -0,0 +1,71 @@ +package com.loopers.infrastructure.product; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; + +import java.time.ZonedDateTime; + +/** + * 상품 메트릭 집계 테이블 + * PK = product_id → 상품당 1 row → upsert로 집계 + */ +@Entity +@Table(name = "product_metrics") +public class ProductMetricsEntity { + + @Id + @Column(name = "product_id") + private Long productId; + + @Column(name = "view_count", nullable = false) + private long viewCount; + + @Column(name = "like_count", nullable = false) + private long likeCount; + + @Column(name = "sales_count", nullable = false) + private long salesCount; + + @Column(name = "updated_at", nullable = false) + private ZonedDateTime updatedAt; + + protected ProductMetricsEntity() {} + + public static ProductMetricsEntity create(Long productId) { + ProductMetricsEntity entity = new ProductMetricsEntity(); + entity.productId = productId; + entity.viewCount = 0; + entity.likeCount = 0; + entity.salesCount = 0; + entity.updatedAt = ZonedDateTime.now(); + return entity; + } + + public void incrementViewCount() { + this.viewCount++; + this.updatedAt = ZonedDateTime.now(); + } + + public void incrementLikeCount() { + this.likeCount++; + this.updatedAt = ZonedDateTime.now(); + } + + public void decrementLikeCount() { + if (this.likeCount > 0) this.likeCount--; + this.updatedAt = ZonedDateTime.now(); + } + + public void addSalesCount(int quantity) { + this.salesCount += quantity; + this.updatedAt = ZonedDateTime.now(); + } + + public Long getProductId() { return productId; } + public long getViewCount() { return viewCount; } + public long getLikeCount() { return likeCount; } + public long getSalesCount() { return salesCount; } + public ZonedDateTime getUpdatedAt() { return updatedAt; } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java new file mode 100644 index 000000000..4f8a260dd --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/infrastructure/product/ProductMetricsJpaRepository.java @@ -0,0 +1,6 @@ +package com.loopers.infrastructure.product; + +import org.springframework.data.jpa.repository.JpaRepository; + +public interface ProductMetricsJpaRepository extends JpaRepository { +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CatalogMetricsConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CatalogMetricsConsumer.java new file mode 100644 index 000000000..521b6dbdf --- /dev/null +++ b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/CatalogMetricsConsumer.java @@ -0,0 +1,74 @@ +package com.loopers.interfaces.consumer; + +import com.loopers.application.metrics.CatalogMetricsProcessor; +import com.loopers.infrastructure.dlq.DlqPublisher; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.Header; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.kafka.support.Acknowledgment; +import org.springframework.stereotype.Component; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +/** + * 카탈로그 메트릭 Consumer — 메시지 수신 + ACK + DLQ만 담당 + * + * Interfaces 레이어의 책임: "요청 수신" + * Controller가 HTTP 요청을 받아서 Facade에 위임하듯이, + * Consumer가 Kafka 메시지를 받아서 Processor에 위임한다. + * + * 비즈니스 처리는 CatalogMetricsProcessor(@Service)에 위임: + * → 프록시를 통한 호출 → @Transactional 정상 동작 + * → self-invocation 방지 → increment + event_handled 같은 TX 보장 + */ +@Component +public class CatalogMetricsConsumer { + + private static final Logger log = LoggerFactory.getLogger(CatalogMetricsConsumer.class); + + private final CatalogMetricsProcessor processor; + private final DlqPublisher dlqPublisher; + + public CatalogMetricsConsumer(CatalogMetricsProcessor processor, + DlqPublisher dlqPublisher) { + this.processor = processor; + this.dlqPublisher = dlqPublisher; + } + + @KafkaListener( + topics = "catalog-events-v1", + groupId = "catalog-metrics-group", + containerFactory = "BATCH_LISTENER_DEFAULT" + ) + public void consume(List> records, Acknowledgment ack) { + for (ConsumerRecord record : records) { + try { + String eventType = getHeader(record, "X-Event-Type"); + String outboxId = getHeader(record, "X-Outbox-Id"); + String payload = record.value().toString(); + + if (eventType == null) { + log.warn("[CatalogMetrics] X-Event-Type 헤더 없음 — partition={}, offset={}", + record.partition(), record.offset()); + continue; + } + + processor.process(eventType, outboxId, payload); + + } catch (Exception e) { + log.error("[CatalogMetrics] 처리 실패 → DLQ — partition={}, offset={}, error={}", + record.partition(), record.offset(), e.getMessage(), e); + dlqPublisher.sendToDlq(record, e); + } + } + ack.acknowledge(); + } + + private String getHeader(ConsumerRecord record, String headerName) { + Header header = record.headers().lastHeader(headerName); + return header != null ? new String(header.value(), StandardCharsets.UTF_8) : null; + } +} diff --git a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java b/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java deleted file mode 100644 index ba862cec6..000000000 --- a/apps/commerce-streamer/src/main/java/com/loopers/interfaces/consumer/DemoKafkaConsumer.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.loopers.interfaces.consumer; - -import com.loopers.confg.kafka.KafkaConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.springframework.kafka.annotation.KafkaListener; -import org.springframework.kafka.support.Acknowledgment; -import org.springframework.stereotype.Component; - -import java.util.List; - -@Component -public class DemoKafkaConsumer { - @KafkaListener( - topics = {"${demo-kafka.test.topic-name}"}, - containerFactory = KafkaConfig.BATCH_LISTENER - ) - public void demoListener( - List> messages, - Acknowledgment acknowledgment - ){ - System.out.println(messages); - acknowledgment.acknowledge(); - } -} diff --git a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java index a73842775..2694a6243 100644 --- a/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java +++ b/modules/kafka/src/main/java/com/loopers/confg/kafka/KafkaConfig.java @@ -54,7 +54,8 @@ public ByteArrayJsonMessageConverter jsonMessageConverter(ObjectMapper objectMap @Bean(name = BATCH_LISTENER) public ConcurrentKafkaListenerContainerFactory defaultBatchListenerContainerFactory( KafkaProperties kafkaProperties, - ByteArrayJsonMessageConverter converter + ByteArrayJsonMessageConverter converter, + KafkaTemplate kafkaTemplate ) { Map consumerConfig = new HashMap<>(kafkaProperties.buildConsumerProperties()); consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, MAX_POLLING_SIZE); @@ -70,6 +71,37 @@ public ConcurrentKafkaListenerContainerFactory defaultBatchListe factory.setBatchMessageConverter(new BatchMessagingMessageConverter(converter)); factory.setConcurrency(3); factory.setBatchListener(true); + + // Spring Kafka ErrorHandler 추가 — 재시도 없이 즉시 DLQ 격리 + // 배치 블로킹 방지: 한 레코드 실패 시 나머지 레코드 블로킹 없이 즉시 DLQ 전송 + factory.setCommonErrorHandler(createErrorHandler(kafkaTemplate)); + return factory; } + + /** + * Spring Kafka ErrorHandler 설정 + * + * 재시도 로직을 Consumer 코드에서 제거하고 ErrorHandler에 위임: + * - FixedBackOff(0, 0): 재시도 없이 즉시 DLQ + * - DeadLetterPublishingRecoverer: DLQ로 동기 전송 + * + * 장점: + * - 배치 블로킹 방지 (한 레코드 실패 시 나머지 레코드 블로킹 없음) + * - 처리량 유지 (MAX_POLLING_SIZE = 3000 유지 가능) + * - 일관된 에러 처리 (모든 Consumer에 적용) + */ + private org.springframework.kafka.listener.CommonErrorHandler createErrorHandler( + KafkaTemplate kafkaTemplate + ) { + var recoverer = new org.springframework.kafka.listener.DeadLetterPublishingRecoverer( + kafkaTemplate, + (record, ex) -> new org.apache.kafka.common.TopicPartition("pipeline-dlq-v1", -1) + ); + + return new org.springframework.kafka.listener.DefaultErrorHandler( + recoverer, + new org.springframework.util.backoff.FixedBackOff(0L, 0L) // 재시도 없이 즉시 DLQ + ); + } } diff --git a/modules/kafka/src/main/resources/kafka.yml b/modules/kafka/src/main/resources/kafka.yml index 9609dbf85..f26ad3ca5 100644 --- a/modules/kafka/src/main/resources/kafka.yml +++ b/modules/kafka/src/main/resources/kafka.yml @@ -14,7 +14,11 @@ spring: producer: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.springframework.kafka.support.serializer.JsonSerializer + acks: all # 모든 ISR 복제 확인 후 ACK — 데이터 유실 방지 retries: 3 + properties: + enable.idempotence: true # 멱등성 프로듀서 — PID+Seq로 브로커 중복 적재 방지 + max.in.flight.requests.per.connection: 5 # idempotence 호환 최대값 consumer: group-id: loopers-default-consumer key-deserializer: org.apache.kafka.common.serialization.StringDeserializer