/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.kafka.listener;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.util.Assert;

/**
 * A {@link BiConsumer} that publishes a failed record to a dead-letter topic.
 *
 * @author Gary Russell
 * @since 2.2
 *
 */
public class DeadLetterPublishingRecoverer implements BiConsumer<ConsumerRecord<?, ?>, Exception> {

	private static final Log logger = LogFactory.getLog(DeadLetterPublishingRecoverer.class);

	private final KafkaTemplate<Object, Object> template;

	private final boolean transactional;

	private final BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver;

	/**
	 * Create an instance with the provided template and a default destination resolving
	 * function that returns a {@link TopicPartition} for the failed record's original topic
	 * with ".DLT" appended, and the same partition as the failed record. The dead-letter
	 * topic must therefore have at least as many partitions as the original topic.
	 * @param template the {@link KafkaTemplate} to use for publishing.
	 */
	public DeadLetterPublishingRecoverer(KafkaTemplate<Object, Object> template) {
		this(template, (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition()));
	}

	/**
	 * Create an instance with the provided template and destination resolving function
	 * that receives the failed consumer record and the exception and returns a
	 * {@link TopicPartition}. If the partition in the {@link TopicPartition} is less than 0,
	 * no partition is set when publishing to the topic.
	 * @param template the {@link KafkaTemplate} to use for publishing.
	 * @param destinationResolver the resolving function.
	 */
	public DeadLetterPublishingRecoverer(KafkaTemplate<Object, Object> template,
			BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {

		Assert.notNull(template, "The template cannot be null");
		Assert.notNull(destinationResolver, "The destinationResolver cannot be null");
		this.template = template;
		this.transactional = template.isTransactional();
		this.destinationResolver = destinationResolver;
	}

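	/**
	 * Resolve the destination for the failed record, copy its headers, add the
	 * diagnostic {@code DLT_*} headers, and publish the resulting record, within a
	 * transaction if the template is transactional.
	 * @param record the failed record.
	 * @param exception the exception that caused the failure.
	 */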
	@Override
	public void accept(ConsumerRecord<?, ?> record, Exception exception) {
		TopicPartition tp = this.destinationResolver.apply(record, exception);
		RecordHeaders headers = new RecordHeaders(record.headers().toArray());
		enhanceHeaders(headers, record, exception);
		ProducerRecord<Object, Object> outRecord = createProducerRecord(record, tp, headers);
		if (this.transactional) {
			this.template.executeInTransaction(t -> {
				publish(outRecord, t);
				return null;
			});
		}
		else {
			publish(outRecord, this.template);
		}
	}

	/**
	 * Subclasses can override this method to customize the producer record to send to the DLQ.
	 * The default implementation simply copies the key and value from the consumer record
	 * and adds the headers. The timestamp is not set (the original timestamp is in one of
	 * the headers).
	 * IMPORTANT: if the partition in the {@link TopicPartition} is less than 0, it must be
	 * set to null in the {@link ProducerRecord}.
	 * @param record the failed record.
	 * @param topicPartition the {@link TopicPartition} returned by the destination resolver.
	 * @param headers the headers - original record headers plus DLT headers.
	 * @return the producer record to send.
	 * @see KafkaHeaders
	 */
	protected ProducerRecord<Object, Object> createProducerRecord(ConsumerRecord<?, ?> record,
			TopicPartition topicPartition, RecordHeaders headers) {

		return new ProducerRecord<>(topicPartition.topic(),
				topicPartition.partition() < 0 ? null : topicPartition.partition(),
				record.key(), record.value(), headers);
	}

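	/**
	 * Send the producer record using the given operations, logging success at debug
	 * level and logging (but not propagating) any synchronous or asynchronous failure.
	 * @param outRecord the record to publish to the dead-letter topic.
	 * @param template the operations to publish with (the transactional producer when
	 * running inside {@code executeInTransaction}).
	 */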
	private void publish(ProducerRecord<Object, Object> outRecord, KafkaOperations<Object, Object> template) {
		try {
			template.send(outRecord).addCallback(result -> {
				if (logger.isDebugEnabled()) {
					logger.debug("Successful dead-letter publication: " + result);
				}
			}, ex -> {
				logger.error("Dead-letter publication failed for: " + outRecord, ex);
			});
		}
		catch (Exception e) {
			logger.error("Dead-letter publication failed for: " + outRecord, e);
		}
	}

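	/**
	 * Add headers recording the original topic, partition, offset, timestamp and
	 * timestamp type of the failed record, plus the exception class name, message and
	 * stack trace.
	 * @param kafkaHeaders the headers to enhance.
	 * @param record the failed record.
	 * @param exception the exception that caused the failure.
	 */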
	private void enhanceHeaders(RecordHeaders kafkaHeaders, ConsumerRecord<?, ?> record, Exception exception) {
		kafkaHeaders.add(
				new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC, record.topic().getBytes(StandardCharsets.UTF_8)));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION,
				ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array()));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET,
				ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array()));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP,
				ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array()));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE,
				record.timestampType().toString().getBytes(StandardCharsets.UTF_8)));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_FQCN,
				exception.getClass().getName().getBytes(StandardCharsets.UTF_8)));
		// Guard against exceptions that have no message to avoid an NPE here.
		String exceptionMessage = exception.getMessage() == null ? "" : exception.getMessage();
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_MESSAGE,
				exceptionMessage.getBytes(StandardCharsets.UTF_8)));
		kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_STACKTRACE,
				getStackTraceAsString(exception).getBytes(StandardCharsets.UTF_8)));
	}

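	/**
	 * Render the stack trace of the given throwable as a String suitable for a header value.
	 * @param cause the throwable.
	 * @return the stack trace.
	 */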
	private String getStackTraceAsString(Throwable cause) {
		StringWriter stringWriter = new StringWriter();
		PrintWriter printWriter = new PrintWriter(stringWriter, true);
		cause.printStackTrace(printWriter);
		return stringWriter.getBuffer().toString();
	}

}
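For reference, the sketch below shows one way this recoverer is typically wired into a listener container, using a custom destination resolver that sends every failure to a single "app.DLT" topic with a negative partition (so no partition is set on the outgoing record). It assumes the SeekToCurrentErrorHandler constructor that takes a recoverer plus a max-failures count and the container factory's setErrorHandler hook, both from the same 2.2 line as this class; the bean, topic name, and failure threshold are illustrative only, not part of this class.

import org.apache.kafka.common.TopicPartition;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;

@Configuration
public class DeadLetterConfig {

	@Bean
	public ConcurrentKafkaListenerContainerFactory<Object, Object> kafkaListenerContainerFactory(
			ConsumerFactory<Object, Object> consumerFactory, KafkaTemplate<Object, Object> template) {

		ConcurrentKafkaListenerContainerFactory<Object, Object> factory =
				new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory);

		// Route every failure to "app.DLT"; the negative partition means the partition
		// is left unset on the outgoing ProducerRecord (see createProducerRecord above).
		DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template,
				(record, exception) -> new TopicPartition("app.DLT", -1));

		// Seek back and retry; after three failed deliveries, publish to the DLT.
		factory.setErrorHandler(new SeekToCurrentErrorHandler(recoverer, 3));
		return factory;
	}

}

With the single-argument constructor instead, records would go to "<original topic>.DLT" on the same partition as the failed record, so that topic would need at least as many partitions as the source topic.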
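Because createProducerRecord is protected, the outgoing record can also be customized by subclassing. The sketch below (the TimestampPreservingRecoverer name is invented for this example) forwards the failed record's original timestamp instead of leaving it unset, while honouring the rule above that a negative partition becomes null:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;

public class TimestampPreservingRecoverer extends DeadLetterPublishingRecoverer {

	public TimestampPreservingRecoverer(KafkaTemplate<Object, Object> template) {
		super(template);
	}

	@Override
	protected ProducerRecord<Object, Object> createProducerRecord(ConsumerRecord<?, ?> record,
			TopicPartition topicPartition, RecordHeaders headers) {

		// Same key, value and headers as the default implementation, but carry the
		// original record's timestamp forward rather than leaving the timestamp unset.
		return new ProducerRecord<>(topicPartition.topic(),
				topicPartition.partition() < 0 ? null : topicPartition.partition(),
				record.timestamp(), record.key(), record.value(), headers);
	}

}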