import io.confluent.parallelconsumer.internal.InternalRuntimeError;
import io.confluent.parallelconsumer.offsets.EncodingNotSupportedException;
import io.confluent.parallelconsumer.offsets.OffsetMapCodecManager;
+import io.confluent.parallelconsumer.state.PartitionState.OffsetPair;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
@@ -49,7 +50,7 @@ public class PartitionMonitor<K, V> implements ConsumerRebalanceListener {
     */
    @Getter
    @Setter
-    private double USED_PAYLOAD_THRESHOLD_MULTIPLIER = 0.75;
+    private static double USED_PAYLOAD_THRESHOLD_MULTIPLIER = 0.75;

    private final Consumer<K, V> consumer;

@@ -172,7 +173,7 @@ public void onPartitionsLost(Collection<TopicPartition> partitions) {
     * When commits are made to broker, we can throw away all the individually tracked offsets before the committed
     * offset.
     */
-    public void onOffsetCommitSuccess(Map<TopicPartition, OffsetAndMetadata> committed) {
+    public void onOffsetCommitSuccess(Map<TopicPartition, OffsetPair> committed) {
        // partitionOffsetHighWaterMarks this will get overwritten in due course
        committed.forEach((tp, meta) -> {
            var partition = getPartitionState(tp);
@@ -408,6 +409,11 @@ public void onSuccess(WorkContainer<K, V> wc) {
        partitionState.onSuccess(wc);
    }

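+    /**
+     * Counterpart to {@link #onSuccess(WorkContainer)} for failed work: hands the failed record's container back to
+     * the state of the partition it belongs to.
+     */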
+    public void onFailure(WorkContainer<K, V> wc) {
+        PartitionState<K, V> partitionState = getPartitionState(wc.getTopicPartition());
+        partitionState.onFailure(wc);
+    }
+
    /**
     * Takes a record as work and puts it into internal queues, unless it's been previously recorded as completed as per
     * loaded records.
@@ -445,16 +451,47 @@ boolean maybeRegisterNewRecordAsWork(final ConsumerRecord<K, V> rec) {
        }
    }

-    public Map<TopicPartition, OffsetAndMetadata> findCompletedEligibleOffsetsAndRemove() {
+    public Map<TopicPartition, OffsetPair> findCompletedEligibleOffsetsAndRemove() {
        return findCompletedEligibleOffsetsAndRemove(true);
    }

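+    /**
+     * Transitional implementation: delegates to both the new per-partition lookup and the old scanning
+     * implementation, but only the result of the new path is returned.
+     */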
+    private Map<TopicPartition, OffsetPair> findCompletedEligibleOffsetsAndRemove(boolean remove) {
+
+        var newone = findCompletedEligibleOffsetsAndRemoveNew();
+
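+        // the old scan is still executed, presumably for its removal side effects and for comparison while
+        // migrating to live offset tracking (see #201 below); its result is intentionally discarded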
+        var oldone = findCompletedEligibleOffsetsAndRemoveOld(remove);
+
+        return newone;
+    }
+
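+    /**
+     * For each assigned partition, asks its {@link PartitionState} for the next completed, committable offset pair
+     * (if any) and collects the results into a map keyed by partition.
+     */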
+    private Map<TopicPartition, OffsetPair> findCompletedEligibleOffsetsAndRemoveNew() {
+        Map<TopicPartition, OffsetPair> offsetsToSend = new HashMap<>();
+
+        for (var e : getAssignedPartitions().entrySet()) {
+            Optional<OffsetPair> completedEligibleOffsetsAndRemoveNew = e.getValue().getCompletedEligibleOffsetsAndRemoveNew();
+            completedEligibleOffsetsAndRemoveNew.ifPresent(offsetAndMetadata ->
+                    offsetsToSend.put(e.getKey(), offsetAndMetadata)
+            );
+        }
+
+//        Map<TopicPartition, OffsetAndMetadata> collect = getAssignedPartitions().entrySet().stream()
+//                .map(x -> x.getValue().getCompletedEligibleOffsetsAndRemoveNew(remove))
+//                .filter(Optional::isPresent)
+//                .map(Optional::get)
+//                .collect(Collectors.toMap(Map.Entry::getKey,
+//                        o -> o.getValue().getCompletedEligibleOffsetsAndRemoveNew()));
+
+        log.debug("Scan finished, {} were in flight, offset(s) ({}) to be committed", offsetsToSend.size(), offsetsToSend);
+        return offsetsToSend;
+    }
+
+
    /**
     * Finds eligible offset positions to commit in each assigned partition
     */
    // todo remove completely as state of offsets should be tracked live, no need to scan for them - see #201
    // https://github.com/confluentinc/parallel-consumer/issues/201 Refactor: Live tracking of offsets as they change, so we don't need to scan for them
-    Map<TopicPartition, OffsetAndMetadata> findCompletedEligibleOffsetsAndRemove(boolean remove) {
+    Map<TopicPartition, OffsetAndMetadata> findCompletedEligibleOffsetsAndRemoveOld(boolean remove) {

        //
        Map<TopicPartition, OffsetAndMetadata> offsetsToSend = new HashMap<>();
@@ -470,7 +507,7 @@ Map<TopicPartition, OffsetAndMetadata> findCompletedEligibleOffsetsAndRemove(boo
            log.trace("Starting scan of partition: {}", topicPartitionKey);

            count += partitionQueue.size();
-            var workToRemove = new LinkedList<WorkContainer<K, V>>();
+            var workToRemove = new LinkedList<WorkContainer<?, ?>>();
            var incompleteOffsets = new LinkedHashSet<Long>();
            long lowWaterMark = -1;
            var highestSucceeded = partitionState.getOffsetHighestSucceeded();
@@ -539,6 +576,14 @@ private Map<TopicPartition, PartitionState<K, V>> getAssignedPartitions() {
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
    }

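+    /**
+     * Whether any assigned partition's state reports itself dirty (see {@link PartitionState#isDirty()}).
+     */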
+    boolean isDirty() {
+        return this.partitionStates.values().parallelStream().anyMatch(PartitionState::isDirty);
+    }
+
+    public boolean isClean() {
+        return !isDirty();
+    }
+
    public boolean couldBeTakenAsWork(WorkContainer<?, ?> workContainer) {
        if (checkIfWorkIsStale(workContainer)) {
            log.debug("Work is in queue with stale epoch or no longer assigned. Skipping. Shard it came from will/was removed during partition revocation. WC: {}", workContainer);