mirror of https://github.com/apache/kafka.git
Merge branch 'trunk' of github.com:apache/kafka into KAFKA-19614
commit b4e5e6b1dd
@@ -1039,6 +1039,8 @@ project(':core') {
     implementation project(':transaction-coordinator')
     implementation project(':metadata')
     implementation project(':storage:storage-api')
+    // tools-api is automatically included in releaseTarGz via core's runtimeClasspath.
+    // If removed from here, remember to explicitly add it back in the releaseTarGz task.
     implementation project(':tools:tools-api')
     implementation project(':raft')
     implementation project(':storage')
@@ -1261,8 +1263,6 @@ project(':core') {
     from(project(':streams:test-utils').configurations.runtimeClasspath) { into("libs/") }
     from(project(':streams:examples').jar) { into("libs/") }
     from(project(':streams:examples').configurations.runtimeClasspath) { into("libs/") }
-    from(project(':tools:tools-api').jar) { into("libs/") }
-    from(project(':tools:tools-api').configurations.runtimeClasspath) { into("libs/") }
     duplicatesStrategy 'exclude'
   }

@@ -497,6 +497,7 @@
     <allow pkg="org.apache.kafka.server.common.serialization" />
     <allow pkg="org.apache.kafka.server.config" />
     <allow pkg="org.apache.kafka.server.fault"/>
+    <allow pkg="org.apache.kafka.server.metrics" />
     <allow pkg="org.apache.kafka.server.util" />
     <allow pkg="org.apache.kafka.test"/>
     <allow pkg="com.fasterxml.jackson" />

@@ -46,7 +46,6 @@ public class GroupRebalanceConfig {
     public final Optional<String> rackId;
     public final long retryBackoffMs;
     public final long retryBackoffMaxMs;
-    public final boolean leaveGroupOnClose;

     public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) {
         this.sessionTimeoutMs = config.getInt(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG);
@@ -80,13 +79,6 @@ public class GroupRebalanceConfig {

         this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
         this.retryBackoffMaxMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MAX_MS_CONFIG);
-
-        // Internal leave group config is only defined in Consumer.
-        if (protocolType == ProtocolType.CONSUMER) {
-            this.leaveGroupOnClose = config.getBoolean("internal.leave.group.on.close");
-        } else {
-            this.leaveGroupOnClose = true;
-        }
     }

     // For testing purpose.
@@ -97,8 +89,7 @@ public class GroupRebalanceConfig {
                                 Optional<String> groupInstanceId,
                                 String rackId,
                                 long retryBackoffMs,
-                                long retryBackoffMaxMs,
-                                boolean leaveGroupOnClose) {
+                                long retryBackoffMaxMs) {
         this.sessionTimeoutMs = sessionTimeoutMs;
         this.rebalanceTimeoutMs = rebalanceTimeoutMs;
         this.heartbeatIntervalMs = heartbeatIntervalMs;
@@ -107,6 +98,5 @@ public class GroupRebalanceConfig {
         this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId);
         this.retryBackoffMs = retryBackoffMs;
         this.retryBackoffMaxMs = retryBackoffMaxMs;
-        this.leaveGroupOnClose = leaveGroupOnClose;
     }
 }
@@ -1903,7 +1903,7 @@ public interface Admin extends AutoCloseable {
      * Remove a voter node from the KRaft metadata quorum.
      *
      * <p>
-     * The clusterId in {@link AddRaftVoterOptions} is optional.
+     * The clusterId in {@link RemoveRaftVoterOptions} is optional.
      * If provided, the operation will only succeed if the cluster id matches the id
      * of the current cluster. If the cluster id does not match, the operation
      * will fail with {@link InconsistentClusterIdException}.
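Note on the hunk above: the javadoc now references RemoveRaftVoterOptions, the option type that the remove-voter call actually takes. A minimal caller-side sketch, assuming the KIP-853 Admin#removeRaftVoter(voterId, voterDirectoryId, options) signature and a RemoveRaftVoterResult#all() future; the bootstrap address, voter id, and directory id below are placeholders, not values from this commit.

    import java.util.Properties;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.RemoveRaftVoterOptions;
    import org.apache.kafka.common.Uuid;

    public class RemoveVoterExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");                       // placeholder bootstrap address
            try (Admin admin = Admin.create(props)) {
                int voterId = 3;                                                     // placeholder voter id
                Uuid voterDirectoryId = Uuid.fromString("vN9mZgG2SIuRIoQTUp0Slw");   // placeholder directory id
                // The cluster id carried by RemoveRaftVoterOptions is optional; when it is set and does
                // not match the running cluster, the call fails with InconsistentClusterIdException.
                admin.removeRaftVoter(voterId, voterDirectoryId, new RemoveRaftVoterOptions()).all().get();
            }
        }
    }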
@@ -46,7 +46,7 @@ public class ListShareGroupOffsetsResult {
     /**
      * Return the future when the requests for all groups succeed.
      *
-     * @return Future which yields all {@code Map<String, Map<TopicPartition, Long>>} objects, if requests for all the groups succeed.
+     * @return Future which yields all {@code Map<String, Map<TopicPartition, OffsetAndMetadata>>} objects, if requests for all the groups succeed.
      */
     public KafkaFuture<Map<String, Map<TopicPartition, OffsetAndMetadata>>> all() {
         return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0])).thenApply(
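The corrected @return type above is what callers actually receive from all(). A short, illustrative sketch of consuming it; obtaining the ListShareGroupOffsetsResult from the admin client is not shown here.

    import java.util.Map;

    import org.apache.kafka.clients.admin.ListShareGroupOffsetsResult;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class PrintShareGroupOffsets {
        // Illustrative helper only: iterates the Map<String, Map<TopicPartition, OffsetAndMetadata>>
        // documented in the hunk above once the requests for all groups have completed.
        static void printAll(ListShareGroupOffsetsResult result) throws Exception {
            Map<String, Map<TopicPartition, OffsetAndMetadata>> offsetsByGroup = result.all().get();
            offsetsByGroup.forEach((groupId, offsets) ->
                offsets.forEach((tp, offsetAndMetadata) ->
                    System.out.printf("group=%s %s -> offset=%d%n", groupId, tp, offsetAndMetadata.offset())));
        }
    }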
@@ -330,17 +330,6 @@ public class ConsumerConfig extends AbstractConfig {
             "be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic.";
     public static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS = true;

-    /**
-     * <code>internal.leave.group.on.close</code>
-     * Whether or not the consumer should leave the group on close. If set to <code>false</code> then a rebalance
-     * won't occur until <code>session.timeout.ms</code> expires.
-     *
-     * <p>
-     * Note: this is an internal configuration and could be changed in the future in a backward incompatible way
-     *
-     */
-    static final String LEAVE_GROUP_ON_CLOSE_CONFIG = "internal.leave.group.on.close";
-
     /**
      * <code>internal.throw.on.fetch.stable.offset.unsupported</code>
      * Whether or not the consumer should throw when the new stable offset feature is supported.
@@ -634,10 +623,6 @@ public class ConsumerConfig extends AbstractConfig {
                                     DEFAULT_EXCLUDE_INTERNAL_TOPICS,
                                     Importance.MEDIUM,
                                     EXCLUDE_INTERNAL_TOPICS_DOC)
-                            .defineInternal(LEAVE_GROUP_ON_CLOSE_CONFIG,
-                                    Type.BOOLEAN,
-                                    true,
-                                    Importance.LOW)
                             .defineInternal(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED,
                                     Type.BOOLEAN,
                                     false,
@@ -1170,7 +1170,7 @@ public abstract class AbstractCoordinator implements Closeable {
     public synchronized RequestFuture<Void> maybeLeaveGroup(CloseOptions.GroupMembershipOperation membershipOperation, String leaveReason) {
         RequestFuture<Void> future = null;

-        if (rebalanceConfig.leaveGroupOnClose && shouldSendLeaveGroupRequest(membershipOperation)) {
+        if (shouldSendLeaveGroupRequest(membershipOperation)) {
             log.info("Member {} sending LeaveGroup request to coordinator {} due to {}",
                 generation.memberId, coordinator, leaveReason);
             LeaveGroupRequest.Builder request = new LeaveGroupRequest.Builder(
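With rebalanceConfig.leaveGroupOnClose removed, whether maybeLeaveGroup actually sends a LeaveGroup request is decided solely by the CloseOptions.GroupMembershipOperation supplied at close time. A hedged caller-side sketch; only the nested GroupMembershipOperation enum is confirmed by this diff, and the CloseOptions factory method name is assumed from KIP-1092 and may differ.

    import org.apache.kafka.clients.consumer.CloseOptions;
    import org.apache.kafka.clients.consumer.Consumer;

    public class CloseWithoutLeaving {
        // Keep the member in the group across a restart instead of relying on the removed
        // internal.leave.group.on.close flag; the coordinator then waits for session.timeout.ms.
        static void closeQuietly(Consumer<String, String> consumer) {
            consumer.close(CloseOptions.groupMembershipOperation(
                CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP));
        }
    }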
@@ -331,8 +331,7 @@ public class ClassicKafkaConsumer<K, V> implements ConsumerDelegate<K, V> {
             groupInstanceId,
             rackId,
             retryBackoffMs,
-            retryBackoffMaxMs,
-            true
+            retryBackoffMaxMs
         );
         this.coordinator = new ConsumerCoordinator(
             rebalanceConfig,
@@ -421,7 +421,7 @@ public class ConsumerMembershipManager extends AbstractMembershipManager<Consume
     @Override
     public boolean isLeavingGroup() {
         CloseOptions.GroupMembershipOperation leaveGroupOperation = leaveGroupOperation();
-        if (REMAIN_IN_GROUP == leaveGroupOperation) {
+        if (REMAIN_IN_GROUP == leaveGroupOperation && groupInstanceId.isEmpty()) {
             return false;
         }

@@ -432,7 +432,8 @@ public class ConsumerMembershipManager extends AbstractMembershipManager<Consume
         boolean hasLeaveOperation = DEFAULT == leaveGroupOperation ||
             // Leave operation: both static and dynamic consumers will send a leave heartbeat
             LEAVE_GROUP == leaveGroupOperation ||
-            // Remain in group: only static consumers will send a leave heartbeat, while dynamic members will not
+            // Remain in group: static consumers will send a leave heartbeat with -2 epoch to reflect that a member using the given
+            // instance id decided to leave the group and would be back within the session timeout.
             groupInstanceId().isPresent();

         return isLeavingState && hasLeaveOperation;
@@ -20,13 +20,4 @@ package org.apache.kafka.common.metrics;
  * A gauge metric is an instantaneous reading of a particular value.
  */
 @FunctionalInterface
-public interface Gauge<T> extends MetricValueProvider<T> {
-
-    /**
-     * Returns the current value associated with this gauge.
-     * @param config The configuration for this metric
-     * @param now The POSIX time in milliseconds the measurement is being taken
-     */
-    T value(MetricConfig config, long now);
-
-}
+public interface Gauge<T> extends MetricValueProvider<T> { }
@@ -20,6 +20,8 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.utils.Time;

+import java.util.Objects;
+
 public final class KafkaMetric implements Metric {

     private final MetricName metricName;
@@ -41,9 +43,7 @@ public final class KafkaMetric implements Metric {
                        MetricConfig config, Time time) {
         this.metricName = metricName;
         this.lock = lock;
-        if (!(valueProvider instanceof Measurable) && !(valueProvider instanceof Gauge))
-            throw new IllegalArgumentException("Unsupported metric value provider of class " + valueProvider.getClass());
-        this.metricValueProvider = valueProvider;
+        this.metricValueProvider = Objects.requireNonNull(valueProvider, "valueProvider must not be null");
         this.config = config;
         this.time = time;
     }
@@ -67,20 +67,15 @@ public final class KafkaMetric implements Metric {
     }

     /**
-     * Take the metric and return the value, which could be a {@link Measurable} or a {@link Gauge}
+     * Take the metric and return the value via {@link MetricValueProvider#value(MetricConfig, long)}.
+     *
      * @return Return the metric value
-     * @throws IllegalStateException if the underlying metric is not a {@link Measurable} or a {@link Gauge}.
      */
     @Override
     public Object metricValue() {
         long now = time.milliseconds();
         synchronized (this.lock) {
-            if (isMeasurable())
-                return ((Measurable) metricValueProvider).measure(config, now);
-            else if (this.metricValueProvider instanceof Gauge)
-                return ((Gauge<?>) metricValueProvider).value(config, now);
-            else
-                throw new IllegalStateException("Not a valid metric: " + this.metricValueProvider.getClass());
+            return metricValueProvider.value(config, now);
         }
     }
@@ -22,11 +22,26 @@ package org.apache.kafka.common.metrics;
 public interface Measurable extends MetricValueProvider<Double> {

     /**
-     * Measure this quantity and return the result as a double
+     * Measure this quantity and return the result as a double.
+     *
      * @param config The configuration for this metric
      * @param now The POSIX time in milliseconds the measurement is being taken
      * @return The measured value
      */
     double measure(MetricConfig config, long now);

+    /**
+     * Measure this quantity and return the result as a double.
+     *
+     * This default implementation delegates to {@link #measure(MetricConfig, long)}.
+     *
+     * @param config The configuration for this metric
+     * @param now The POSIX time in milliseconds the measurement is being taken
+     * @return The measured value as a {@link Double}
+     */
+    @Override
+    default Double value(MetricConfig config, long now) {
+        return measure(config, now);
+    }
+
 }
@@ -19,10 +19,17 @@ package org.apache.kafka.common.metrics;
 /**
  * Super-interface for {@link Measurable} or {@link Gauge} that provides
  * metric values.
- * <p>
- * In the future for Java8 and above, {@link Gauge#value(MetricConfig, long)} will be
- * moved to this interface with a default implementation in {@link Measurable} that returns
- * {@link Measurable#measure(MetricConfig, long)}.
- * </p>
  */
-public interface MetricValueProvider<T> { }
+@FunctionalInterface
+public interface MetricValueProvider<T> {
+
+    /**
+     * Returns the current value associated with this metric.
+     *
+     * @param config The configuration for this metric
+     * @param now The POSIX time in milliseconds the measurement is being taken
+     * @return the current metric value
+     */
+    T value(MetricConfig config, long now);
+
+}
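Taken together, the metrics hunks above make MetricValueProvider the single functional entry point: Gauge and Measurable become typed specializations (Measurable's default value() delegates to measure()), and KafkaMetric.metricValue() calls the provider directly instead of branching on its concrete type. A small sketch of what that enables, using the long-standing Metrics.addMetric(MetricName, MetricValueProvider) registration path; the metric names and values are illustrative only.

    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Gauge;
    import org.apache.kafka.common.metrics.Measurable;
    import org.apache.kafka.common.metrics.Metrics;

    public class GaugeExample {
        public static void main(String[] args) {
            try (Metrics metrics = new Metrics()) {
                MetricName queueSize = metrics.metricName("queue-size", "example-metrics", "instantaneous queue depth");
                Gauge<Integer> gauge = (config, now) -> 42;        // value(MetricConfig, long) is inherited from MetricValueProvider
                metrics.addMetric(queueSize, gauge);

                MetricName ratio = metrics.metricName("hit-ratio", "example-metrics", "a measured double");
                Measurable measurable = (config, now) -> 0.75;     // measure(...) backs the default value(...) implementation
                metrics.addMetric(ratio, measurable);

                // KafkaMetric.metricValue() now simply calls MetricValueProvider#value(config, now).
                System.out.println(metrics.metric(queueSize).metricValue()); // 42
                System.out.println(metrics.metric(ratio).metricValue());     // 0.75
            }
        }
    }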
@@ -19,6 +19,7 @@ package org.apache.kafka.common.protocol;
 import org.apache.kafka.common.message.RequestHeaderData;
 import org.apache.kafka.common.message.ResponseHeaderData;
 import org.apache.kafka.common.protocol.types.BoundField;
+import org.apache.kafka.common.protocol.types.Field;
 import org.apache.kafka.common.protocol.types.Schema;
 import org.apache.kafka.common.protocol.types.TaggedFields;
 import org.apache.kafka.common.protocol.types.Type;
@@ -27,6 +28,7 @@ import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;

 public class Protocol {

@@ -49,7 +51,23 @@ public class Protocol {
                     subTypes.put(field.def.name, type.arrayElementType().get());
                 }
             } else if (type instanceof TaggedFields) {
-                b.append("_tagged_fields ");
+                Map<Integer, Field> taggedFields = new TreeMap<>(((TaggedFields) type).fields());
+                taggedFields.forEach((tag, taggedField) -> {
+                    if (taggedField.type.isArray()) {
+                        b.append("[");
+                        b.append(taggedField.name);
+                        b.append("]");
+                        if (!subTypes.containsKey(taggedField.name))
+                            subTypes.put(taggedField.name + "<tag: " + tag.toString() + ">", taggedField.type.arrayElementType().get());
+                    } else {
+                        b.append(taggedField.name);
+                        if (!subTypes.containsKey(taggedField.name))
+                            subTypes.put(taggedField.name + "<tag: " + tag.toString() + ">", taggedField.type);
+                    }
+                    b.append("<tag: ");
+                    b.append(tag);
+                    b.append("> ");
+                });
             } else {
                 b.append(field.def.name);
                 b.append(" ");
@@ -90,6 +108,12 @@ public class Protocol {
         }
     }

+    private static void appendFieldNameToTable(String name, StringBuilder b) {
+        b.append("<td>");
+        b.append(name);
+        b.append("</td>");
+    }
+
     private static void schemaToFieldTableHtml(Schema schema, StringBuilder b) {
         Set<BoundField> fields = new LinkedHashSet<>();
         populateSchemaFields(schema, fields);
@@ -101,28 +125,12 @@ public class Protocol {
         b.append("</tr>");
         for (BoundField field : fields) {
             b.append("<tr>\n");
-            b.append("<td>");
-            b.append(field.def.name);
-            b.append("</td>");
-            b.append("<td>");
             if (field.def.type instanceof TaggedFields) {
                 TaggedFields taggedFields = (TaggedFields) field.def.type;
                 // Only include the field in the table if there are actually tags defined
                 if (taggedFields.numFields() > 0) {
-                    b.append("<table class=\"data-table\"><tbody>\n");
-                    b.append("<tr>");
-                    b.append("<th>Tag</th>\n");
-                    b.append("<th>Tagged field</th>\n");
-                    b.append("<th>Description</th>\n");
-                    b.append("</tr>");
                     taggedFields.fields().forEach((tag, taggedField) -> {
-                        b.append("<tr>\n");
-                        b.append("<td>");
-                        b.append(tag);
-                        b.append("</td>");
-                        b.append("<td>");
-                        b.append(taggedField.name);
-                        b.append("</td>");
+                        appendFieldNameToTable(taggedField.name + "<tag: " + tag.toString() + ">", b);
                         b.append("<td>");
                         b.append(taggedField.docString);
                         if (taggedField.type.isArray()) {
@@ -136,11 +144,10 @@ public class Protocol {
                         b.append("</td>");
                         b.append("</tr>\n");
                     });
-                    b.append("</tbody></table>\n");
-                } else {
-                    b.append(field.def.docString);
                 }
             } else {
+                appendFieldNameToTable(field.def.name, b);
+                b.append("<td>");
                 b.append(field.def.docString);
             }
             b.append("</td>");
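The protocol-doc generator above now copies the tagged fields into a TreeMap, so the BNF and table output list them in ascending tag order regardless of how the underlying map was populated. A tiny stand-alone illustration of that ordering choice (plain JDK, with made-up field names):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class TagOrdering {
        public static void main(String[] args) {
            Map<Integer, String> insertionOrder = new LinkedHashMap<>();
            insertionOrder.put(2, "cluster.id");            // hypothetical tagged fields
            insertionOrder.put(0, "supported.features");
            insertionOrder.put(1, "finalized.features");

            // Wrapping in a TreeMap sorts by tag, which is what the generated docs rely on.
            Map<Integer, String> byTag = new TreeMap<>(insertionOrder);
            byTag.forEach((tag, name) -> System.out.println(name + "<tag: " + tag + ">"));
            // Prints supported.features<tag: 0>, finalized.features<tag: 1>, cluster.id<tag: 2>
        }
    }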
@@ -135,12 +135,7 @@ public class AbstractCoordinatorTest {
                 Optional.empty(), Optional.empty());
     }

     private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional<String> groupInstanceId, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier) {
-        setupCoordinator(retryBackoffMs, retryBackoffMaxMs, rebalanceTimeoutMs, groupInstanceId, heartbeatThreadSupplier, groupInstanceId.isEmpty());
-    }
-
-    private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional<String> groupInstanceId, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier, boolean leaveOnClose) {
         LogContext logContext = new LogContext();
         this.mockTime = new MockTime();
         ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, retryBackoffMaxMs, 60 * 60 * 1000L,
@@ -168,8 +163,7 @@ public class AbstractCoordinatorTest {
                 groupInstanceId,
                 null,
                 retryBackoffMs,
-                retryBackoffMaxMs,
-                leaveOnClose);
+                retryBackoffMaxMs);
         this.coordinator = new DummyCoordinator(rebalanceConfig,
                 consumerClient,
                 metrics,
@@ -1109,7 +1103,7 @@ public class AbstractCoordinatorTest {
     @ParameterizedTest
     @MethodSource("groupInstanceIdAndMembershipOperationMatrix")
     public void testLeaveGroupSentWithGroupInstanceIdUnSetAndDifferentGroupMembershipOperation(Optional<String> groupInstanceId, CloseOptions.GroupMembershipOperation operation) {
-        checkLeaveGroupRequestSent(groupInstanceId, operation, Optional.empty(), true);
+        checkLeaveGroupRequestSent(groupInstanceId, operation, Optional.empty());
     }

     private static Stream<Arguments> groupInstanceIdAndMembershipOperationMatrix() {
@@ -1124,11 +1118,11 @@ public class AbstractCoordinatorTest {
     }

     private void checkLeaveGroupRequestSent(Optional<String> groupInstanceId) {
-        checkLeaveGroupRequestSent(groupInstanceId, CloseOptions.GroupMembershipOperation.DEFAULT, Optional.empty(), groupInstanceId.isEmpty());
+        checkLeaveGroupRequestSent(groupInstanceId, CloseOptions.GroupMembershipOperation.DEFAULT, Optional.empty());
     }

-    private void checkLeaveGroupRequestSent(Optional<String> groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier, boolean leaveOnClose) {
-        setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier, leaveOnClose);
+    private void checkLeaveGroupRequestSent(Optional<String> groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional<Supplier<BaseHeartbeatThread>> heartbeatThreadSupplier) {
+        setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier);

         mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
         mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
@@ -224,8 +224,7 @@ public abstract class ConsumerCoordinatorTest {
                 groupInstanceId,
                 rackId,
                 retryBackoffMs,
-                retryBackoffMaxMs,
-                groupInstanceId.isEmpty());
+                retryBackoffMaxMs);
     }

     @AfterEach
@@ -47,8 +47,7 @@ public class HeartbeatTest {
                 Optional.empty(),
                 null,
                 retryBackoffMs,
-                retryBackoffMaxMs,
-                true);
+                retryBackoffMaxMs);
         heartbeat = new Heartbeat(rebalanceConfig, time);
     }

@@ -19,45 +19,323 @@ package org.apache.kafka.clients.consumer.internals.metrics;
 import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy;
 import org.apache.kafka.clients.consumer.internals.SubscriptionState;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.metrics.MetricConfig;
 import org.apache.kafka.common.metrics.Metrics;
 import org.apache.kafka.common.utils.LogContext;
 import org.apache.kafka.common.utils.MockTime;
 import org.apache.kafka.common.utils.Time;

+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;

 import java.util.Optional;
 import java.util.Set;

 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;


 class ConsumerRebalanceMetricsManagerTest {

-    private final Time time = new MockTime();
-    private final Metrics metrics = new Metrics(time);
+    private Time time;
+    private Metrics metrics;
+    private SubscriptionState subscriptionState;
+    private ConsumerRebalanceMetricsManager metricsManager;
+    private MetricConfig metricConfig;
+    private long windowSizeMs;
+    private int numSamples;
+
+    @BeforeEach
+    public void setUp() {
+        time = new MockTime();
+        // Use MetricConfig with its default values
+        windowSizeMs = 30000; // 30 seconds - default value
+        numSamples = 2; // default value
+        metricConfig = new MetricConfig()
+                .samples(numSamples)
+                .timeWindow(windowSizeMs, java.util.concurrent.TimeUnit.MILLISECONDS);
+        metrics = new Metrics(metricConfig, time);
+        subscriptionState = new SubscriptionState(mock(LogContext.class), AutoOffsetResetStrategy.EARLIEST);
+        metricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState);
+    }
+
+    @AfterEach
+    public void tearDown() {
+        metrics.close();
+    }
+
     @Test
     public void testAssignedPartitionCountMetric() {
-        SubscriptionState subscriptionState = new SubscriptionState(mock(LogContext.class), AutoOffsetResetStrategy.EARLIEST);
-        ConsumerRebalanceMetricsManager consumerRebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState);
-
-        assertNotNull(metrics.metric(consumerRebalanceMetricsManager.assignedPartitionsCount), "Metric assigned-partitions has not been registered as expected");
+        assertNotNull(metrics.metric(metricsManager.assignedPartitionsCount), "Metric assigned-partitions has not been registered as expected");

         // Check for manually assigned partitions
         subscriptionState.assignFromUser(Set.of(new TopicPartition("topic", 0), new TopicPartition("topic", 1)));
-        assertEquals(2.0d, metrics.metric(consumerRebalanceMetricsManager.assignedPartitionsCount).metricValue());
+        assertEquals(2.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue());
         subscriptionState.assignFromUser(Set.of());
-        assertEquals(0.0d, metrics.metric(consumerRebalanceMetricsManager.assignedPartitionsCount).metricValue());
+        assertEquals(0.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue());

         subscriptionState.unsubscribe();
-        assertEquals(0.0d, metrics.metric(consumerRebalanceMetricsManager.assignedPartitionsCount).metricValue());
+        assertEquals(0.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue());

         // Check for automatically assigned partitions
         subscriptionState.subscribe(Set.of("topic"), Optional.empty());
         subscriptionState.assignFromSubscribed(Set.of(new TopicPartition("topic", 0)));
-        assertEquals(1.0d, metrics.metric(consumerRebalanceMetricsManager.assignedPartitionsCount).metricValue());
+        assertEquals(1.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue());
+    }
+
+    @Test
+    public void testRebalanceTimingMetrics() {
+        // Verify timing metrics are registered
+        assertNotNull(metrics.metric(metricsManager.rebalanceLatencyAvg));
+        assertNotNull(metrics.metric(metricsManager.rebalanceLatencyMax));
+        assertNotNull(metrics.metric(metricsManager.rebalanceLatencyTotal));
+        assertNotNull(metrics.metric(metricsManager.rebalanceTotal));
+
+        // Record first rebalance (10ms duration)
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(10);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        // Verify metrics after first rebalance
+        assertEquals(10.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue());
+        assertEquals(10.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue());
+        assertEquals(10.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue());
+        assertEquals(1.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue());
+
+        // Record second rebalance (30ms duration)
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(30);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        // Verify metrics after second rebalance
+        assertEquals(20.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(),
+            "Average latency should be (10 + 30) / 2 = 20ms");
+        assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(),
+            "Max latency should be max(10, 30) = 30ms");
+        assertEquals(40.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(),
+            "Total latency should be 10 + 30 = 40ms");
+        assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue());
+
+        // Record third rebalance (50ms duration)
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(50);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        // Verify metrics after third rebalance
+        assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(),
+            "Average latency should be (10 + 30 + 50) / 3 = 30ms");
+        assertEquals(50.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(),
+            "Max latency should be max(10, 30, 50) = 50ms");
+        assertEquals(90.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(),
+            "Total latency should be 10 + 30 + 50 = 90ms");
+        assertEquals(3.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue());
+    }
+
+    @Test
+    public void testRebalanceRateMetric() {
+        // Verify rate metric is registered
+        assertNotNull(metrics.metric(metricsManager.rebalanceRatePerHour));
+
+        // Record 3 rebalances within 30ms total (3 x 10ms)
+        int rebalanceCount = 3;
+        long startTime = time.milliseconds();
+        for (int i = 0; i < rebalanceCount; i++) {
+            metricsManager.recordRebalanceStarted(time.milliseconds());
+            time.sleep(10);
+            metricsManager.recordRebalanceEnded(time.milliseconds());
+        }
+        long endTime = time.milliseconds();
+        long actualElapsedMs = endTime - startTime;
+
+        double ratePerHour = (Double) metrics.metric(metricsManager.rebalanceRatePerHour).metricValue();
+
+        // The Rate metric calculation:
+        // - Uses elapsed time from the oldest sample
+        // - Ensures minimum window size of (numSamples - 1) * windowSizeMs
+        // - With default config: minWindow = (2-1) * 30000 = 30000ms
+        long minWindowMs = (numSamples - 1) * windowSizeMs; // (2-1) * 30000 = 30000ms
+
+        // Since actualElapsedMs (30ms) is much less than minWindowMs (30000ms),
+        // the rate calculation will use minWindowMs as the window
+        // Rate per hour = count / (windowMs / 1000) * 3600
+        double expectedRatePerHour = (double) rebalanceCount / (minWindowMs / 1000.0) * 3600.0;
+
+        assertEquals(expectedRatePerHour, ratePerHour, 1.0,
+            String.format("With %d rebalances in %dms, min window %dms: expecting %.1f rebalances/hour",
+                rebalanceCount, actualElapsedMs, minWindowMs, expectedRatePerHour));
+    }
+
+    @Test
+    public void testFailedRebalanceMetrics() {
+        // Verify failed rebalance metrics are registered
+        assertNotNull(metrics.metric(metricsManager.failedRebalanceTotal));
+        assertNotNull(metrics.metric(metricsManager.failedRebalanceRate));
+
+        assertEquals(0.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(),
+            "Initially, there should be no failed rebalances");
+
+        // Start a rebalance but don't complete it
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(10);
+
+        metricsManager.maybeRecordRebalanceFailed();
+        assertEquals(1.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(),
+            "Failed rebalance count should increment to 1 after recording failure");
+
+        // Complete a successful rebalance
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(10);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        metricsManager.maybeRecordRebalanceFailed();
+        assertEquals(1.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(),
+            "Failed count should not increment after successful rebalance completes");
+
+        // Start another rebalance, don't complete it, then record failure
+        time.sleep(10);
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        assertTrue(metricsManager.rebalanceStarted(), "Rebalance should be in progress");
+        time.sleep(10);
+        // Don't call recordRebalanceEnded() to simulate an incomplete rebalance
+        metricsManager.maybeRecordRebalanceFailed();
+        assertEquals(2.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue());
+
+        double failedRate = (Double) metrics.metric(metricsManager.failedRebalanceRate).metricValue();
+
+        // Calculate expected failed rate based on Rate metric behavior
+        // We had 2 failures over ~40ms, but minimum window is (numSamples - 1) * windowSizeMs
+        long minWindowMs = (numSamples - 1) * windowSizeMs; // (2-1) * 30000 = 30000ms
+        double expectedFailedRatePerHour = 2.0 / (minWindowMs / 1000.0) * 3600.0;
+
+        assertEquals(expectedFailedRatePerHour, failedRate, 1.0,
+            String.format("With 2 failures, min window %dms: expecting %.1f failures/hour",
+                minWindowMs, expectedFailedRatePerHour));
+    }
+
+    @Test
+    public void testLastRebalanceSecondsAgoMetric() {
+        // Verify metric is registered
+        assertNotNull(metrics.metric(metricsManager.lastRebalanceSecondsAgo));
+
+        assertEquals(-1.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(),
+            "Should return -1 when no rebalance has occurred");
+
+        // Complete a rebalance
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(10);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        assertEquals(0.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(),
+            "Should return 0 immediately after rebalance completes");
+
+        // Advance time by 5 seconds
+        time.sleep(5000);
+        assertEquals(5.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue());
+
+        // Advance time by another 10 seconds
+        time.sleep(10000);
+        assertEquals(15.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue());
+
+        // Complete another rebalance
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(20);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+
+        assertEquals(0.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(),
+            "Should reset to 0 after a new rebalance completes");
+    }
+
+    @Test
+    public void testRebalanceStartedFlag() {
+        assertFalse(metricsManager.rebalanceStarted(),
+            "Initially, no rebalance should be in progress");
+
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        assertTrue(metricsManager.rebalanceStarted(),
+            "Rebalance should be marked as started after recordRebalanceStarted()");
+
+        time.sleep(10);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+        assertFalse(metricsManager.rebalanceStarted(),
+            "Rebalance should not be in progress after recordRebalanceEnded()");
+
+        // Start another rebalance - advance time first
+        time.sleep(100);
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        assertTrue(metricsManager.rebalanceStarted(),
+            "New rebalance should be marked as started");
+    }
+
+    @Test
+    public void testMultipleConsecutiveFailures() {
+        // Record multiple consecutive failures
+        for (int i = 0; i < 5; i++) {
+            metricsManager.recordRebalanceStarted(time.milliseconds());
+            time.sleep(10);
+            metricsManager.maybeRecordRebalanceFailed();
+        }
+
+        assertEquals(5.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(),
+            "Should have recorded 5 consecutive failed rebalances");
+
+        assertEquals(0.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue(),
+            "Successful rebalance count should remain 0 when only failures occur");
+    }
+
+    @Test
+    public void testMixedSuccessAndFailureScenarios() {
+        // Success -> Failure -> Success -> Failure pattern
+        // First success
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(20);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+        assertEquals(1.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue());
+
+        // First failure
+        time.sleep(10);
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        assertTrue(metricsManager.rebalanceStarted(), "First failure rebalance should be in progress");
+        time.sleep(30);
+        metricsManager.maybeRecordRebalanceFailed();
+
+        double failedAfterFirst = (Double) metrics.metric(metricsManager.failedRebalanceTotal).metricValue();
+        assertEquals(1.0d, failedAfterFirst, "Should have recorded one failed rebalance after first failure");
+
+        // Second success
+        time.sleep(10);
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        time.sleep(40);
+        metricsManager.recordRebalanceEnded(time.milliseconds());
+        assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue());
+
+        // Second failure
+        time.sleep(10);
+        metricsManager.recordRebalanceStarted(time.milliseconds());
+        assertTrue(metricsManager.rebalanceStarted(), "Second failure rebalance should be in progress");
+        time.sleep(50);
+        metricsManager.maybeRecordRebalanceFailed();
+
+        assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue(),
+            "Should have 2 successful rebalances in mixed scenario");
+        assertEquals(2.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(),
+            "Should have 2 failed rebalances in mixed scenario");
+
+        assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(),
+            "Average latency should only include successful rebalances: (20 + 40) / 2 = 30ms");
+        assertEquals(40.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(),
+            "Max latency should be 40ms from successful rebalances only");
+        assertEquals(60.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(),
+            "Total latency should only include successful rebalances: 20 + 40 = 60ms");
     }
 }
@@ -48,4 +48,29 @@ public class KafkaMetricTest {
         assertThrows(IllegalStateException.class, metric::measurable);
     }

+    @Test
+    public void testMeasurableValueReturnsZeroWhenNotMeasurable() {
+        MockTime time = new MockTime();
+        MetricConfig config = new MetricConfig();
+        Gauge<Integer> gauge = (c, now) -> 7;
+
+        KafkaMetric metric = new KafkaMetric(new Object(), METRIC_NAME, gauge, config, time);
+        assertEquals(0.0d, metric.measurableValue(time.milliseconds()), 0.0d);
+    }
+
+    @Test
+    public void testKafkaMetricAcceptsNonMeasurableNonGaugeProvider() {
+        MetricValueProvider<String> provider = (config, now) -> "metric value provider";
+        KafkaMetric metric = new KafkaMetric(new Object(), METRIC_NAME, provider, new MetricConfig(), new MockTime());
+
+        Object value = metric.metricValue();
+        assertEquals("metric value provider", value);
+    }
+
+    @Test
+    public void testConstructorWithNullProvider() {
+        assertThrows(NullPointerException.class, () ->
+            new KafkaMetric(new Object(), METRIC_NAME, null, new MetricConfig(), new MockTime())
+        );
+    }
 }
@@ -233,13 +233,13 @@ public class MirrorMaker {
     private void addHerder(SourceAndTarget sourceAndTarget) {
         log.info("creating herder for {}", sourceAndTarget.toString());
         Map<String, String> workerProps = config.workerConfig(sourceAndTarget);
+        DistributedConfig distributedConfig = new DistributedConfig(workerProps);
         String encodedSource = encodePath(sourceAndTarget.source());
         String encodedTarget = encodePath(sourceAndTarget.target());
         List<String> restNamespace = List.of(encodedSource, encodedTarget);
         String workerId = generateWorkerId(sourceAndTarget);
         Plugins plugins = new Plugins(workerProps);
         plugins.compareAndSwapWithDelegatingLoader();
-        DistributedConfig distributedConfig = new DistributedConfig(workerProps);
         String kafkaClusterId = distributedConfig.kafkaClusterId();
         String clientIdBase = ConnectUtils.clientIdBase(distributedConfig);
         // Create the admin client to be shared by all backing stores for this herder
@@ -114,14 +114,15 @@ public abstract class AbstractConnectCli<H extends Herder, T extends WorkerConfi
         log.info("Kafka Connect worker initializing ...");
         long initStart = time.hiResClockMs();

+        T config = createConfig(workerProps);
+        log.debug("Kafka cluster ID: {}", config.kafkaClusterId());
+
         WorkerInfo initInfo = new WorkerInfo();
         initInfo.logAll();

         log.info("Scanning for plugin classes. This might take a moment ...");
         Plugins plugins = new Plugins(workerProps);
         plugins.compareAndSwapWithDelegatingLoader();
-        T config = createConfig(workerProps);
-        log.debug("Kafka cluster ID: {}", config.kafkaClusterId());

         RestClient restClient = new RestClient(config);

@@ -149,8 +149,7 @@ public class WorkerCoordinatorIncrementalTest {
                 Optional.empty(),
                 null,
                 retryBackoffMs,
-                retryBackoffMaxMs,
-                true);
+                retryBackoffMaxMs);
         this.coordinator = new WorkerCoordinator(rebalanceConfig,
                 loggerFactory,
                 consumerClient,
@@ -140,8 +140,7 @@ public class WorkerCoordinatorTest {
                 Optional.empty(),
                 null,
                 retryBackoffMs,
-                retryBackoffMaxMs,
-                true);
+                retryBackoffMaxMs);
         this.coordinator = new WorkerCoordinator(rebalanceConfig,
                 logContext,
                 consumerClient,
@@ -20,7 +20,6 @@ import java.lang.{Long => JLong}
 import java.util.concurrent.locks.ReentrantReadWriteLock
 import java.util.Optional
 import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, CopyOnWriteArrayList}
-import kafka.controller.StateChangeLogger
 import kafka.log._
 import kafka.server._
 import kafka.server.share.DelayedShareFetch
@@ -37,6 +36,7 @@ import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch}
 import org.apache.kafka.common.requests._
 import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
 import org.apache.kafka.common.utils.Time
+import org.apache.kafka.logger.StateChangeLogger
 import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, PartitionRegistration}
 import org.apache.kafka.server.common.RequestLocal
 import org.apache.kafka.server.log.remote.TopicPartitionLog
@@ -322,7 +322,7 @@ class Partition(val topicPartition: TopicPartition,
   def topic: String = topicPartition.topic
   def partitionId: Int = topicPartition.partition

-  private val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None)
+  private val stateChangeLogger = new StateChangeLogger(localBrokerId)
   private val remoteReplicasMap = new ConcurrentHashMap[Int, Replica]
   // The read lock is only required when multiple reads are executed and needs to be in a consistent manner
   private val leaderIsrUpdateLock = new ReentrantReadWriteLock
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package kafka.controller
-
-import com.typesafe.scalalogging.Logger
-import kafka.utils.Logging
-
-object StateChangeLogger {
-  private val logger = Logger("state.change.logger")
-}
-
-/**
- * Simple class that sets `logIdent` appropriately depending on whether the state change logger is being used in the
- * context of the KafkaController or not (e.g. ReplicaManager and MetadataCache log to the state change logger
- * irrespective of whether the broker is the Controller).
- */
-class StateChangeLogger(brokerId: Int, inControllerContext: Boolean, controllerEpoch: Option[Int]) extends Logging {
-
-  if (controllerEpoch.isDefined && !inControllerContext)
-    throw new IllegalArgumentException("Controller epoch should only be defined if inControllerContext is true")
-
-  override lazy val logger: Logger = StateChangeLogger.logger
-
-  locally {
-    val prefix = if (inControllerContext) "Controller" else "Broker"
-    val epochEntry = controllerEpoch.fold("")(epoch => s" epoch=$epoch")
-    logIdent = s"[$prefix id=$brokerId$epochEntry] "
-  }
-
-}
@@ -18,7 +18,6 @@ package kafka.server
 
 import com.yammer.metrics.core.Meter
 import kafka.cluster.{Partition, PartitionListener}
-import kafka.controller.StateChangeLogger
 import kafka.log.LogManager
 import kafka.server.HostedPartition.Online
 import kafka.server.QuotaFactory.QuotaManagers
@@ -48,6 +47,7 @@ import org.apache.kafka.common.requests._
 import org.apache.kafka.common.utils.{Exit, Time, Utils}
 import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig}
 import org.apache.kafka.image.{LocalReplicaChanges, MetadataImage, TopicsDelta}
+import org.apache.kafka.logger.StateChangeLogger
 import org.apache.kafka.metadata.LeaderConstants.NO_LEADER
 import org.apache.kafka.metadata.MetadataCache
 import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition}
@@ -272,7 +272,7 @@ class ReplicaManager(val config: KafkaConfig,
   @volatile private var isInControlledShutdown = false
 
   this.logIdent = s"[ReplicaManager broker=$localBrokerId] "
-  protected val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None)
+  protected val stateChangeLogger = new StateChangeLogger(localBrokerId)
 
   private var logDirFailureHandler: LogDirFailureHandler = _
 
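The relocated logger keeps the same call-site shape minus the controller-specific arguments. A minimal sketch of the new usage, assuming only the single-argument constructor visible in the hunks above (the surrounding class and field names are illustrative, not part of the patch):

// Sketch only: constructing the relocated org.apache.kafka.logger.StateChangeLogger.
// The wrapper class below is hypothetical; only the constructor call mirrors the patch.
import org.apache.kafka.logger.StateChangeLogger;

public class ExampleBrokerComponent {
    private final StateChangeLogger stateChangeLogger;

    public ExampleBrokerComponent(int localBrokerId) {
        // Before: new StateChangeLogger(localBrokerId, inControllerContext = false, None)
        // After: the broker id is the only argument.
        this.stateChangeLogger = new StateChangeLogger(localBrokerId);
    }
}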
@@ -24,7 +24,7 @@ import java.lang.{Long => JLong}
 import java.time.{Duration => JDuration}
 import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
 import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit}
-import java.util.{Collections, Locale, Optional, Properties}
+import java.util.{Collections, Optional, Properties}
 import java.{time, util}
 import kafka.integration.KafkaServerTestHarness
 import kafka.server.KafkaConfig
@@ -2355,9 +2355,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
     defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
     defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId)
     defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId)
-    // We need to set internal.leave.group.on.close to validate dynamic member removal, but it only works for ClassicConsumer
-    // After KIP-1092, we can control dynamic member removal for both ClassicConsumer and AsyncConsumer
-    defaultConsumerConfig.setProperty("internal.leave.group.on.close", "false")
 
     val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig)
     groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) =>
@@ -2406,14 +2403,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
     var testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get()
     assertEquals(testGroupId, testGroupDescription.groupId)
     assertFalse(testGroupDescription.isSimpleConsumerGroup)
 
-    // Although we set `internal.leave.group.on.close` in the consumer, it only works for ClassicConsumer.
-    // After KIP-1092, we can control dynamic member removal in consumer.close()
-    if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) {
-      assertEquals(3, testGroupDescription.members().size())
-    } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) {
-      assertEquals(2, testGroupDescription.members().size())
-    }
+    assertEquals(2, testGroupDescription.members().size())
 
     // Test delete one static member
     removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId,
@@ -2426,11 +2416,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest {
       new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true))
     testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get()
 
-    if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) {
-      assertEquals(2, testGroupDescription.members().size())
-    } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) {
-      assertEquals(1, testGroupDescription.members().size())
-    }
+    assertEquals(1, testGroupDescription.members().size())
 
     // Delete all active members remaining
     removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions())
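With the internal.leave.group.on.close escape hatch gone, the test relies purely on Admin-side member removal, which is also the public way to do this outside of tests. A self-contained sketch of those calls, with a made-up group id, group.instance.id, and bootstrap address:

// Sketch only: removing consumer group members via the Admin client.
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.MemberToRemove;
import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions;

public class RemoveGroupMembersSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Remove one static member, identified by its group.instance.id.
            admin.removeMembersFromConsumerGroup("test-group",
                new RemoveMembersFromConsumerGroupOptions(
                    Collections.singleton(new MemberToRemove("instance-1")))).all().get();

            // Remove every remaining active member of the group.
            admin.removeMembersFromConsumerGroup("test-group",
                new RemoveMembersFromConsumerGroupOptions()).all().get();
        }
    }
}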
@@ -4315,7 +4315,7 @@ $ bin/kafka-topics.sh --create --topic tieredTopic --bootstrap-server localhost:
 
 <p>Lastly, we can try to consume some data from the beginning and print offset number, to make sure it will successfully fetch offset 0 from the remote storage.</p>
 
-<pre><code class="language-bash">$ bin/kafka-console-consumer.sh --topic tieredTopic --from-beginning --max-messages 1 --bootstrap-server localhost:9092 --property print.offset=true</code></pre>
+<pre><code class="language-bash">$ bin/kafka-console-consumer.sh --topic tieredTopic --from-beginning --max-messages 1 --bootstrap-server localhost:9092 --formatter-property print.offset=true</code></pre>
 
 <p>In KRaft mode, you can disable tiered storage at the topic level, to make the remote logs as read-only logs, or completely delete all remote logs.</p>
 
@@ -1222,7 +1222,7 @@ streamsConfig.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksD
 These optimizations include moving/reducing repartition topics and reusing the source topic as the changelog for source KTables. These optimizations will save on network traffic and storage in Kafka without changing the semantics of your applications. Enabling them is recommended.
 </p>
 <p>
-Note that as of 2.3, you need to do two things to enable optimizations. In addition to setting this config to <code>StreamsConfig.OPTIMIZE</code>, you'll need to pass in your
+Note that you need to do two things to enable optimizations. In addition to setting this config to <code>StreamsConfig.OPTIMIZE</code>, you'll need to pass in your
 configuration properties when building your topology by using the overloaded <code>StreamsBuilder.build(Properties)</code> method.
 For example <code>KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(properties), properties)</code>.
 </p>
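For reference, the two steps the revised paragraph describes, sketched end to end in Java; the application id, topic names, and bootstrap address are placeholders:

// Sketch only: enabling topology optimizations.
import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class OptimizedTopologyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "optimized-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Step 1: opt in to optimizations.
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // placeholder topology

        // Step 2: pass the same properties to build(), so the physical plan can be optimized.
        KafkaStreams streams = new KafkaStreams(builder.build(props), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}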
@@ -1235,7 +1235,7 @@ streamsConfig.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksD
 The version you are upgrading from. It is important to set this config when performing a rolling upgrade to certain versions, as described in the upgrade guide.
 You should set this config to the appropriate version before bouncing your instances and upgrading them to the newer version. Once everyone is on the
 newer version, you should remove this config and do a second rolling bounce. It is only necessary to set this config and follow the two-bounce upgrade path
-when upgrading from below version 2.0, or when upgrading to 2.4+ from any version lower than 2.4.
+when upgrading to 3.4+ from any version lower than 3.4.
 </div>
 </blockquote>
 </div>
@@ -217,15 +217,15 @@ TimeWindowedDeserializer<String> deserializer = new TimeWindowedDeserializ
 <h4>Usage in Command Line</h4>
 <p>When using command-line tools (like <code>bin/kafka-console-consumer.sh</code>), you can configure windowed deserializers by passing the inner class and window size via configuration properties. The property names use a prefix pattern:</p>
 <pre class="line-numbers"><code class="language-bash"># Time windowed deserializer configuration
---property print.key=true \
---property key.deserializer=org.apache.kafka.streams.kstream.TimeWindowedDeserializer \
---property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer \
---property key.deserializer.window.size.ms=500
+--formatter-property print.key=true \
+--formatter-property key.deserializer=org.apache.kafka.streams.kstream.TimeWindowedDeserializer \
+--formatter-property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer \
+--formatter-property key.deserializer.window.size.ms=500
 
 # Session windowed deserializer configuration
---property print.key=true \
---property key.deserializer=org.apache.kafka.streams.kstream.SessionWindowedDeserializer \
---property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer</code></pre>
+--formatter-property print.key=true \
+--formatter-property key.deserializer=org.apache.kafka.streams.kstream.SessionWindowedDeserializer \
+--formatter-property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer</code></pre>
 
 <h4>Deprecated Configs</h4>
 <p>The following <code>StreamsConfig</code> parameters are deprecated in favor of passing parameters directly to serializer/deserializer constructors:</p>
@@ -175,10 +175,10 @@ and inspect the output of the WordCount demo application by reading from its out
 <pre><code class="language-bash">$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
 --topic streams-wordcount-output \
 --from-beginning \
---property print.key=true \
---property print.value=true \
---property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
---property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer</code></pre>
+--formatter-property print.key=true \
+--formatter-property print.value=true \
+--formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
+--formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer</code></pre>
 
 
 <h4><a id="quickstart_streams_process" href="#quickstart_streams_process">Step 5: Process some data</a></h4>
@@ -197,10 +197,10 @@ This message will be processed by the Wordcount application and the following ou
 <pre><code class="language-bash">$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
 --topic streams-wordcount-output \
 --from-beginning \
---property print.key=true \
---property print.value=true \
---property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
---property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
+--formatter-property print.key=true \
+--formatter-property print.value=true \
+--formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
+--formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
 
 all 1
 streams 1
@@ -225,10 +225,10 @@ In your other terminal in which the console consumer is running, you will observ
 <pre><code class="language-bash">$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
 --topic streams-wordcount-output \
 --from-beginning \
---property print.key=true \
---property print.value=true \
---property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
---property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
+--formatter-property print.key=true \
+--formatter-property print.value=true \
+--formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
+--formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
 
 all 1
 streams 1
@@ -255,10 +255,10 @@ The <b>streams-wordcount-output</b> topic will subsequently show the correspondi
 <pre><code class="language-bash">$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
 --topic streams-wordcount-output \
 --from-beginning \
---property print.key=true \
---property print.value=true \
---property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
---property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
+--formatter-property print.key=true \
+--formatter-property print.value=true \
+--formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
+--formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
 
 all 1
 streams 1
@@ -35,9 +35,8 @@
 
 <p>
 Upgrading from any older version to {{fullDotVersion}} is possible: if upgrading from 3.4 or below, you will need to do two rolling bounces, where during the first rolling bounce phase you set the config <code>upgrade.from="older version"</code>
-(possible values are <code>"0.10.0" - "3.4"</code>) and during the second you remove it. This is required to safely handle 3 changes. The first is introduction of the new cooperative rebalancing protocol of the embedded consumer. The second is a change in foreign-key join serialization format.
-Note that you will remain using the old eager rebalancing protocol if you skip or delay the second rolling bounce, but you can safely switch over to cooperative at any time once the entire group is on 2.4+ by removing the config value and bouncing. For more details please refer to
-<a href="https://cwiki.apache.org/confluence/x/vAclBg">KIP-429</a>. The third is a change in the serialization format for an internal repartition topic. For more details, please refer to <a href="https://cwiki.apache.org/confluence/x/P5VbDg">KIP-904</a>:
+(possible values are <code>"2.4" - "3.4"</code>) and during the second you remove it. This is required to safely handle 2 changes. The first is a change in foreign-key join serialization format.
+The second is a change in the serialization format for an internal repartition topic. For more details, please refer to <a href="https://cwiki.apache.org/confluence/x/P5VbDg">KIP-904</a>:
 </p>
 <ul>
 <li> prepare your application instances for a rolling bounce and make sure that config <code>upgrade.from</code> is set to the version from which it is being upgrade.</li>
@@ -45,24 +44,12 @@
 <li> prepare your newly deployed {{fullDotVersion}} application instances for a second round of rolling bounces; make sure to remove the value for config <code>upgrade.from</code> </li>
 <li> bounce each instance of your application once more to complete the upgrade </li>
 </ul>
-<p> As an alternative, an offline upgrade is also possible. Upgrading from any versions as old as 0.10.0.x to {{fullDotVersion}} in offline mode require the following steps: </p>
+<p> As an alternative, an offline upgrade is also possible. Upgrading from any versions as old as 0.11.0.x to {{fullDotVersion}} in offline mode require the following steps: </p>
 <ul>
-<li> stop all old (e.g., 0.10.0.x) application instances </li>
+<li> stop all old (e.g., 0.11.0.x) application instances </li>
 <li> update your code and swap old code and jar file with new code and new jar file </li>
 <li> restart all new ({{fullDotVersion}}) application instances </li>
 </ul>
-<p>
-Note: The cooperative rebalancing protocol has been the default since 2.4, but we have continued to support the
-eager rebalancing protocol to provide users an upgrade path. This support will be dropped in a future release,
-so any users still on the eager protocol should prepare to finish upgrading their applications to the cooperative protocol in version 3.1.
-This only affects users who are still on a version older than 2.4, and users who have upgraded already but have not yet
-removed the <code>upgrade.from</code> config that they set when upgrading from a version below 2.4.
-Users fitting into the latter case will simply need to unset this config when upgrading beyond 3.1,
-while users in the former case will need to follow a slightly different upgrade path if they attempt to upgrade from 2.3 or below to a version above 3.1.
-Those applications will need to go through a bridge release, by first upgrading to a version between 2.4 - 3.1 and setting the <code>upgrade.from</code> config,
-then removing that config and upgrading to the final version above 3.1. See <a href="https://issues.apache.org/jira/browse/KAFKA-8575">KAFKA-8575</a>
-for more details.
-</p>
 
 <p>For a table that shows Streams API compatibility with Kafka broker versions, see <a href="#streams_api_broker_compat">Broker Compatibility</a>.</p>
 
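For the first of the two rolling bounces described above, the only code-level change is the extra config. A minimal sketch, where "3.3" stands in for whatever version the application is actually upgrading from:

// Sketch only: first-bounce configuration for the two-bounce upgrade path.
import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class UpgradeFromFirstBounce {
    public static Properties firstBounceProps(Properties baseProps) {
        Properties props = new Properties();
        props.putAll(baseProps);
        // Set upgrade.from for the first rolling bounce ...
        props.put(StreamsConfig.UPGRADE_FROM_CONFIG, "3.3");
        // ... and remove it again before the second rolling bounce.
        return props;
    }
}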
@@ -121,26 +108,10 @@
 
 <p>Since 2.6.0 release, Kafka Streams depends on a RocksDB version that requires MacOS 10.14 or higher.</p>
 
-<p>
-To run a Kafka Streams application version 2.2.1, 2.3.0, or higher a broker version 0.11.0 or higher is required
-and the on-disk message format must be 0.11 or higher.
-Brokers must be on version 0.10.1 or higher to run a Kafka Streams application version 0.10.1 to 2.2.0.
-Additionally, on-disk message format must be 0.10 or higher to run a Kafka Streams application version 1.0 to 2.2.0.
-For Kafka Streams 0.10.0, broker version 0.10.0 or higher is required.
-</p>
-
-<p>
-In deprecated <code>KStreamBuilder</code> class, when a <code>KTable</code> is created from a source topic via <code>KStreamBuilder.table()</code>, its materialized state store
-will reuse the source topic as its changelog topic for restoring, and will disable logging to avoid appending new updates to the source topic; in the <code>StreamsBuilder</code> class introduced in 1.0, this behavior was changed
-accidentally: we still reuse the source topic as the changelog topic for restoring, but will also create a separate changelog topic to append the update records from source topic to. In the 2.0 release, we have fixed this issue and now users
-can choose whether or not to reuse the source topic based on the <code>StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG</code>: if you are upgrading from the old <code>KStreamBuilder</code> class and hence you need to change your code to use
-the new <code>StreamsBuilder</code>, you should set this config value to <code>StreamsConfig#OPTIMIZE</code> to continue reusing the source topic; if you are upgrading from 1.0 or 1.1 where you are already using <code>StreamsBuilder</code> and hence have already
-created a separate changelog topic, you should set this config value to <code>StreamsConfig#NO_OPTIMIZATION</code> when upgrading to {{fullDotVersion}} in order to use that changelog topic for restoring the state store.
-More details about the new config <code>StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG</code> can be found in <a href="https://cwiki.apache.org/confluence/x/V53LB">KIP-295</a>.
-</p>
 
 <h3><a id="streams_api_changes_410" href="#streams_api_changes_410">Streams API changes in 4.1.0</a></h3>
 
+<p><b>Note:</b> Kafka Streams 4.1.0 contains a critical memory leak bug (<a href="https://issues.apache.org/jira/browse/KAFKA-19748">KAFKA-19748</a>) that affects users of range scans and certain DSL operators (session windows, sliding windows, stream-stream joins, foreign-key joins). Users running Kafka Streams should consider upgrading directly to 4.1.1 when available.</p>
 <h4>Early Access of the Streams Rebalance Protocol</h4>
 
 <p>
@ -1150,705 +1121,9 @@
|
||||||
Hence, this feature won't be supported in the future any longer and you need to updated your code accordingly.
|
Hence, this feature won't be supported in the future any longer and you need to updated your code accordingly.
|
||||||
If you use a custom <code>PartitionGrouper</code> and stop to use it, the created tasks might change.
|
If you use a custom <code>PartitionGrouper</code> and stop to use it, the created tasks might change.
|
||||||
Hence, you will need to reset your application to upgrade it.
|
Hence, you will need to reset your application to upgrade it.
|
||||||
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_230" class="anchor-link"></a><a href="#streams_api_changes_230">Streams API changes in 2.3.0</a></h3>
|
|
||||||
|
|
||||||
<p>Version 2.3.0 adds the Suppress operator to the <code>kafka-streams-scala</code> Ktable API.</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
As of 2.3.0 Streams now offers an in-memory version of the window (<a href="https://cwiki.apache.org/confluence/x/6AQlBg">KIP-428</a>)
|
|
||||||
and the session (<a href="https://cwiki.apache.org/confluence/x/DiqGBg">KIP-445</a>) store, in addition to the persistent ones based on RocksDB.
|
|
||||||
The new public interfaces <code>inMemoryWindowStore()</code> and <code>inMemorySessionStore()</code> are added to <code>Stores</code> and provide the built-in in-memory window or session store.
|
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p>
|
<p>For Streams API changes in version older than 2.4.x, please check <a href="/39/documentation/streams/upgrade-guide">3.9 upgrade document</a>.</p>
|
||||||
As of 2.3.0 we've updated how to turn on optimizations. Now to enable optimizations, you need to do two things.
|
|
||||||
First add this line to your properties <code>properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);</code>, as you have done before.
|
|
||||||
Second, when constructing your <code>KafkaStreams</code> instance, you'll need to pass your configuration properties when building your
|
|
||||||
topology by using the overloaded <code>StreamsBuilder.build(Properties)</code> method.
|
|
||||||
For example <code>KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(properties), properties)</code>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
In 2.3.0 we have added default implementation to <code>close()</code> and <code>configure()</code> for <code>Serializer</code>,
|
|
||||||
<code>Deserializer</code> and <code>Serde</code> so that they can be implemented by lambda expression.
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/fgw0BQ">KIP-331</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
To improve operator semantics, new store types are added that allow storing an additional timestamp per key-value pair or window.
|
|
||||||
Some DSL operators (for example KTables) are using those new stores.
|
|
||||||
Hence, you can now retrieve the last update timestamp via Interactive Queries if you specify
|
|
||||||
<code>TimestampedKeyValueStoreType</code> or <code>TimestampedWindowStoreType</code> as your <code>QueryableStoreType</code>.
|
|
||||||
While this change is mainly transparent, there are some corner cases that may require code changes:
|
|
||||||
<strong>Caution: If you receive an untyped store and use a cast, you might need to update your code to cast to the correct type.
|
|
||||||
Otherwise, you might get an exception similar to
|
|
||||||
<code>java.lang.ClassCastException: class org.apache.kafka.streams.state.ValueAndTimestamp cannot be cast to class YOUR-VALUE-TYPE</code>
|
|
||||||
upon getting a value from the store.</strong>
|
|
||||||
Additionally, <code>TopologyTestDriver#getStateStore()</code> only returns non-built-in stores and throws an exception if a built-in store is accessed.
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/0j6HB">KIP-258</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
To improve type safety, a new operator <code>KStream#flatTransformValues</code> is added.
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/bUgYBQ">KIP-313</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Kafka Streams used to set the configuration parameter <code>max.poll.interval.ms</code> to <code>Integer.MAX_VALUE</code>.
|
|
||||||
This default value is removed and Kafka Streams uses the consumer default value now.
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/1COGBg">KIP-442</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Default configuration for repartition topic was changed:
|
|
||||||
The segment size for index files (<code>segment.index.bytes</code>) is no longer 50MB, but uses the cluster default.
|
|
||||||
Similarly, the configuration <code>segment.ms</code> in no longer 10 minutes, but uses the cluster default configuration.
|
|
||||||
Lastly, the retention period (<code>retention.ms</code>) is changed from <code>Long.MAX_VALUE</code> to <code>-1</code> (infinite).
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/4iOGBg">KIP-443</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
To avoid memory leaks, <code>RocksDBConfigSetter</code> has a new <code>close()</code> method that is called on shutdown.
|
|
||||||
Users should implement this method to release any memory used by RocksDB config objects, by closing those objects.
|
|
||||||
For more details please read <a href="https://cwiki.apache.org/confluence/x/QhaZBg">KIP-453</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
RocksDB dependency was updated to version <code>5.18.3</code>.
|
|
||||||
The new version allows to specify more RocksDB configurations, including <code>WriteBufferManager</code> which helps to limit RocksDB off-heap memory usage.
|
|
||||||
For more details please read <a href="https://issues.apache.org/jira/browse/KAFKA-8215">KAFKA-8215</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_220" class="anchor-link"></a><a href="#streams_api_changes_220">Streams API changes in 2.2.0</a></h3>
|
|
||||||
<p>
|
|
||||||
We've simplified the <code>KafkaStreams#state</code> transition diagram during the starting up phase a bit in 2.2.0: in older versions the state will transit from <code>CREATED</code> to <code>RUNNING</code>, and then to <code>REBALANCING</code> to get the first
|
|
||||||
stream task assignment, and then back to <code>RUNNING</code>; starting in 2.2.0 it will transit from <code>CREATED</code> directly to <code>REBALANCING</code> and then to <code>RUNNING</code>.
|
|
||||||
If you have registered a <code>StateListener</code> that captures state transition events, you may need to adjust your listener implementation accordingly for this simplification (in practice, your listener logic should be very unlikely to be affected at all).
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
In <code>WindowedSerdes</code>, we've added a new static constructor to return a <code>TimeWindowSerde</code> with configurable window size. This is to help users to construct time window serdes to read directly from a time-windowed store's changelog.
|
|
||||||
More details can be found in <a href="https://cwiki.apache.org/confluence/x/WYTQBQ">KIP-393</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
In 2.2.0 we have extended a few public interfaces including <code>KafkaStreams</code> to extend <code>AutoCloseable</code> so that they can be
|
|
||||||
used in a try-with-resource statement. For a full list of public interfaces that get impacted please read <a href="https://cwiki.apache.org/confluence/x/-AeQBQ">KIP-376</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_210" class="anchor-link"></a><a href="#streams_api_changes_210">Streams API changes in 2.1.0</a></h3>
|
|
||||||
<p>
|
|
||||||
We updated <code>TopologyDescription</code> API to allow for better runtime checking.
|
|
||||||
Users are encouraged to use <code>#topicSet()</code> and <code>#topicPattern()</code> accordingly on <code>TopologyDescription.Source</code> nodes,
|
|
||||||
instead of using <code>#topics()</code>, which has since been deprecated. Similarly, use <code>#topic()</code> and <code>#topicNameExtractor()</code>
|
|
||||||
to get descriptions of <code>TopologyDescription.Sink</code> nodes. For more details, see
|
|
||||||
<a href="https://cwiki.apache.org/confluence/x/NQU0BQ">KIP-321</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We've added a new class <code>Grouped</code> and deprecated <code>Serialized</code>. The intent of adding <code>Grouped</code> is the ability to
|
|
||||||
name repartition topics created when performing aggregation operations. Users can name the potential repartition topic using the
|
|
||||||
<code>Grouped#as()</code> method which takes a <code>String</code> and is used as part of the repartition topic name. The resulting repartition
|
|
||||||
topic name will still follow the pattern of <code>${application-id}->name<-repartition</code>. The <code>Grouped</code> class is now favored over
|
|
||||||
<code>Serialized</code> in <code>KStream#groupByKey()</code>, <code>KStream#groupBy()</code>, and <code>KTable#groupBy()</code>.
|
|
||||||
Note that Kafka Streams does not automatically create repartition topics for aggregation operations.
|
|
||||||
|
|
||||||
Additionally, we've updated the <code>Joined</code> class with a new method <code>Joined#withName</code>
|
|
||||||
enabling users to name any repartition topics required for performing Stream/Stream or Stream/Table join. For more details repartition
|
|
||||||
topic naming, see <a href="https://cwiki.apache.org/confluence/x/mgJ1BQ">KIP-372</a>.
|
|
||||||
|
|
||||||
As a result we've updated the Kafka Streams Scala API and removed the <code>Serialized</code> class in favor of adding <code>Grouped</code>.
|
|
||||||
If you just rely on the implicit <code>Serialized</code>, you just need to recompile; if you pass in <code>Serialized</code> explicitly, sorry you'll have to make code changes.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We've added a new config named <code>max.task.idle.ms</code> to allow users specify how to handle out-of-order data within a task that may be processing multiple
|
|
||||||
topic-partitions (see <a href="/{{version}}/documentation/streams/core-concepts.html#streams_out_of_ordering">Out-of-Order Handling</a> section for more details).
|
|
||||||
The default value is set to <code>0</code>, to favor minimized latency over synchronization between multiple input streams from topic-partitions.
|
|
||||||
If users would like to wait for longer time when some of the topic-partitions do not have data available to process and hence cannot determine its corresponding stream time,
|
|
||||||
they can override this config to a larger value.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We've added the missing <code>SessionBytesStoreSupplier#retentionPeriod()</code> to be consistent with the <code>WindowBytesStoreSupplier</code> which allows users to get the specified retention period for session-windowed stores.
|
|
||||||
We've also added the missing <code>StoreBuilder#withCachingDisabled()</code> to allow users to turn off caching for their customized stores.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We added a new serde for UUIDs (<code>Serdes.UUIDSerde</code>) that you can use via <code>Serdes.UUID()</code>
|
|
||||||
(cf. <a href="https://cwiki.apache.org/confluence/x/26hjB">KIP-206</a>).
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We updated a list of methods that take <code>long</code> arguments as either timestamp (fix point) or duration (time period)
|
|
||||||
and replaced them with <code>Instant</code> and <code>Duration</code> parameters for improved semantics.
|
|
||||||
Some old methods base on <code>long</code> are deprecated and users are encouraged to update their code.
|
|
||||||
<br />
|
|
||||||
In particular, aggregation windows (hopping/tumbling/unlimited time windows and session windows) as well as join windows now take <code>Duration</code>
|
|
||||||
arguments to specify window size, hop, and gap parameters.
|
|
||||||
Also, window sizes and retention times are now specified as <code>Duration</code> type in <code>Stores</code> class.
|
|
||||||
The <code>Window</code> class has new methods <code>#startTime()</code> and <code>#endTime()</code> that return window start/end timestamp as <code>Instant</code>.
|
|
||||||
For interactive queries, there are new <code>#fetch(...)</code> overloads taking <code>Instant</code> arguments.
|
|
||||||
Additionally, punctuations are now registered via <code>ProcessorContext#schedule(Duration interval, ...)</code>.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/IBNPBQ">KIP-358</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We deprecated <code>KafkaStreams#close(...)</code> and replaced it with <code>KafkaStreams#close(Duration)</code> that accepts a single timeout argument
|
|
||||||
Note: the new <code>#close(Duration)</code> method has improved (but slightly different) semantics.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/IBNPBQ">KIP-358</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
The newly exposed <code>AdminClient</code> metrics are now available when calling the <code>KafkaStream#metrics()</code> method.
|
|
||||||
For more details on exposing <code>AdminClients</code> metrics
|
|
||||||
see <a href="https://cwiki.apache.org/confluence/x/lQg0BQ">KIP-324</a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We deprecated the notion of segments in window stores as those are intended to be an implementation details.
|
|
||||||
Thus, method <code>Windows#segments()</code> and variable <code>Windows#segments</code> were deprecated.
|
|
||||||
If you implement custom windows, you should update your code accordingly.
|
|
||||||
Similarly, <code>WindowBytesStoreSupplier#segments()</code> was deprecated and replaced with <code>WindowBytesStoreSupplier#segmentInterval()</code>.
|
|
||||||
If you implement custom window store, you need to update your code accordingly.
|
|
||||||
Finally, <code>Stores#persistentWindowStore(...)</code> were deprecated and replaced with a new overload that does not allow to specify the number of segments any longer.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/mQU0BQ">KIP-319</a>
|
|
||||||
(note: <a href="https://cwiki.apache.org/confluence/x/sQU0BQ">KIP-328</a> and
|
|
||||||
<a href="https://cwiki.apache.org/confluence/x/IBNPBQ">KIP-358</a> 'overlap' with KIP-319).
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We've added an overloaded <code>StreamsBuilder#build</code> method that accepts an instance of <code>java.util.Properties</code> with the intent of using the
|
|
||||||
<code>StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG</code> config added in Kafka Streams 2.0. Before 2.1, when building a topology with
|
|
||||||
the DSL, Kafka Streams writes the physical plan as the user makes calls on the DSL. Now by providing a <code>java.util.Properties</code> instance when
|
|
||||||
executing a <code>StreamsBuilder#build</code> call, Kafka Streams can optimize the physical plan of the topology, provided the <code>StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG</code>
|
|
||||||
config is set to <code>StreamsConfig#OPTIMIZE</code>. By setting <code>StreamsConfig#OPTIMIZE</code> in addition to the <code>KTable</code> optimization of
|
|
||||||
reusing the source topic as the changelog topic, the topology may be optimized to merge redundant repartition topics into one
|
|
||||||
repartition topic. The original no parameter version of <code>StreamsBuilder#build</code> is still available for those who wish to not
|
|
||||||
optimize their topology. Note that enabling optimization of the topology may require you to do an application reset when redeploying the application. For more
|
|
||||||
details, see <a href="https://cwiki.apache.org/confluence/x/CkcYBQ">KIP-312</a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We are introducing static membership towards Kafka Streams user. This feature reduces unnecessary rebalances during normal application upgrades or rolling bounces.
|
|
||||||
For more details on how to use it, checkout <a href="/{{version}}/documentation/#static_membership">static membership design</a>.
|
|
||||||
Note, Kafka Streams uses the same <code>ConsumerConfig#GROUP_INSTANCE_ID_CONFIG</code>, and you only need to make sure it is uniquely defined across
|
|
||||||
different stream instances in one application.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_200" class="anchor-link"></a><a href="#streams_api_changes_200">Streams API changes in 2.0.0</a></h3>
|
|
||||||
<p>
|
|
||||||
In 2.0.0 we have added a few new APIs on the <code>ReadOnlyWindowStore</code> interface (for details please read <a href="#streams_api_changes_200">Streams API changes</a> below).
|
|
||||||
If you have customized window store implementations that extends the <code>ReadOnlyWindowStore</code> interface you need to make code changes.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
In addition, if you using Java 8 method references in your Kafka Streams code you might need to update your code to resolve method ambiguities.
|
|
||||||
Hot-swapping the jar-file only might not work for this case.
|
|
||||||
See below a complete list of <a href="#streams_api_changes_200">2.0.0</a>
|
|
||||||
API and semantic changes that allow you to advance your application and/or simplify your code base.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We moved <code>Consumed</code> interface from <code>org.apache.kafka.streams</code> to <code>org.apache.kafka.streams.kstream</code>
|
|
||||||
as it was mistakenly placed in the previous release. If your code has already used it there is a simple one-liner change needed in your import statement.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We have also removed some public APIs that are deprecated prior to 1.0.x in 2.0.0.
|
|
||||||
See below for a detailed list of removed APIs.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have removed the <code>skippedDueToDeserializationError-rate</code> and <code>skippedDueToDeserializationError-total</code> metrics.
|
|
||||||
Deserialization errors, and all other causes of record skipping, are now accounted for in the pre-existing metrics
|
|
||||||
<code>skipped-records-rate</code> and <code>skipped-records-total</code>. When a record is skipped, the event is
|
|
||||||
now logged at WARN level. If these warnings become burdensome, we recommend explicitly filtering out unprocessable
|
|
||||||
records instead of depending on record skipping semantics. For more details, see
|
|
||||||
<a href="https://cwiki.apache.org/confluence/x/gFOHB">KIP-274</a>.
|
|
||||||
As of right now, the potential causes of skipped records are:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li><code>null</code> keys in table sources</li>
|
|
||||||
<li><code>null</code> keys in table-table inner/left/outer/right joins</li>
|
|
||||||
<li><code>null</code> keys or values in stream-table joins</li>
|
|
||||||
<li><code>null</code> keys or values in stream-stream joins</li>
|
|
||||||
<li><code>null</code> keys or values in aggregations on grouped streams</li>
|
|
||||||
<li><code>null</code> keys or values in reductions on grouped streams</li>
|
|
||||||
<li><code>null</code> keys in aggregations on windowed streams</li>
|
|
||||||
<li><code>null</code> keys in reductions on windowed streams</li>
|
|
||||||
<li><code>null</code> keys in aggregations on session-windowed streams</li>
|
|
||||||
<li>
|
|
||||||
Errors producing results, when the configured <code>default.production.exception.handler</code> decides to
|
|
||||||
<code>CONTINUE</code> (the default is to <code>FAIL</code> and throw an exception).
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
Errors deserializing records, when the configured <code>default.deserialization.exception.handler</code>
|
|
||||||
decides to <code>CONTINUE</code> (the default is to <code>FAIL</code> and throw an exception).
|
|
||||||
This was the case previously captured in the <code>skippedDueToDeserializationError</code> metrics.
|
|
||||||
</li>
|
|
||||||
<li>Fetched records having a negative timestamp.</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We've also fixed the metrics name for time and session windowed store operations in 2.0. As a result, our current built-in stores
|
|
||||||
will have their store types in the metric names as <code>in-memory-state</code>, <code>in-memory-lru-state</code>,
|
|
||||||
<code>rocksdb-state</code>, <code>rocksdb-window-state</code>, and <code>rocksdb-session-state</code>. For example, a RocksDB time windowed store's
|
|
||||||
put operation metrics would now be
|
|
||||||
<code>kafka.streams:type=stream-rocksdb-window-state-metrics,client-id=([-.\w]+),task-id=([-.\w]+),rocksdb-window-state-id=([-.\w]+)</code>.
|
|
||||||
Users need to update their metrics collecting and reporting systems for their time and session windowed stores accordingly.
|
|
||||||
For more details, please read the <a href="/{{version}}/documentation/#kafka_streams_store_monitoring">State Store Metrics</a> section.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
We have added support for methods in <code>ReadOnlyWindowStore</code> which allows for querying a single window's key-value pair.
|
|
||||||
For users who have customized window store implementations on the above interface, they'd need to update their code to implement the newly added method as well.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/UUSHB">KIP-261</a>.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have added public <code>WindowedSerdes</code> to allow users to read from / write to a topic storing windowed table changelogs directly.
|
|
||||||
In addition, in <code>StreamsConfig</code> we have also added <code>default.windowed.key.serde.inner</code> and <code>default.windowed.value.serde.inner</code>
|
|
||||||
to let users specify inner serdes if the default serde classes are windowed serdes.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/_keHB">KIP-265</a>.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We've added message header support in the <code>Processor API</code> in Kafka 2.0.0. In particular, we have added a new API <code>ProcessorContext#headers()</code>
|
|
||||||
which returns a <code>Headers</code> object that keeps track of the headers of the source topic's message that is being processed. Through this object, users can manipulate
|
|
||||||
the headers map that is being propagated throughout the processor topology as well. For more details please feel free to read
|
|
||||||
the <a href="/{{version}}/documentation/streams/developer-guide/processor-api.html#accessing-processor-context">Developer Guide</a> section.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have deprecated constructors of <code>KafkaStreams</code> that take a <code>StreamsConfig</code> as parameter.
|
|
||||||
Please use the other corresponding constructors that accept <code>java.util.Properties</code> instead.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/KLRzB">KIP-245</a>.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
Kafka 2.0.0 allows to manipulate timestamps of output records using the Processor API (<a href="https://cwiki.apache.org/confluence/x/Ih6HB">KIP-251</a>).
|
|
||||||
To enable this new feature, <code>ProcessorContext#forward(...)</code> was modified.
|
|
||||||
The two existing overloads <code>#forward(Object key, Object value, String childName)</code> and <code>#forward(Object key, Object value, int childIndex)</code> were deprecated and a new overload <code>#forward(Object key, Object value, To to)</code> was added.
|
|
||||||
The new class <code>To</code> allows you to send records to all or specific downstream processors by name and to set the timestamp for the output record.
|
|
||||||
Forwarding based on child index is not supported in the new API any longer.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have added support to allow routing records dynamically to Kafka topics. More specifically, in both the lower-level <code>Topology#addSink</code> and higher-level <code>KStream#to</code> APIs, we have added variants that
|
|
||||||
take a <code>TopicNameExtractor</code> instance instead of a specific <code>String</code> typed topic name, such that for each received record from the upstream processor, the library will dynamically determine which Kafka topic to write to
|
|
||||||
based on the record's key and value, as well as record context. Note that all the Kafka topics that may possibly be used are still considered as user topics and hence required to be pre-created. In addition to that, we have modified the
|
|
||||||
<code>StreamPartitioner</code> interface to add the topic name parameter since the topic name now may not be known beforehand; users who have customized implementations of this interface would need to update their code while upgrading their application
|
|
||||||
to use Kafka Streams 2.0.0.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
<a href="https://cwiki.apache.org/confluence/x/DVyHB">KIP-284</a> changed the retention time for repartition topics by setting its default value to <code>Long.MAX_VALUE</code>.
|
|
||||||
Instead of relying on data retention Kafka Streams uses the new purge data API to delete consumed data from those topics and to keep used storage small now.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have modified the <code>ProcessorStateManger#register(...)</code> signature and removed the deprecated <code>loggingEnabled</code> boolean parameter as it is specified in the <code>StoreBuilder</code>.
|
|
||||||
Users who used this function to register their state stores into the processor topology need to simply update their code and remove this parameter from the caller.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
Kafka Streams DSL for Scala is a new Kafka Streams client library available for developers authoring Kafka Streams applications in Scala. It wraps core Kafka Streams DSL types to make it easier to call when
|
|
||||||
interoperating with Scala code. For example, it includes higher order functions as parameters for transformations avoiding the need anonymous classes in Java 7 or experimental SAM type conversions in Scala 2.11,
|
|
||||||
automatic conversion between Java and Scala collection types, a way
|
|
||||||
to implicitly provide Serdes to reduce boilerplate from your application and make it more typesafe, and more! For more information see the
|
|
||||||
<a href="/{{version}}/documentation/streams/developer-guide/dsl-api.html#scala-dsl">Kafka Streams DSL for Scala documentation</a> and
|
|
||||||
<a href="https://cwiki.apache.org/confluence/x/c06HB">KIP-270</a>.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
We have removed these deprecated APIs:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li><code>KafkaStreams#toString</code> no longer returns the topology and runtime metadata; to get topology metadata users can call <code>Topology#describe()</code> and to get thread runtime metadata users can call <code>KafkaStreams#localThreadsMetadata</code> (they are deprecated since 1.0.0).
|
|
||||||
For detailed guidance on how to update your code please read <a href="#streams_api_changes_100">here</a></li>
|
|
||||||
<li><code>TopologyBuilder</code> and <code>KStreamBuilder</code> are removed and replaced by <code>Topology</code> and <code>StreamsBuidler</code> respectively (they are deprecated since 1.0.0).
|
|
||||||
For detailed guidance on how to update your code please read <a href="#streams_api_changes_100">here</a></li>
|
|
||||||
<li><code>StateStoreSupplier</code> are removed and replaced with <code>StoreBuilder</code> (they are deprecated since 1.0.0);
|
|
||||||
and the corresponding <code>Stores#create</code> and <code>KStream, KTable, KGroupedStream</code> overloaded functions that use it have also been removed.
|
|
||||||
For detailed guidance on how to update your code please read <a href="#streams_api_changes_100">here</a></li>
|
|
||||||
<li><code>KStream, KTable, KGroupedStream</code> overloaded functions that requires serde and other specifications explicitly are removed and replaced with simpler overloaded functions that use <code>Consumed, Produced, Serialized, Materialized, Joined</code> (they are deprecated since 1.0.0).
|
|
||||||
For detailed guidance on how to update your code please read <a href="#streams_api_changes_100">here</a></li>
|
|
||||||
<li><code>Processor#punctuate</code>, <code>ValueTransformer#punctuate</code>, <code>ValueTransformer#punctuate</code> and <code>ProcessorContext#schedule(long)</code> are removed and replaced by <code>ProcessorContext#schedule(long, PunctuationType, Punctuator)</code> (they are deprecated in 1.0.0). </li>
|
|
||||||
<li>The second <code>boolean</code> typed parameter "loggingEnabled" in <code>ProcessorContext#register</code> has been removed; users can now use <code>StoreBuilder#withLoggingEnabled, withLoggingDisabled</code> to specify the behavior when they create the state store. </li>
|
|
||||||
<li><code>KTable#writeAsText, print, foreach, to, through</code> are removed; users can call <code>KTable#toStream()</code> followed by the corresponding <code>KStream</code> method instead for the same purpose (they are deprecated since 0.11.0.0).
|
|
||||||
For detailed list of removed APIs please read <a href="#streams_api_changes_0110">here</a></li>
|
|
||||||
<li><code>StreamsConfig#KEY_SERDE_CLASS_CONFIG, VALUE_SERDE_CLASS_CONFIG, TIMESTAMP_EXTRACTOR_CLASS_CONFIG</code> are removed and replaced with <code>StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG, DEFAULT_VALUE_SERDE_CLASS_CONFIG, DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG</code> respectively (they are deprecated since 0.11.0.0). </li>
|
|
||||||
<li><code>StreamsConfig#ZOOKEEPER_CONNECT_CONFIG</code> is removed as Streams no longer needs a ZooKeeper dependency (it is deprecated since 0.10.2.0). </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_110" class="anchor-link"></a><a href="#streams_api_changes_110">Streams API changes in 1.1.0</a></h3>
|
|
||||||
<p>
|
|
||||||
We have added support for methods in <code>ReadOnlyWindowStore</code> which allow querying <code>WindowStore</code>s without providing keys.
|
|
||||||
Users who have customized window store implementations of the above interface need to update their code to implement the newly added methods as well.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/6qdjB">KIP-205</a>.
|
|
||||||
</p>
|
|
||||||
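<p>
    As an illustration, here is a minimal sketch of the new key-less queries; the store name "windowed-counts", the key/value types, and the one-hour range are made-up assumptions:
</p>
<pre><code class="language-java">
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;

public static void printLastHour(final KafkaStreams streams) {
    // "windowed-counts" is a hypothetical store name registered by the topology.
    final ReadOnlyWindowStore<String, Long> store =
        streams.store("windowed-counts", QueryableStoreTypes.windowStore());

    // fetchAll() scans all keys in the given time range; no key has to be provided.
    final long now = System.currentTimeMillis();
    try (final KeyValueIterator<Windowed<String>, Long> iterator = store.fetchAll(now - 3600_000L, now)) {
        while (iterator.hasNext()) {
            final KeyValue<Windowed<String>, Long> entry = iterator.next();
            System.out.println(entry.key + " -> " + entry.value);
        }
    }
}
</code></pre>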
|
|
||||||
<p>
|
|
||||||
There is a new artifact <code>kafka-streams-test-utils</code> providing a <code>TopologyTestDriver</code>, <code>ConsumerRecordFactory</code>, and <code>OutputVerifier</code> class.
|
|
||||||
You can include the new artifact as a regular dependency in your unit tests and use the test driver to test the business logic of your Kafka Streams application.
|
|
||||||
For more details, see <a href="https://cwiki.apache.org/confluence/x/EQOHB">KIP-247</a>.
|
|
||||||
</p>
|
|
||||||
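<p>
    For illustration, a minimal sketch of such a test; the topology, the topic names ("input-topic", "output-topic"), and the upper-casing logic are made up for this example:
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.ConsumerRecordFactory;
import org.apache.kafka.streams.test.OutputVerifier;

public class UpperCaseTopologyTest {
    public void shouldUpperCaseValues() {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.<String, String>stream("input-topic")
            .mapValues(value -> value.toUpperCase())
            .to("output-topic");

        final Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "unit-test");
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
        config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), config)) {
            // Pipe one record into the driver ...
            final ConsumerRecordFactory<String, String> factory =
                new ConsumerRecordFactory<>("input-topic", new StringSerializer(), new StringSerializer());
            driver.pipeInput(factory.create("input-topic", "key", "value"));

            // ... and verify the produced output.
            final ProducerRecord<String, String> output =
                driver.readOutput("output-topic", new StringDeserializer(), new StringDeserializer());
            OutputVerifier.compareKeyValue(output, "key", "VALUE");
        }
    }
}
</code></pre>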
|
|
||||||
<p>
|
|
||||||
The introduction of <a href="https://cwiki.apache.org/confluence/x/QJ5zB">KIP-220</a>
|
|
||||||
enables you to provide configuration parameters for the embedded admin client created by Kafka Streams, similar to the embedded producer and consumer clients.
|
|
||||||
You can provide the configs via <code>StreamsConfig</code> by adding the configs with the prefix <code>admin.</code> as defined by <code>StreamsConfig#adminClientPrefix(String)</code>
|
|
||||||
to distinguish them from configurations of other clients that share the same config names.
|
|
||||||
</p>
|
|
||||||
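<p>
    For example (a sketch; the <code>retries</code> value is arbitrary), an admin-client config can be passed through <code>StreamsConfig</code> as follows:
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");
// Prefixed with "admin." -- only picked up by the embedded admin client.
props.put(StreamsConfig.adminClientPrefix("retries"), 10);
// Equivalent literal form of the same config:
props.put("admin.retries", 10);
</code></pre>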
|
|
||||||
<p>
|
|
||||||
New method in <code>KTable</code>
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> <code>transformValues</code> methods have been added to <code>KTable</code>. Similar to those on <code>KStream</code>, these methods allow for richer, stateful value transformation, similar to the Processor API.</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
New method in <code>GlobalKTable</code>
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> A new method returns the store name associated with the <code>GlobalKTable</code>, or <code>null</code> if the store is not queryable. </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
New methods in <code>KafkaStreams</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> added overload for the constructor that allows overriding the <code>Time</code> object used for tracking system wall-clock time; this is useful for unit testing your application code. </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New methods in <code>KafkaClientSupplier</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added <code>getAdminClient(config)</code> that allows overriding the <code>AdminClient</code> used for administrative requests such as internal topic creation. </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>New error handling for exceptions during production:</p>
|
|
||||||
<ul>
|
|
||||||
<li>added interface <code>ProductionExceptionHandler</code> that allows implementors to decide whether Streams should <code>FAIL</code> or <code>CONTINUE</code> when certain exceptions occur while trying to produce.</li>
|
|
||||||
<li>provided an implementation, <code>DefaultProductionExceptionHandler</code>, that always fails, preserving the existing behavior by default.</li>
|
|
||||||
<li>changing which implementation is used can be done by setting <code>default.production.exception.handler</code> to the fully qualified name of a class implementing this interface; see the sketch below.</li>
|
|
||||||
</ul>
|
|
||||||
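<p>
    Below is a sketch of such a handler; the class name and the choice to skip only records that are too large are made up for illustration:
</p>
<pre><code class="language-java">
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class IgnoreRecordTooLargeHandler implements ProductionExceptionHandler {
    @Override
    public void configure(final Map<String, ?> configs) { }

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // Skip records that are too large to produce; fail on everything else.
        if (exception instanceof RecordTooLargeException) {
            return ProductionExceptionHandlerResponse.CONTINUE;
        }
        return ProductionExceptionHandlerResponse.FAIL;
    }
}
</code></pre>
<p>
    It would then be activated via <code>props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, IgnoreRecordTooLargeHandler.class)</code>.
</p>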
|
|
||||||
<p> Changes in <code>StreamsResetter</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added options to specify input topic offsets to reset according to <a href="https://cwiki.apache.org/confluence/x/ApI7B">KIP-171</a></li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_100" class="anchor-link"></a><a href="#streams_api_changes_100">Streams API changes in 1.0.0</a></h3>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
With 1.0 a major API refactoring was accomplished and the new API is cleaner and easier to use.
|
|
||||||
This change includes the five main classes <code>KafkaStreams</code>, <code>KStreamBuilder</code>,
|
|
||||||
<code>KStream</code>, <code>KTable</code>, and <code>TopologyBuilder</code> (and a few others).
|
|
||||||
All changes are fully backward compatible as the old API is only deprecated but not removed.
|
|
||||||
We recommend moving to the new API as soon as you can.
|
|
||||||
We will summarize all API changes in the next paragraphs.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
The two main classes to specify a topology via the DSL (<code>KStreamBuilder</code>)
|
|
||||||
or the Processor API (<code>TopologyBuilder</code>) were deprecated and replaced by
|
|
||||||
<code>StreamsBuilder</code> and <code>Topology</code> (both new classes are located in
|
|
||||||
package <code>org.apache.kafka.streams</code>).
|
|
||||||
Note that <code>StreamsBuilder</code> does not extend <code>Topology</code>, i.e.,
|
|
||||||
the class hierarchy is different now.
|
|
||||||
The new classes have basically the same methods as the old ones to build a topology via DSL or Processor API.
|
|
||||||
However, some internal methods that were public in <code>KStreamBuilder</code>
|
|
||||||
and <code>TopologyBuilder</code> but not part of the actual API are not present
|
|
||||||
in the new classes any longer.
|
|
||||||
Furthermore, some overloads were simplified compared to the original classes.
|
|
||||||
See <a href="https://cwiki.apache.org/confluence/x/uR8IB">KIP-120</a>
|
|
||||||
and <a href="https://cwiki.apache.org/confluence/x/TYZjB">KIP-182</a>
|
|
||||||
for full details.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Changing how a topology is specified also affects <code>KafkaStreams</code> constructors,
|
|
||||||
which now only accept a <code>Topology</code>.
|
|
||||||
Using the DSL builder class <code>StreamsBuilder</code> one can get the constructed
|
|
||||||
<code>Topology</code> via <code>StreamsBuilder#build()</code>.
|
|
||||||
Additionally, a new class <code>org.apache.kafka.streams.TopologyDescription</code>
|
|
||||||
(and some more dependent classes) were added.
|
|
||||||
Those can be used to get a detailed description of the specified topology
|
|
||||||
and can be obtained by calling <code>Topology#describe()</code>.
|
|
||||||
An example using this new API is shown in the <a href="/{{version}}/documentation/streams/quickstart">quickstart section</a>.
|
|
||||||
</p>
|
|
||||||
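<p>
    For example (a sketch; the application id, bootstrap servers, and topic names are placeholders):
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyDescription;

final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic").to("output-topic");

// Obtain the Topology from the DSL builder ...
final Topology topology = builder.build();

// ... inspect its structure ...
final TopologyDescription description = topology.describe();
System.out.println(description);

// ... and pass it to the KafkaStreams constructor.
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "topology-demo");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");
final KafkaStreams streams = new KafkaStreams(topology, props);
</code></pre>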
|
|
||||||
<p>
|
|
||||||
New methods in <code>KStream</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li>With the introduction of <a href="https://cwiki.apache.org/confluence/x/66JjB">KIP-202</a>
|
|
||||||
a new method <code>merge()</code> has been created in <code>KStream</code> as the StreamsBuilder class's <code>StreamsBuilder#merge()</code> has been removed.
|
|
||||||
The method signature was also changed: instead of providing multiple <code>KStream</code>s to the method at once, only a single <code>KStream</code> is accepted.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
New methods in <code>KafkaStreams</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li>retrieve the current runtime information about the local threads via <code>localThreadsMetadata()</code> </li>
|
|
||||||
<li>observe the restoration of all state stores via <code>setGlobalStateRestoreListener()</code>, in which users can provide their customized implementation of the <code>org.apache.kafka.streams.processor.StateRestoreListener</code> interface</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Deprecated / modified methods in <code>KafkaStreams</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li>
|
|
||||||
<code>toString()</code>, <code>toString(final String indent)</code> were previously used to return static and runtime information.
|
|
||||||
They have been deprecated in favor of using the new classes/methods <code>localThreadsMetadata()</code> / <code>ThreadMetadata</code> (returning runtime information) and
|
|
||||||
<code>TopologyDescription</code> / <code>Topology#describe()</code> (returning static information).
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
With the introduction of <a href="https://cwiki.apache.org/confluence/x/TYZjB">KIP-182</a>
|
|
||||||
you should no longer pass in <code>Serde</code> to <code>KStream#print</code> operations.
|
|
||||||
If you can't rely on using <code>toString</code> to print your keys and values, you should instead provide a custom <code>KeyValueMapper</code> via the <code>Printed#withKeyValueMapper</code> call.
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
<code>setStateListener()</code> can now only be called before the application starts running, i.e. before <code>KafkaStreams.start()</code> is called.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Deprecated methods in <code>KGroupedStream</code>
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li>
|
|
||||||
Windowed aggregations have been deprecated from <code>KGroupedStream</code> and moved to <code>WindowedKStream</code>.
|
|
||||||
You can now perform a windowed aggregation by, for example, using <code>KGroupedStream#windowedBy(Windows)#reduce(Reducer)</code>, as sketched below.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
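<p>
    For example, a sketch of such a windowed count (the topic name, types, and the five-minute window are made up):
</p>
<pre><code class="language-java">
import java.util.concurrent.TimeUnit;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> clicks = builder.stream("clicks");

// Deprecated style (as of 1.0.0): windowed aggregation directly on KGroupedStream, e.g.
//   clicks.groupByKey().count(TimeWindows.of(TimeUnit.MINUTES.toMillis(5)), "click-counts");

// New style: window the grouped stream first, then aggregate.
final KTable<Windowed<String>, Long> clickCounts = clicks
    .groupByKey()
    .windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(5)))
    .count();
</code></pre>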
|
|
||||||
<p>
|
|
||||||
Modified methods in <code>Processor</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li>
|
|
||||||
<p>
|
|
||||||
The Processor API was extended to allow users to schedule <code>punctuate</code> functions either based on data-driven <b>stream time</b> or wall-clock time.
|
|
||||||
As a result, the original <code>ProcessorContext#schedule</code> is deprecated with a new overloaded function that accepts a user customizable <code>Punctuator</code> callback interface, which triggers its <code>punctuate</code> API method periodically based on the <code>PunctuationType</code>.
|
|
||||||
The <code>PunctuationType</code> determines what notion of time is used for the punctuation scheduling: either <a href="/{{version}}/documentation/streams/core-concepts#streams_time">stream time</a> or wall-clock time (by default, <b>stream time</b> is configured to represent event time via <code>TimestampExtractor</code>).
|
|
||||||
In addition, the <code>punctuate</code> function inside <code>Processor</code> is also deprecated.
|
|
||||||
</p>
|
|
||||||
<p>
|
|
||||||
Before this, users could only schedule based on stream time (i.e. <code>PunctuationType.STREAM_TIME</code>) and hence the <code>punctuate</code> function was data-driven only because stream time is determined (and advanced forward) by the timestamps derived from the input data.
|
|
||||||
If there is no data arriving at the processor, the stream time would not advance and hence punctuation will not be triggered.
|
|
||||||
On the other hand, when wall-clock time (i.e. <code>PunctuationType.WALL_CLOCK_TIME</code>) is used, <code>punctuate</code> will be triggered purely based on wall-clock time.
|
|
||||||
So, for example, assume a <code>Punctuator</code> function is scheduled with a 10-second interval based on <code>PunctuationType.WALL_CLOCK_TIME</code> while 60 records are being processed: if those 60 records are processed within 20 seconds,
|
|
||||||
<code>punctuate</code> would be called 2 times (once every 10 seconds);
|
|
||||||
if the 60 records are processed within 5 seconds, then no <code>punctuate</code> would be called at all.
|
|
||||||
Users can schedule multiple <code>Punctuator</code> callbacks with different <code>PunctuationType</code>s within the same processor by simply calling <code>ProcessorContext#schedule</code> multiple times inside the processor's <code>init()</code> method, as sketched below.
|
|
||||||
</p>
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
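<p>
    A sketch of a <code>Processor</code> (using the 1.0.x Processor API; the class name, the forwarded keys, and the 10-second interval are illustrative) that registers both punctuation types in <code>init()</code>:
</p>
<pre><code class="language-java">
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.PunctuationType;

public class CountingProcessor extends AbstractProcessor<String, String> {
    private long seenRecords = 0;

    @Override
    public void init(final ProcessorContext context) {
        super.init(context);
        // Stream-time punctuation: advances only while new records (and their timestamps) arrive.
        context.schedule(10_000L, PunctuationType.STREAM_TIME,
            timestamp -> context.forward("seen-by-stream-time", seenRecords));
        // Wall-clock punctuation: fires every 10 seconds of real time, even without input data.
        context.schedule(10_000L, PunctuationType.WALL_CLOCK_TIME,
            timestamp -> context.forward("seen-by-wall-clock-time", seenRecords));
    }

    @Override
    public void process(final String key, final String value) {
        seenRecords++;
    }
}
</code></pre>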
|
|
||||||
<p>
|
|
||||||
If you are monitoring task-level or processor-node / state-store-level Streams metrics, please note that the metrics sensor names and hierarchy have changed:
|
|
||||||
The task ids, store names and processor names are no longer in the sensor metrics names, but instead are added as tags of the sensors to achieve consistent metrics hierarchy.
|
|
||||||
As a result you may need to make corresponding code changes to your metrics reporting and monitoring tools when upgrading to 1.0.0.
|
|
||||||
The detailed sensor hierarchy can be found in the <a href="/{{version}}/documentation/#kafka_streams_monitoring">Streams Monitoring</a> section.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
The introduction of <a href="https://cwiki.apache.org/confluence/x/WQgwB">KIP-161</a>
|
|
||||||
enables you to provide a default exception handler for deserialization errors when reading data from Kafka rather than throwing the exception all the way out of your streams application.
|
|
||||||
You can provide the configs via the <code>StreamsConfig</code> as <code>StreamsConfig#DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG</code>.
|
|
||||||
The specified handler must implement the <code>org.apache.kafka.streams.errors.DeserializationExceptionHandler</code> interface.
|
|
||||||
</p>
|
|
||||||
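<p>
    For example, a sketch that plugs in the built-in <code>LogAndContinueExceptionHandler</code>, which logs the corrupted record and continues processing:
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

final Properties props = new Properties();
props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
          LogAndContinueExceptionHandler.class);
</code></pre>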
|
|
||||||
<p>
|
|
||||||
The introduction of <a href="https://cwiki.apache.org/confluence/x/aZM7B">KIP-173</a>
|
|
||||||
enables you to provide topic configuration parameters for any topics created by Kafka Streams.
|
|
||||||
This includes repartition and changelog topics.
|
|
||||||
You can provide the configs via the <code>StreamsConfig</code> by adding the configs with the prefix as defined by <code>StreamsConfig#topicPrefix(String)</code>.
|
|
||||||
Any properties in the <code>StreamsConfig</code> with the prefix will be applied when creating internal topics.
|
|
||||||
Any configs that aren't topic configs will be ignored.
|
|
||||||
If you already use <code>StateStoreSupplier</code> or <code>Materialized</code> to provide configs for changelogs, then they will take precedence over those supplied in the config.
|
|
||||||
</p>
|
|
||||||
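<p>
    For example (a sketch; the chosen configs and values are arbitrary), the <code>topic.</code> prefix applies broker-side topic configs to all internal topics:
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.common.config.TopicConfig;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
// Applied to every repartition and changelog topic that Streams creates.
props.put(StreamsConfig.topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG), 64 * 1024 * 1024);
props.put(StreamsConfig.topicPrefix(TopicConfig.RETENTION_MS_CONFIG), 86_400_000L);
// Equivalent literal form of the first line:
props.put("topic.segment.bytes", 64 * 1024 * 1024);
</code></pre>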
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_0110" class="anchor-link"></a><a href="#streams_api_changes_0110">Streams API changes in 0.11.0.0</a></h3>
|
|
||||||
|
|
||||||
<p> Updates in <code>StreamsConfig</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> new configuration parameter <code>processing.guarantee</code> is added </li>
|
|
||||||
<li> configuration parameter <code>key.serde</code> was deprecated and replaced by <code>default.key.serde</code> </li>
|
|
||||||
<li> configuration parameter <code>value.serde</code> was deprecated and replaced by <code>default.value.serde</code> </li>
|
|
||||||
<li> configuration parameter <code>timestamp.extractor</code> was deprecated and replaced by <code>default.timestamp.extractor</code> </li>
|
|
||||||
<li> method <code>keySerde()</code> was deprecated and replaced by <code>defaultKeySerde()</code> </li>
|
|
||||||
<li> method <code>valueSerde()</code> was deprecated and replaced by <code>defaultValueSerde()</code> </li>
|
|
||||||
<li> new method <code>defaultTimestampExtractor()</code> was added </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New methods in <code>TopologyBuilder</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added overloads for <code>addSource()</code> that allow defining a <code>TimestampExtractor</code> per source node </li>
|
|
||||||
<li> added overloads for <code>addGlobalStore()</code> that allow defining a <code>TimestampExtractor</code> per source node associated with the global store </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New methods in <code>KStreamBuilder</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added overloads for <code>stream()</code> that allow defining a <code>TimestampExtractor</code> per input stream </li>
|
|
||||||
<li> added overloads for <code>table()</code> that allow defining a <code>TimestampExtractor</code> per input table </li>
|
|
||||||
<li> added overloads for <code>globalKTable()</code> that allow defining a <code>TimestampExtractor</code> per global table </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Deprecated methods in <code>KTable</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> <code>void foreach(final ForeachAction<? super K, ? super V> action)</code> </li>
|
|
||||||
<li> <code>void print()</code> </li>
|
|
||||||
<li> <code>void print(final String streamName)</code> </li>
|
|
||||||
<li> <code>void print(final Serde<K> keySerde, final Serde<V> valSerde)</code> </li>
|
|
||||||
<li> <code>void print(final Serde<K> keySerde, final Serde<V> valSerde, final String streamName)</code> </li>
|
|
||||||
<li> <code>void writeAsText(final String filePath)</code> </li>
|
|
||||||
<li> <code>void writeAsText(final String filePath, final String streamName)</code> </li>
|
|
||||||
<li> <code>void writeAsText(final String filePath, final Serde<K> keySerde, final Serde<V> valSerde)</code> </li>
|
|
||||||
<li> <code>void writeAsText(final String filePath, final String streamName, final Serde<K> keySerde, final Serde<V> valSerde)</code> </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
The above methods have been deprecated in favor of using the Interactive Queries API.
|
|
||||||
If you want to query the current content of the state store backing the KTable, use the following approach:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> Make a call to <code>KafkaStreams.store(final String storeName, final QueryableStoreType<T> queryableStoreType)</code> </li>
|
|
||||||
<li> Then make a call to <code>ReadOnlyKeyValueStore.all()</code> to iterate over the keys of a <code>KTable</code>. </li>
|
|
||||||
</ul>
|
|
||||||
<p>
|
|
||||||
If you want to view the changelog stream of the <code>KTable</code> then you could call <code>KTable.toStream().print(Printed.toSysOut)</code>.
|
|
||||||
</p>
|
|
||||||
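<p>
    Putting the two steps above together, a minimal sketch (the store name "counts-store" and the key/value types are hypothetical, and <code>streams</code> is assumed to be an already running <code>KafkaStreams</code> instance):
</p>
<pre><code class="language-java">
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

final ReadOnlyKeyValueStore<String, Long> store =
    streams.store("counts-store", QueryableStoreTypes.keyValueStore());

// Iterate over the current content of the state store backing the KTable.
try (final KeyValueIterator<String, Long> iterator = store.all()) {
    while (iterator.hasNext()) {
        final KeyValue<String, Long> entry = iterator.next();
        System.out.println(entry.key + " -> " + entry.value);
    }
}
</code></pre>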
|
|
||||||
<p> Metrics using exactly-once semantics: </p>
|
|
||||||
<p>
|
|
||||||
If <code>"exactly_once"</code> processing (EOS version 1) is enabled via the <code>processing.guarantee</code> parameter,
|
|
||||||
internally Streams switches from a producer-per-thread to a producer-per-task runtime model.
|
|
||||||
Using <code>"exactly_once_beta"</code> (EOS version 2) does use a producer-per-thread, so <code>client.id</code> doesn't change,
|
|
||||||
compared with <code>"at_least_once"</code> for this case.
|
|
||||||
In order to distinguish the different producers, the producer's <code>client.id</code> additionally encodes the task-ID for this case.
|
|
||||||
Because the producer's <code>client.id</code> is used to report JMX metrics, it might be required to update tools that receive those metrics.
|
|
||||||
</p>
|
|
||||||
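<p>
    For reference, a sketch of enabling the two guarantees via <code>StreamsConfig</code> (which of them is available depends on your broker and Streams versions):
</p>
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
// EOS version 1: one producer per task; the task id is appended to the producer's client.id.
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
// EOS version 2 (requires brokers 2.5+): one producer per thread; client.id stays unchanged.
// props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA);
</code></pre>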
|
|
||||||
<p> Producer's <code>client.id</code> naming schema: </p>
|
|
||||||
<ul>
|
|
||||||
<li> at-least-once (default): <code>[client.Id]-StreamThread-[sequence-number]</code> </li>
|
|
||||||
<li> exactly-once: <code>[client.Id]-StreamThread-[sequence-number]-[taskId]</code> </li>
|
|
||||||
<li> exactly-once-beta: <code>[client.Id]-StreamThread-[sequence-number]</code> </li>
|
|
||||||
</ul>
|
|
||||||
<p> <code>[client.Id]</code> is either set via Streams configuration parameter <code>client.id</code> or defaults to <code>[application.id]-[processId]</code> (<code>[processId]</code> is a random UUID). </p>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_01021" class="anchor-link"></a><a href="#streams_api_changes_01021">Notable changes in 0.10.2.1</a></h3>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Parameter updates in <code>StreamsConfig</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> The default config values of embedded producer's <code>retries</code> and consumer's <code>max.poll.interval.ms</code> have been changed to improve the resiliency of a Kafka Streams application </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_0102" class="anchor-link"></a><a href="#streams_api_changes_0102">Streams API changes in 0.10.2.0</a></h3>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
New methods in <code>KafkaStreams</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> set a listener to react on application state change via <code>setStateListener(StateListener listener)</code> </li>
|
|
||||||
<li> retrieve the current application state via <code>state()</code> </li>
|
|
||||||
<li> retrieve the global metrics registry via <code>metrics()</code> </li>
|
|
||||||
<li> apply a timeout when closing an application via <code>close(long timeout, TimeUnit timeUnit)</code> </li>
|
|
||||||
<li> specify a custom indent when retrieving Kafka Streams information via <code>toString(String indent)</code> </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
Parameter updates in <code>StreamsConfig</code>:
|
|
||||||
</p>
|
|
||||||
<ul>
|
|
||||||
<li> parameter <code>zookeeper.connect</code> was deprecated; a Kafka Streams application no longer interacts with ZooKeeper for topic management but uses the new broker admin protocol
|
|
||||||
(cf. <a href="https://cwiki.apache.org/confluence/x/vBEIAw">KIP-4, Section "Topic Admin Schema"</a>) </li>
|
|
||||||
<li> added many new parameters for metrics, security, and client configurations </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Changes in <code>StreamsMetrics</code> interface: </p>
|
|
||||||
<ul>
|
|
||||||
<li> removed methods: <code>addLatencySensor()</code> </li>
|
|
||||||
<li> added methods: <code>addLatencyAndThroughputSensor()</code>, <code>addThroughputSensor()</code>, <code>recordThroughput()</code>,
|
|
||||||
<code>addSensor()</code>, <code>removeSensor()</code> </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New methods in <code>TopologyBuilder</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added overloads for <code>addSource()</code> that allow defining an <code>auto.offset.reset</code> policy per source node </li>
|
|
||||||
<li> added methods <code>addGlobalStore()</code> to add global <code>StateStore</code>s </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New methods in <code>KStreamBuilder</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added overloads for <code>stream()</code> and <code>table()</code> that allow defining an <code>auto.offset.reset</code> policy per input stream/table </li>
|
|
||||||
<li> added method <code>globalKTable()</code> to create a <code>GlobalKTable</code> </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New joins for <code>KStream</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added overloads for <code>join()</code> to join with <code>KTable</code> </li>
|
|
||||||
<li> added overloads for <code>join()</code> and <code>leftJoin()</code> to join with <code>GlobalKTable</code> </li>
|
|
||||||
<li> note, join semantics in 0.10.2 were improved and thus you might see different result compared to 0.10.0.x and 0.10.1.x
|
|
||||||
(cf. <a href="https://cwiki.apache.org/confluence/x/EzPtAw">Kafka Streams Join Semantics</a> in the Apache Kafka wiki)</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Aligned <code>null</code>-key handling for <code>KTable</code> joins: </p>
|
|
||||||
<ul>
|
|
||||||
<li> like all other KTable operations, <code>KTable-KTable</code> joins do not throw an exception on <code>null</code> key records anymore, but drop those records silently </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> New window type <em>Session Windows</em>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> added class <code>SessionWindows</code> to specify session windows </li>
|
|
||||||
<li> added overloads for <code>KGroupedStream</code> methods <code>count()</code>, <code>reduce()</code>, and <code>aggregate()</code>
|
|
||||||
to allow session window aggregations </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Changes to <code>TimestampExtractor</code>: </p>
|
|
||||||
<ul>
|
|
||||||
<li> method <code>extract()</code> has a second parameter now </li>
|
|
||||||
<li> new default timestamp extractor class <code>FailOnInvalidTimestamp</code>
|
|
||||||
(it gives the same behavior as old (and removed) default extractor <code>ConsumerRecordTimestampExtractor</code>) </li>
|
|
||||||
<li> new alternative timestamp extractor classes <code>LogAndSkipOnInvalidTimestamp</code> and <code>UsePreviousTimeOnInvalidTimestamp</code> </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Relaxed type constraints of many DSL interfaces, classes, and methods (cf. <a href="https://cwiki.apache.org/confluence/x/dQMIB">KIP-100</a>). </p>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_changes_0101" class="anchor-link"></a><a href="#streams_api_changes_0101">Streams API changes in 0.10.1.0</a></h3>
|
|
||||||
|
|
||||||
<p> Stream grouping and aggregation split into two methods: </p>
|
|
||||||
<ul>
|
|
||||||
<li> old: KStream #aggregateByKey(), #reduceByKey(), and #countByKey() </li>
|
|
||||||
<li> new: KStream#groupByKey() plus KGroupedStream #aggregate(), #reduce(), and #count() </li>
|
|
||||||
<li> Example: stream.countByKey() changes to stream.groupByKey().count() </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Auto Repartitioning: </p>
|
|
||||||
<ul>
|
|
||||||
<li> a call to through() after a key-changing operator and before an aggregation/join is no longer required </li>
|
|
||||||
<li> Example: stream.selectKey(...).through(...).countByKey() changes to stream.selectKey().groupByKey().count() </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> TopologyBuilder: </p>
|
|
||||||
<ul>
|
|
||||||
<li> methods #sourceTopics(String applicationId) and #topicGroups(String applicationId) got simplified to #sourceTopics() and #topicGroups() </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> DSL: new parameter to specify state store names: </p>
|
|
||||||
<ul>
|
|
||||||
<li> The new Interactive Queries feature requires specifying a store name for all source KTables and window aggregation result KTables (the previous parameter "operator/window name" is now the storeName) </li>
|
|
||||||
<li> KStreamBuilder#table(String topic) changes to #table(String topic, String storeName) </li>
|
|
||||||
<li> KTable#through(String topic) changes to #through(String topic, String storeName) </li>
|
|
||||||
<li> KGroupedStream #aggregate(), #reduce(), and #count() require additional parameter "String storeName"</li>
|
|
||||||
<li> Example: stream.countByKey(TimeWindows.of("windowName", 1000)) changes to stream.groupByKey().count(TimeWindows.of(1000), "countStoreName") </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p> Windowing: </p>
|
|
||||||
<ul>
|
|
||||||
<li> Windows are not named anymore: TimeWindows.of("name", 1000) changes to TimeWindows.of(1000) (cf. DSL: new parameter to specify state store names) </li>
|
|
||||||
<li> JoinWindows has no default size anymore: JoinWindows.of("name").within(1000) changes to JoinWindows.of(1000) </li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h3 class="anchor-heading"><a id="streams_api_broker_compat" class="anchor-link"></a><a href="#streams_api_broker_compat">Streams API broker compatibility</a></h3>
|
<h3 class="anchor-heading"><a id="streams_api_broker_compat" class="anchor-link"></a><a href="#streams_api_broker_compat">Streams API broker compatibility</a></h3>
|
||||||
|
|
||||||
|
@ -1864,7 +1139,7 @@
|
||||||
<tbody>
|
<tbody>
|
||||||
<tr>
|
<tr>
|
||||||
<td>Kafka Streams API (rows)</td>
|
<td>Kafka Streams API (rows)</td>
|
||||||
<td>2.1.x and<br>2.2.x and<br>2.3.x and<br>2.4.x and<br>2.5.x and<br>2.6.x and<br>2.7.x and<br>2.8.x and<br>3.0.x and<br>3.1.x and<br>3.2.x and<br>3.3.x and<br>3.4.x and<br>3.5.x and<br>3.6.x and<br>3.7.x and<br>3.8.x and<br>3.9.x and<br>4.0.x</td>
|
<td>2.4.x and<br>2.5.x and<br>2.6.x and<br>2.7.x and<br>2.8.x and<br>3.0.x and<br>3.1.x and<br>3.2.x and<br>3.3.x and<br>3.4.x and<br>3.5.x and<br>3.6.x and<br>3.7.x and<br>3.8.x and<br>3.9.x and<br>4.0.x</td>
|
||||||
<td>4.1.x</td>
|
<td>4.1.x</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
|
|
|
@ -36,6 +36,9 @@
|
||||||
The <code>KafkaPrincipalBuilder</code> now extends <code>KafkaPrincipalSerde</code>. This forces developers to implement the <code>KafkaPrincipalSerde</code> interface for custom <code>KafkaPrincipalBuilder</code> implementations.
|
The <code>KafkaPrincipalBuilder</code> now extends <code>KafkaPrincipalSerde</code>. This forces developers to implement the <code>KafkaPrincipalSerde</code> interface for custom <code>KafkaPrincipalBuilder</code> implementations.
|
||||||
For further details, please refer to <a href="https://cwiki.apache.org/confluence/x/1gq9F">KIP-1157</a>.
|
For further details, please refer to <a href="https://cwiki.apache.org/confluence/x/1gq9F">KIP-1157</a>.
|
||||||
</li>
|
</li>
|
||||||
|
<li>The behavior of <code>org.apache.kafka.streams.KafkaStreams#removeStreamThread</code> has been changed. The consumer is no longer removed from the group as soon as <code>removeStreamThread</code> finishes.
|
||||||
|
Instead, the consumer is removed from the group after <code>org.apache.kafka.streams.processor.internals.StreamThread</code> completes its <code>run</code> function.
|
||||||
|
</li>
|
||||||
<li>
|
<li>
|
||||||
The support for MX4J library, enabled through <code>kafka_mx4jenable</code> system property, was deprecated and will be removed in Kafka 5.0.
|
The support for MX4J library, enabled through <code>kafka_mx4jenable</code> system property, was deprecated and will be removed in Kafka 5.0.
|
||||||
</li>
|
</li>
|
||||||
|
@ -182,10 +185,16 @@
|
||||||
<code>kafka.log.remote:type=RemoteStorageThreadPool.RemoteLogReaderAvgIdlePercent</code>.
|
<code>kafka.log.remote:type=RemoteStorageThreadPool.RemoteLogReaderAvgIdlePercent</code>.
|
||||||
For further details, please refer to <a href="https://cwiki.apache.org/confluence/x/6oqMEw">KIP-1100</a>.
|
For further details, please refer to <a href="https://cwiki.apache.org/confluence/x/6oqMEw">KIP-1100</a>.
|
||||||
</li>
|
</li>
|
||||||
|
<li>
|
||||||
|
A new metric <code>AvgIdleRatio</code> has been added to the <code>ControllerEventManager</code> group. This metric measures the average idle ratio of the controller event queue thread,
|
||||||
|
providing visibility into how much time the controller spends waiting for events versus processing them. The metric value ranges from 0.0 (always busy) to 1.0 (always idle).
|
||||||
|
</li>
|
||||||
</ul>
|
</ul>
|
||||||
|
|
||||||
<h4><a id="upgrade_4_1_0" href="#upgrade_4_1_0">Upgrading to 4.1.0</a></h4>
|
<h4><a id="upgrade_4_1_0" href="#upgrade_4_1_0">Upgrading to 4.1.0</a></h4>
|
||||||
|
|
||||||
|
<p><b>Note:</b> Kafka Streams 4.1.0 contains a critical memory leak bug (<a href="https://issues.apache.org/jira/browse/KAFKA-19748">KAFKA-19748</a>) that affects users of range scans and certain DSL operators (session windows, sliding windows, stream-stream joins, foreign-key joins). Users running Kafka Streams should consider upgrading directly to 4.1.1 when available.</p>
|
||||||
|
|
||||||
<h5><a id="upgrade_4_1_0_from" href="#upgrade_4_1_0_from">Upgrading Servers to 4.1.0 from any version 3.3.x through 4.0.x</a></h5>
|
<h5><a id="upgrade_4_1_0_from" href="#upgrade_4_1_0_from">Upgrading Servers to 4.1.0 from any version 3.3.x through 4.0.x</a></h5>
|
||||||
<h5><a id="upgrade_410_notable" href="#upgrade_410_notable">Notable changes in 4.1.0</a></h5>
|
<h5><a id="upgrade_410_notable" href="#upgrade_410_notable">Notable changes in 4.1.0</a></h5>
|
||||||
<ul>
|
<ul>
|
||||||
|
|
|
@ -2249,18 +2249,13 @@ public class GroupMetadataManager {
|
||||||
.setClassicMemberMetadata(null)
|
.setClassicMemberMetadata(null)
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
// If the group is newly created, we must ensure that it moves away from
|
boolean subscribedTopicNamesChanged = hasMemberSubscriptionChanged(
|
||||||
// epoch 0 and that it is fully initialized.
|
|
||||||
boolean bumpGroupEpoch = group.groupEpoch() == 0;
|
|
||||||
|
|
||||||
bumpGroupEpoch |= hasMemberSubscriptionChanged(
|
|
||||||
groupId,
|
groupId,
|
||||||
member,
|
member,
|
||||||
updatedMember,
|
updatedMember,
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
UpdateRegularExpressionsResult updateRegularExpressionsResult = maybeUpdateRegularExpressions(
|
||||||
bumpGroupEpoch |= maybeUpdateRegularExpressions(
|
|
||||||
context,
|
context,
|
||||||
group,
|
group,
|
||||||
member,
|
member,
|
||||||
|
@ -2268,9 +2263,24 @@ public class GroupMetadataManager {
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// The subscription has changed when either the subscribed topic names or subscribed topic
|
||||||
|
// regex has changed.
|
||||||
|
boolean hasSubscriptionChanged = subscribedTopicNamesChanged || updateRegularExpressionsResult.regexUpdated();
|
||||||
int groupEpoch = group.groupEpoch();
|
int groupEpoch = group.groupEpoch();
|
||||||
SubscriptionType subscriptionType = group.subscriptionType();
|
SubscriptionType subscriptionType = group.subscriptionType();
|
||||||
|
|
||||||
|
boolean bumpGroupEpoch =
|
||||||
|
// If the group is newly created, we must ensure that it moves away from
|
||||||
|
// epoch 0 and that it is fully initialized.
|
||||||
|
groupEpoch == 0 ||
|
||||||
|
// Bumping the group epoch signals that the target assignment should be updated. We bump
|
||||||
|
// the group epoch when the member has changed its subscribed topic names or the member
|
||||||
|
// has changed its subscribed topic regex to a regex that is already resolved. We avoid
|
||||||
|
// bumping the group epoch when the new subscribed topic regex has not been resolved
|
||||||
|
// yet, since we will have to update the target assignment again later.
|
||||||
|
subscribedTopicNamesChanged ||
|
||||||
|
updateRegularExpressionsResult == UpdateRegularExpressionsResult.REGEX_UPDATED_AND_RESOLVED;
|
||||||
|
|
||||||
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
|
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
|
||||||
// The subscription metadata is updated in two cases:
|
// The subscription metadata is updated in two cases:
|
||||||
// 1) The member has updated its subscriptions;
|
// 1) The member has updated its subscriptions;
|
||||||
|
@ -2315,6 +2325,9 @@ public class GroupMetadataManager {
|
||||||
group::currentPartitionEpoch,
|
group::currentPartitionEpoch,
|
||||||
targetAssignmentEpoch,
|
targetAssignmentEpoch,
|
||||||
targetAssignment,
|
targetAssignment,
|
||||||
|
group.resolvedRegularExpressions(),
|
||||||
|
// Force consistency with the subscription when the subscription has changed.
|
||||||
|
hasSubscriptionChanged,
|
||||||
ownedTopicPartitions,
|
ownedTopicPartitions,
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
@ -2468,6 +2481,8 @@ public class GroupMetadataManager {
|
||||||
group::currentPartitionEpoch,
|
group::currentPartitionEpoch,
|
||||||
group.assignmentEpoch(),
|
group.assignmentEpoch(),
|
||||||
group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId()),
|
group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId()),
|
||||||
|
group.resolvedRegularExpressions(),
|
||||||
|
bumpGroupEpoch,
|
||||||
toTopicPartitions(subscription.ownedPartitions(), metadataImage),
|
toTopicPartitions(subscription.ownedPartitions(), metadataImage),
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
@ -2511,6 +2526,9 @@ public class GroupMetadataManager {
|
||||||
group::currentPartitionEpoch,
|
group::currentPartitionEpoch,
|
||||||
targetAssignmentEpoch,
|
targetAssignmentEpoch,
|
||||||
targetAssignment,
|
targetAssignment,
|
||||||
|
group.resolvedRegularExpressions(),
|
||||||
|
// Force consistency with the subscription when the subscription has changed.
|
||||||
|
bumpGroupEpoch,
|
||||||
toTopicPartitions(subscription.ownedPartitions(), metadataImage),
|
toTopicPartitions(subscription.ownedPartitions(), metadataImage),
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
@ -2669,6 +2687,8 @@ public class GroupMetadataManager {
|
||||||
updatedMember,
|
updatedMember,
|
||||||
targetAssignmentEpoch,
|
targetAssignmentEpoch,
|
||||||
targetAssignment,
|
targetAssignment,
|
||||||
|
// Force consistency with the subscription when the subscription has changed.
|
||||||
|
bumpGroupEpoch,
|
||||||
records
|
records
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -3108,6 +3128,16 @@ public class GroupMetadataManager {
|
||||||
return value != null && !value.isEmpty();
|
return value != null && !value.isEmpty();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private enum UpdateRegularExpressionsResult {
|
||||||
|
NO_CHANGE,
|
||||||
|
REGEX_UPDATED,
|
||||||
|
REGEX_UPDATED_AND_RESOLVED;
|
||||||
|
|
||||||
|
public boolean regexUpdated() {
|
||||||
|
return this == REGEX_UPDATED || this == REGEX_UPDATED_AND_RESOLVED;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check whether the member has updated its subscribed topic regular expression and
|
* Check whether the member has updated its subscribed topic regular expression and
|
||||||
* may trigger the resolution/the refresh of all the regular expressions in the
|
* may trigger the resolution/the refresh of all the regular expressions in the
|
||||||
|
@ -3119,9 +3149,9 @@ public class GroupMetadataManager {
|
||||||
* @param member The old member.
|
* @param member The old member.
|
||||||
* @param updatedMember The new member.
|
* @param updatedMember The new member.
|
||||||
* @param records The records accumulator.
|
* @param records The records accumulator.
|
||||||
* @return Whether a rebalance must be triggered.
|
* @return The result of the update.
|
||||||
*/
|
*/
|
||||||
private boolean maybeUpdateRegularExpressions(
|
private UpdateRegularExpressionsResult maybeUpdateRegularExpressions(
|
||||||
AuthorizableRequestContext context,
|
AuthorizableRequestContext context,
|
||||||
ConsumerGroup group,
|
ConsumerGroup group,
|
||||||
ConsumerGroupMember member,
|
ConsumerGroupMember member,
|
||||||
|
@ -3134,14 +3164,17 @@ public class GroupMetadataManager {
|
||||||
String oldSubscribedTopicRegex = member.subscribedTopicRegex();
|
String oldSubscribedTopicRegex = member.subscribedTopicRegex();
|
||||||
String newSubscribedTopicRegex = updatedMember.subscribedTopicRegex();
|
String newSubscribedTopicRegex = updatedMember.subscribedTopicRegex();
|
||||||
|
|
||||||
boolean bumpGroupEpoch = false;
|
|
||||||
boolean requireRefresh = false;
|
boolean requireRefresh = false;
|
||||||
|
UpdateRegularExpressionsResult updateRegularExpressionsResult = UpdateRegularExpressionsResult.NO_CHANGE;
|
||||||
|
|
||||||
// Check whether the member has changed its subscribed regex.
|
// Check whether the member has changed its subscribed regex.
|
||||||
if (!Objects.equals(oldSubscribedTopicRegex, newSubscribedTopicRegex)) {
|
boolean subscribedTopicRegexChanged = !Objects.equals(oldSubscribedTopicRegex, newSubscribedTopicRegex);
|
||||||
|
if (subscribedTopicRegexChanged) {
|
||||||
log.debug("[GroupId {}] Member {} updated its subscribed regex to: {}.",
|
log.debug("[GroupId {}] Member {} updated its subscribed regex to: {}.",
|
||||||
groupId, memberId, newSubscribedTopicRegex);
|
groupId, memberId, newSubscribedTopicRegex);
|
||||||
|
|
||||||
|
updateRegularExpressionsResult = UpdateRegularExpressionsResult.REGEX_UPDATED;
|
||||||
|
|
||||||
if (isNotEmpty(oldSubscribedTopicRegex) && group.numSubscribedMembers(oldSubscribedTopicRegex) == 1) {
|
if (isNotEmpty(oldSubscribedTopicRegex) && group.numSubscribedMembers(oldSubscribedTopicRegex) == 1) {
|
||||||
// If the member was the last one subscribed to the regex, we delete the
|
// If the member was the last one subscribed to the regex, we delete the
|
||||||
// resolved regular expression.
|
// resolved regular expression.
|
||||||
|
@ -3160,7 +3193,9 @@ public class GroupMetadataManager {
|
||||||
} else {
|
} else {
|
||||||
// If the new regex is already resolved, we trigger a rebalance
|
// If the new regex is already resolved, we trigger a rebalance
|
||||||
// by bumping the group epoch.
|
// by bumping the group epoch.
|
||||||
bumpGroupEpoch = group.resolvedRegularExpression(newSubscribedTopicRegex).isPresent();
|
if (group.resolvedRegularExpression(newSubscribedTopicRegex).isPresent()) {
|
||||||
|
updateRegularExpressionsResult = UpdateRegularExpressionsResult.REGEX_UPDATED_AND_RESOLVED;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3176,20 +3211,20 @@ public class GroupMetadataManager {
|
||||||
// 0. The group is subscribed to regular expressions. We also take the one
|
// 0. The group is subscribed to regular expressions. We also take the one
|
||||||
// that the current may have just introduced.
|
// that the current may have just introduced.
|
||||||
if (!requireRefresh && group.subscribedRegularExpressions().isEmpty()) {
|
if (!requireRefresh && group.subscribedRegularExpressions().isEmpty()) {
|
||||||
return bumpGroupEpoch;
|
return updateRegularExpressionsResult;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. There is no ongoing refresh for the group.
|
// 1. There is no ongoing refresh for the group.
|
||||||
String key = group.groupId() + "-regex";
|
String key = group.groupId() + "-regex";
|
||||||
if (executor.isScheduled(key)) {
|
if (executor.isScheduled(key)) {
|
||||||
return bumpGroupEpoch;
|
return updateRegularExpressionsResult;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. The last refresh is older than 10s. If the group does not have any regular
|
// 2. The last refresh is older than 10s. If the group does not have any regular
|
||||||
// expressions but the current member just brought a new one, we should continue.
|
// expressions but the current member just brought a new one, we should continue.
|
||||||
long lastRefreshTimeMs = group.lastResolvedRegularExpressionRefreshTimeMs();
|
long lastRefreshTimeMs = group.lastResolvedRegularExpressionRefreshTimeMs();
|
||||||
if (currentTimeMs <= lastRefreshTimeMs + REGEX_BATCH_REFRESH_MIN_INTERVAL_MS) {
|
if (currentTimeMs <= lastRefreshTimeMs + REGEX_BATCH_REFRESH_MIN_INTERVAL_MS) {
|
||||||
return bumpGroupEpoch;
|
return updateRegularExpressionsResult;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3.1 The group has unresolved regular expressions.
|
// 3.1 The group has unresolved regular expressions.
|
||||||
|
@ -3218,7 +3253,7 @@ public class GroupMetadataManager {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
return bumpGroupEpoch;
|
return updateRegularExpressionsResult;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -3498,6 +3533,8 @@ public class GroupMetadataManager {
|
||||||
* a given partition.
|
* a given partition.
|
||||||
* @param targetAssignmentEpoch The target assignment epoch.
|
* @param targetAssignmentEpoch The target assignment epoch.
|
||||||
* @param targetAssignment The target assignment.
|
* @param targetAssignment The target assignment.
|
||||||
|
* @param resolvedRegularExpressions The resolved regular expressions.
|
||||||
|
* @param hasSubscriptionChanged Whether the member has changed its subscription on the current heartbeat.
|
||||||
* @param ownedTopicPartitions The list of partitions owned by the member. This
|
* @param ownedTopicPartitions The list of partitions owned by the member. This
|
||||||
* is reported in the ConsumerGroupHeartbeat API and
|
* is reported in the ConsumerGroupHeartbeat API and
|
||||||
* it could be null if not provided.
|
* it could be null if not provided.
|
||||||
|
@ -3511,15 +3548,20 @@ public class GroupMetadataManager {
|
||||||
BiFunction<Uuid, Integer, Integer> currentPartitionEpoch,
|
BiFunction<Uuid, Integer, Integer> currentPartitionEpoch,
|
||||||
int targetAssignmentEpoch,
|
int targetAssignmentEpoch,
|
||||||
Assignment targetAssignment,
|
Assignment targetAssignment,
|
||||||
|
Map<String, ResolvedRegularExpression> resolvedRegularExpressions,
|
||||||
|
boolean hasSubscriptionChanged,
|
||||||
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions,
|
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions,
|
||||||
List<CoordinatorRecord> records
|
List<CoordinatorRecord> records
|
||||||
) {
|
) {
|
||||||
if (member.isReconciledTo(targetAssignmentEpoch)) {
|
if (!hasSubscriptionChanged && member.isReconciledTo(targetAssignmentEpoch)) {
|
||||||
return member;
|
return member;
|
||||||
}
|
}
|
||||||
|
|
||||||
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(targetAssignmentEpoch, targetAssignment)
|
.withTargetAssignment(targetAssignmentEpoch, targetAssignment)
|
||||||
|
.withHasSubscriptionChanged(hasSubscriptionChanged)
|
||||||
|
.withResolvedRegularExpressions(resolvedRegularExpressions)
|
||||||
.withCurrentPartitionEpoch(currentPartitionEpoch)
|
.withCurrentPartitionEpoch(currentPartitionEpoch)
|
||||||
.withOwnedTopicPartitions(ownedTopicPartitions)
|
.withOwnedTopicPartitions(ownedTopicPartitions)
|
||||||
.build();
|
.build();
|
||||||
|
@ -3560,6 +3602,7 @@ public class GroupMetadataManager {
|
||||||
* @param member The member to reconcile.
|
* @param member The member to reconcile.
|
||||||
* @param targetAssignmentEpoch The target assignment epoch.
|
* @param targetAssignmentEpoch The target assignment epoch.
|
||||||
* @param targetAssignment The target assignment.
|
* @param targetAssignment The target assignment.
|
||||||
|
* @param hasSubscriptionChanged Whether the member has changed its subscription on the current heartbeat.
|
||||||
* @param records The list to accumulate any new records.
|
* @param records The list to accumulate any new records.
|
||||||
* @return The received member if no changes have been made; or a new
|
* @return The received member if no changes have been made; or a new
|
||||||
* member containing the new assignment.
|
* member containing the new assignment.
|
||||||
|
@ -3569,14 +3612,17 @@ public class GroupMetadataManager {
|
||||||
ShareGroupMember member,
|
ShareGroupMember member,
|
||||||
int targetAssignmentEpoch,
|
int targetAssignmentEpoch,
|
||||||
Assignment targetAssignment,
|
Assignment targetAssignment,
|
||||||
|
boolean hasSubscriptionChanged,
|
||||||
List<CoordinatorRecord> records
|
List<CoordinatorRecord> records
|
||||||
) {
|
) {
|
||||||
if (member.isReconciledTo(targetAssignmentEpoch)) {
|
if (!hasSubscriptionChanged && member.isReconciledTo(targetAssignmentEpoch)) {
|
||||||
return member;
|
return member;
|
||||||
}
|
}
|
||||||
|
|
||||||
ShareGroupMember updatedMember = new ShareGroupAssignmentBuilder(member)
|
ShareGroupMember updatedMember = new ShareGroupAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(targetAssignmentEpoch, targetAssignment)
|
.withTargetAssignment(targetAssignmentEpoch, targetAssignment)
|
||||||
|
.withHasSubscriptionChanged(hasSubscriptionChanged)
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
if (!updatedMember.equals(member)) {
|
if (!updatedMember.equals(member)) {
|
||||||
|
|
|
@ -19,8 +19,11 @@ package org.apache.kafka.coordinator.group.modern.consumer;
|
||||||
import org.apache.kafka.common.Uuid;
|
import org.apache.kafka.common.Uuid;
|
||||||
import org.apache.kafka.common.errors.FencedMemberEpochException;
|
import org.apache.kafka.common.errors.FencedMemberEpochException;
|
||||||
import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData;
|
import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData;
|
||||||
|
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
|
||||||
import org.apache.kafka.coordinator.group.modern.Assignment;
|
import org.apache.kafka.coordinator.group.modern.Assignment;
|
||||||
import org.apache.kafka.coordinator.group.modern.MemberState;
|
import org.apache.kafka.coordinator.group.modern.MemberState;
|
||||||
|
import org.apache.kafka.coordinator.group.modern.TopicIds;
|
||||||
|
import org.apache.kafka.coordinator.group.modern.UnionSet;
|
||||||
|
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
|
@ -41,6 +44,11 @@ public class CurrentAssignmentBuilder {
|
||||||
*/
|
*/
|
||||||
private final ConsumerGroupMember member;
|
private final ConsumerGroupMember member;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The metadata image.
|
||||||
|
*/
|
||||||
|
private CoordinatorMetadataImage metadataImage = CoordinatorMetadataImage.EMPTY;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The target assignment epoch.
|
* The target assignment epoch.
|
||||||
*/
|
*/
|
||||||
|
@ -51,6 +59,16 @@ public class CurrentAssignmentBuilder {
|
||||||
*/
|
*/
|
||||||
private Assignment targetAssignment;
|
private Assignment targetAssignment;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Whether the member has changed its subscription on the current heartbeat.
|
||||||
|
*/
|
||||||
|
private boolean hasSubscriptionChanged;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The resolved regular expressions.
|
||||||
|
*/
|
||||||
|
private Map<String, ResolvedRegularExpression> resolvedRegularExpressions = Map.of();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A function which returns the current epoch of a topic-partition or -1 if the
|
* A function which returns the current epoch of a topic-partition or -1 if the
|
||||||
* topic-partition is not assigned. The current epoch is the epoch of the current owner.
|
* topic-partition is not assigned. The current epoch is the epoch of the current owner.
|
||||||
|
@ -73,6 +91,19 @@ public class CurrentAssignmentBuilder {
|
||||||
this.member = Objects.requireNonNull(member);
|
this.member = Objects.requireNonNull(member);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the metadata image.
|
||||||
|
*
|
||||||
|
* @param metadataImage The metadata image.
|
||||||
|
* @return This object.
|
||||||
|
*/
|
||||||
|
public CurrentAssignmentBuilder withMetadataImage(
|
||||||
|
CoordinatorMetadataImage metadataImage
|
||||||
|
) {
|
||||||
|
this.metadataImage = metadataImage;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sets the target assignment epoch and the target assignment that the
|
* Sets the target assignment epoch and the target assignment that the
|
||||||
* consumer group member must be reconciled to.
|
* consumer group member must be reconciled to.
|
||||||
|
@ -90,6 +121,32 @@ public class CurrentAssignmentBuilder {
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets whether the member has changed its subscription on the current heartbeat.
|
||||||
|
*
|
||||||
|
* @param hasSubscriptionChanged If true, always removes unsubscribed topics from the current assignment.
|
||||||
|
* @return This object.
|
||||||
|
*/
|
||||||
|
public CurrentAssignmentBuilder withHasSubscriptionChanged(
|
||||||
|
boolean hasSubscriptionChanged
|
||||||
|
) {
|
||||||
|
this.hasSubscriptionChanged = hasSubscriptionChanged;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the resolved regular expressions.
|
||||||
|
*
|
||||||
|
* @param resolvedRegularExpressions The resolved regular expressions.
|
||||||
|
* @return This object.
|
||||||
|
*/
|
||||||
|
public CurrentAssignmentBuilder withResolvedRegularExpressions(
|
||||||
|
Map<String, ResolvedRegularExpression> resolvedRegularExpressions
|
||||||
|
) {
|
||||||
|
this.resolvedRegularExpressions = resolvedRegularExpressions;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sets a BiFunction which allows to retrieve the current epoch of a
|
* Sets a BiFunction which allows to retrieve the current epoch of a
|
||||||
* partition. This is used by the state machine to determine if a
|
* partition. This is used by the state machine to determine if a
|
||||||
|
@ -132,12 +189,15 @@ public class CurrentAssignmentBuilder {
|
||||||
case STABLE:
|
case STABLE:
|
||||||
// When the member is in the STABLE state, we verify if a newer
|
        // When the member is in the STABLE state, we verify if a newer
        // epoch (or target assignment) is available. If it is, we can
-       // reconcile the member towards it. Otherwise, we return.
+       // reconcile the member towards it. Otherwise, we ensure the
+       // assignment is consistent with the subscribed topics, if changed.
        if (member.memberEpoch() != targetAssignmentEpoch) {
            return computeNextAssignment(
                member.memberEpoch(),
                member.assignedPartitions()
            );
+       } else if (hasSubscriptionChanged) {
+           return updateCurrentAssignment(member.assignedPartitions());
        } else {
            return member;
        }

@@ -147,18 +207,27 @@ public class CurrentAssignmentBuilder {
        // until the member has revoked the necessary partitions. They are
        // considered revoked when they are not anymore reported in the
        // owned partitions set in the ConsumerGroupHeartbeat API.
+       // Additional partitions may need revoking when the member's
+       // subscription changes.

        // If the member provides its owned partitions. We verify if it still
        // owns any of the revoked partitions. If it does, we cannot progress.
        if (ownsRevokedPartitions(member.partitionsPendingRevocation())) {
+           if (hasSubscriptionChanged) {
+               return updateCurrentAssignment(member.assignedPartitions());
+           } else {
                return member;
            }
+       }

        // When the member has revoked all the pending partitions, it can
        // transition to the next epoch (current + 1) and we can reconcile
        // its state towards the latest target assignment.
        return computeNextAssignment(
-           member.memberEpoch() + 1,
+           // When we enter UNREVOKED_PARTITIONS due to a subscription change,
+           // we must not advance the member epoch when the new target
+           // assignment is not available yet.
+           Math.min(member.memberEpoch() + 1, targetAssignmentEpoch),
            member.assignedPartitions()
        );

@@ -215,6 +284,71 @@ public class CurrentAssignmentBuilder {
        return false;
    }

+   /**
+    * Updates the current assignment, removing any partitions that are not part of the subscribed topics.
+    * This method is a lot faster than running the full reconciliation logic in computeNextAssignment.
+    *
+    * @param memberAssignedPartitions The assigned partitions of the member to use.
+    * @return A new ConsumerGroupMember.
+    */
+   private ConsumerGroupMember updateCurrentAssignment(
+       Map<Uuid, Set<Integer>> memberAssignedPartitions
+   ) {
+       Set<Uuid> subscribedTopicIds = subscribedTopicIds();
+
+       // Reuse the original map if no topics need to be removed.
+       Map<Uuid, Set<Integer>> newAssignedPartitions;
+       Map<Uuid, Set<Integer>> newPartitionsPendingRevocation;
+       if (subscribedTopicIds.isEmpty() && member.partitionsPendingRevocation().isEmpty()) {
+           newAssignedPartitions = Map.of();
+           newPartitionsPendingRevocation = memberAssignedPartitions;
+       } else {
+           newAssignedPartitions = memberAssignedPartitions;
+           newPartitionsPendingRevocation = new HashMap<>(member.partitionsPendingRevocation());
+           for (Map.Entry<Uuid, Set<Integer>> entry : memberAssignedPartitions.entrySet()) {
+               if (!subscribedTopicIds.contains(entry.getKey())) {
+                   if (newAssignedPartitions == memberAssignedPartitions) {
+                       newAssignedPartitions = new HashMap<>(memberAssignedPartitions);
+                       newPartitionsPendingRevocation = new HashMap<>(member.partitionsPendingRevocation());
+                   }
+                   newAssignedPartitions.remove(entry.getKey());
+                   newPartitionsPendingRevocation.merge(
+                       entry.getKey(),
+                       entry.getValue(),
+                       (existing, additional) -> {
+                           existing = new HashSet<>(existing);
+                           existing.addAll(additional);
+                           return existing;
+                       }
+                   );
+               }
+           }
+       }
+
+       if (newAssignedPartitions == memberAssignedPartitions) {
+           // If no partitions were removed, we can return the member as is.
+           return member;
+       }
+
+       if (!newPartitionsPendingRevocation.isEmpty() && ownsRevokedPartitions(newPartitionsPendingRevocation)) {
+           return new ConsumerGroupMember.Builder(member)
+               .setState(MemberState.UNREVOKED_PARTITIONS)
+               .setAssignedPartitions(newAssignedPartitions)
+               .setPartitionsPendingRevocation(newPartitionsPendingRevocation)
+               .build();
+       } else {
+           // There were partitions removed, but they were already revoked.
+           // Keep the member in the current state and shrink the assigned partitions.
+
+           // We do not expect to be in the UNREVOKED_PARTITIONS state here. The full
+           // reconciliation logic should handle the case where the member has revoked all its
+           // partitions pending revocation.
+           return new ConsumerGroupMember.Builder(member)
+               .setAssignedPartitions(newAssignedPartitions)
+               .build();
+       }
+   }
+
    /**
     * Computes the next assignment.
     *
@@ -227,6 +361,8 @@ public class CurrentAssignmentBuilder {
        int memberEpoch,
        Map<Uuid, Set<Integer>> memberAssignedPartitions
    ) {
+       Set<Uuid> subscribedTopicIds = subscribedTopicIds();
+
        boolean hasUnreleasedPartitions = false;
        Map<Uuid, Set<Integer>> newAssignedPartitions = new HashMap<>();
        Map<Uuid, Set<Integer>> newPartitionsPendingRevocation = new HashMap<>();
@@ -241,6 +377,11 @@ public class CurrentAssignmentBuilder {
            Set<Integer> currentAssignedPartitions = memberAssignedPartitions
                .getOrDefault(topicId, Set.of());

+           // If the member is no longer subscribed to the topic, treat its target assignment as empty.
+           if (!subscribedTopicIds.contains(topicId)) {
+               target = Set.of();
+           }
+
            // New Assigned Partitions = Previous Assigned Partitions ∩ Target
            Set<Integer> assignedPartitions = new HashSet<>(currentAssignedPartitions);
            assignedPartitions.retainAll(target);
@@ -317,4 +458,28 @@ public class CurrentAssignmentBuilder {
                .build();
        }
    }
+
+   /**
+    * Gets the set of topic IDs that the member is subscribed to.
+    *
+    * @return The set of topic IDs that the member is subscribed to.
+    */
+   private Set<Uuid> subscribedTopicIds() {
+       Set<String> subscriptions = member.subscribedTopicNames();
+       String subscribedTopicRegex = member.subscribedTopicRegex();
+       if (subscribedTopicRegex != null && !subscribedTopicRegex.isEmpty()) {
+           ResolvedRegularExpression resolvedRegularExpression = resolvedRegularExpressions.get(subscribedTopicRegex);
+           if (resolvedRegularExpression != null) {
+               if (subscriptions.isEmpty()) {
+                   subscriptions = resolvedRegularExpression.topics();
+               } else if (!resolvedRegularExpression.topics().isEmpty()) {
+                   subscriptions = new UnionSet<>(subscriptions, resolvedRegularExpression.topics());
+               }
+           } else {
+               // Treat an unresolved regex as matching no topics, to be conservative.
+           }
+       }
+
+       return new TopicIds(subscriptions, metadataImage);
+   }
}

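For orientation, the consumer-side reconciliation above is driven through the builder API that the tests later in this commit exercise. The following is a minimal sketch, not an excerpt from the commit: the types are the real coordinator classes touched by this diff, the variable names are illustrative, and the withHasSubscriptionChanged setter is an assumption mirroring the share-group builder below (only the hasSubscriptionChanged field and its use in build() appear in this hunk).

// Hedged sketch: how a caller might drive CurrentAssignmentBuilder with the new
// subscription-aware reconciliation. Variable names are illustrative; the
// withHasSubscriptionChanged call is an assumption (see note above).
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
import org.apache.kafka.coordinator.group.modern.Assignment;
import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
import org.apache.kafka.coordinator.group.modern.consumer.CurrentAssignmentBuilder;

final class ConsumerReconciliationSketch {
    ConsumerGroupMember reconcile(
        ConsumerGroupMember member,
        CoordinatorMetadataImage metadataImage,
        int targetAssignmentEpoch,
        Assignment targetAssignment,
        boolean hasSubscriptionChanged
    ) {
        return new CurrentAssignmentBuilder(member)
            // The metadata image is what subscribedTopicIds() uses to translate the
            // member's topic names (or resolved regex) into topic ids.
            .withMetadataImage(metadataImage)
            .withTargetAssignment(targetAssignmentEpoch, targetAssignment)
            // No partition is held by another member in this sketch.
            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
            // Assumed setter; it makes build() take the updateCurrentAssignment() fast path
            // when the member already sits on the target epoch but its subscription changed.
            .withHasSubscriptionChanged(hasSubscriptionChanged)
            .build();
    }
}
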
@@ -16,10 +16,16 @@
 */
package org.apache.kafka.coordinator.group.modern.share;

+import org.apache.kafka.common.Uuid;
+import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
import org.apache.kafka.coordinator.group.modern.Assignment;
import org.apache.kafka.coordinator.group.modern.MemberState;
+import org.apache.kafka.coordinator.group.modern.TopicIds;

+import java.util.HashMap;
+import java.util.Map;
import java.util.Objects;
+import java.util.Set;

/**
 * The ShareGroupAssignmentBuilder class encapsulates the reconciliation engine of the
@@ -32,6 +38,11 @@ public class ShareGroupAssignmentBuilder {
     */
    private final ShareGroupMember member;

+   /**
+    * The metadata image.
+    */
+   private CoordinatorMetadataImage metadataImage = CoordinatorMetadataImage.EMPTY;
+
    /**
     * The target assignment epoch.
     */
@@ -42,6 +53,11 @@ public class ShareGroupAssignmentBuilder {
     */
    private Assignment targetAssignment;

+   /**
+    * Whether the member has changed its subscription on the current heartbeat.
+    */
+   private boolean hasSubscriptionChanged;
+
    /**
     * Constructs the ShareGroupAssignmentBuilder based on the current state of the
     * provided share group member.
@@ -52,6 +68,19 @@ public class ShareGroupAssignmentBuilder {
        this.member = Objects.requireNonNull(member);
    }

+   /**
+    * Sets the metadata image.
+    *
+    * @param metadataImage The metadata image.
+    * @return This object.
+    */
+   public ShareGroupAssignmentBuilder withMetadataImage(
+       CoordinatorMetadataImage metadataImage
+   ) {
+       this.metadataImage = metadataImage;
+       return this;
+   }
+
    /**
     * Sets the target assignment epoch and the target assignment that the
     * share group member must be reconciled to.
@@ -69,6 +98,19 @@ public class ShareGroupAssignmentBuilder {
        return this;
    }

+   /**
+    * Sets whether the member has changed its subscription on the current heartbeat.
+    *
+    * @param hasSubscriptionChanged If true, always removes unsubscribed topics from the current assignment.
+    * @return This object.
+    */
+   public ShareGroupAssignmentBuilder withHasSubscriptionChanged(
+       boolean hasSubscriptionChanged
+   ) {
+       this.hasSubscriptionChanged = hasSubscriptionChanged;
+       return this;
+   }
+
    /**
     * Builds the next state for the member or keep the current one if it
     * is not possible to move forward with the current state.
@@ -83,11 +125,38 @@ public class ShareGroupAssignmentBuilder {
            // when the member is updated.
            return new ShareGroupMember.Builder(member)
                .setState(MemberState.STABLE)
-               .setAssignedPartitions(targetAssignment.partitions())
+               // If we have client-side assignors, the latest target assignment may not
+               // be consistent with the latest subscribed topics, so we must always
+               // filter the assigned partitions to ensure they are consistent with the
+               // subscribed topics.
+               .setAssignedPartitions(filterAssignedPartitions(targetAssignment.partitions(), member.subscribedTopicNames()))
                .updateMemberEpoch(targetAssignmentEpoch)
                .build();
-       }
+       } else if (hasSubscriptionChanged) {
+           return new ShareGroupMember.Builder(member)
+               .setAssignedPartitions(filterAssignedPartitions(targetAssignment.partitions(), member.subscribedTopicNames()))
+               .build();
+       } else {
            return member;
        }
+   }
+
+   private Map<Uuid, Set<Integer>> filterAssignedPartitions(
+       Map<Uuid, Set<Integer>> partitions,
+       Set<String> subscribedTopicNames
+   ) {
+       TopicIds subscribedTopicIds = new TopicIds(member.subscribedTopicNames(), metadataImage);
+
+       // Reuse the original map if no topics need to be removed.
+       Map<Uuid, Set<Integer>> filteredPartitions = partitions;
+       for (Map.Entry<Uuid, Set<Integer>> entry : partitions.entrySet()) {
+           if (!subscribedTopicIds.contains(entry.getKey())) {
+               if (filteredPartitions == partitions) {
+                   filteredPartitions = new HashMap<>(partitions);
+               }
+               filteredPartitions.remove(entry.getKey());
+           }
+       }
+       return filteredPartitions;
+   }
}

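The share-group builder gains the equivalent knobs. Below is a minimal usage sketch under the same caveats: it is not part of the commit, the variable names are illustrative, and the withTargetAssignment signature is inferred from the pre-existing setter referenced by the @@ -69,6 +98,19 @@ hunk above.

// Hedged sketch: driving ShareGroupAssignmentBuilder so that build() filters the
// assignment against the current subscription. Variable names are illustrative.
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
import org.apache.kafka.coordinator.group.modern.Assignment;
import org.apache.kafka.coordinator.group.modern.share.ShareGroupAssignmentBuilder;
import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember;

final class ShareReconciliationSketch {
    ShareGroupMember reconcile(
        ShareGroupMember member,
        CoordinatorMetadataImage metadataImage,
        int targetAssignmentEpoch,
        Assignment targetAssignment,
        boolean hasSubscriptionChanged
    ) {
        return new ShareGroupAssignmentBuilder(member)
            // Needed so filterAssignedPartitions() can map subscribed topic names to ids.
            .withMetadataImage(metadataImage)
            // Signature inferred from the existing setter (see note above).
            .withTargetAssignment(targetAssignmentEpoch, targetAssignment)
            // When true, build() re-filters even if the member already sits on the target epoch.
            .withHasSubscriptionChanged(hasSubscriptionChanged)
            .build();
    }
}
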
@@ -74,6 +74,7 @@ public class ShareGroupMember extends ModernGroupMember {
            this.memberId = Objects.requireNonNull(newMemberId);
            this.memberEpoch = member.memberEpoch;
            this.previousMemberEpoch = member.previousMemberEpoch;
+           this.state = member.state;
            this.rackId = member.rackId;
            this.clientId = member.clientId;
            this.clientHost = member.clientHost;

@@ -20604,7 +20604,7 @@ public class GroupMetadataManagerTest {
            .build();

        // Member 1 updates its new regular expression.
-       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
+       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result1 = context.consumerGroupHeartbeat(
            new ConsumerGroupHeartbeatRequestData()
                .setGroupId(groupId)
                .setMemberId(memberId1)
@@ -20620,19 +20620,15 @@ public class GroupMetadataManagerTest {
                .setMemberEpoch(10)
                .setHeartbeatIntervalMs(5000)
                .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
-                   .setTopicPartitions(List.of(
-                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
-                           .setTopicId(fooTopicId)
-                           .setPartitions(List.of(0, 1, 2, 3, 4, 5))
-                   ))
+                   .setTopicPartitions(List.of())
                ),
-           result.response()
+           result1.response()
        );

        ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1)
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
-           .setPreviousMemberEpoch(0)
+           .setPreviousMemberEpoch(10)
            .setClientId(DEFAULT_CLIENT_ID)
            .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
            .setRebalanceTimeoutMs(5000)
@@ -20644,10 +20640,12 @@ public class GroupMetadataManagerTest {
            // The member subscription is updated.
            GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1),
            // The previous regular expression is deleted.
-           GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*")
+           GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*"),
+           // The member assignment is updated.
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1)
        );

-       assertRecordsEquals(expectedRecords, result.records());
+       assertRecordsEquals(expectedRecords, result1.records());

        // Execute pending tasks.
        List<MockCoordinatorExecutor.ExecutorResult<CoordinatorRecord>> tasks = context.processTasks();
@@ -20675,6 +20673,65 @@ public class GroupMetadataManagerTest {
            ),
            task.result().records()
        );
+
+       assignor.prepareGroupAssignment(new GroupAssignment(Map.of(
+           memberId1, new MemberAssignmentImpl(mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+               mkTopicAssignment(barTopicId, 0, 1, 2)
+           ))
+       )));
+
+       // Member heartbeats again with the same regex.
+       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result2 = context.consumerGroupHeartbeat(
+           new ConsumerGroupHeartbeatRequestData()
+               .setGroupId(groupId)
+               .setMemberId(memberId1)
+               .setMemberEpoch(10)
+               .setRebalanceTimeoutMs(5000)
+               .setSubscribedTopicRegex("foo*|bar*")
+               .setServerAssignor("range")
+               .setTopicPartitions(List.of()));
+
+       assertResponseEquals(
+           new ConsumerGroupHeartbeatResponseData()
+               .setMemberId(memberId1)
+               .setMemberEpoch(11)
+               .setHeartbeatIntervalMs(5000)
+               .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
+                   .setTopicPartitions(List.of(
+                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
+                           .setTopicId(fooTopicId)
+                           .setPartitions(List.of(0, 1, 2, 3, 4, 5)),
+                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
+                           .setTopicId(barTopicId)
+                           .setPartitions(List.of(0, 1, 2))))),
+           result2.response()
+       );
+
+       ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId1)
+           .setState(MemberState.STABLE)
+           .setMemberEpoch(11)
+           .setPreviousMemberEpoch(10)
+           .setClientId(DEFAULT_CLIENT_ID)
+           .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+           .setRebalanceTimeoutMs(5000)
+           .setSubscribedTopicRegex("foo*|bar*")
+           .setServerAssignorName("range")
+           .setAssignedPartitions(mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+               mkTopicAssignment(barTopicId, 0, 1, 2)))
+           .build();
+
+       expectedRecords = List.of(
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+               mkTopicAssignment(barTopicId, 0, 1, 2)
+           )),
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11),
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)
+       );
+
+       assertRecordsEquals(expectedRecords, result2.records());
    }

    @Test
@@ -21077,10 +21134,7 @@ public class GroupMetadataManagerTest {
                .setMemberEpoch(10)
                .setHeartbeatIntervalMs(5000)
                .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
-                   .setTopicPartitions(List.of(
-                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
-                           .setTopicId(fooTopicId)
-                           .setPartitions(List.of(3, 4, 5))))),
+                   .setTopicPartitions(List.of())),
            result1.response()
        );

@@ -21098,7 +21152,8 @@ public class GroupMetadataManagerTest {
        assertRecordsEquals(
            List.of(
                GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2),
-               GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*")
+               GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*"),
+               GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)
            ),
            result1.records()
        );
@@ -21164,8 +21219,7 @@ public class GroupMetadataManagerTest {
            .setRebalanceTimeoutMs(5000)
            .setSubscribedTopicRegex("foo|bar*")
            .setServerAssignorName("range")
-           .setAssignedPartitions(mkAssignment(
-               mkTopicAssignment(fooTopicId, 3, 4, 5)))
+           .setAssignedPartitions(mkAssignment())
            .build();

        assertResponseEquals(
@@ -21174,10 +21228,7 @@ public class GroupMetadataManagerTest {
                .setMemberEpoch(11)
                .setHeartbeatIntervalMs(5000)
                .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
-                   .setTopicPartitions(List.of(
-                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
-                           .setTopicId(fooTopicId)
-                           .setPartitions(List.of(3, 4, 5))))),
+                   .setTopicPartitions(List.of())),
            result2.response()
        );

@@ -21306,10 +21357,7 @@ public class GroupMetadataManagerTest {
                .setMemberEpoch(10)
                .setHeartbeatIntervalMs(5000)
                .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
-                   .setTopicPartitions(List.of(
-                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
-                           .setTopicId(fooTopicId)
-                           .setPartitions(List.of(3, 4, 5))))),
+                   .setTopicPartitions(List.of())),
            result1.response()
        );

@@ -21327,7 +21375,8 @@ public class GroupMetadataManagerTest {
        assertRecordsEquals(
            List.of(
                GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2),
-               GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*")
+               GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*"),
+               GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)
            ),
            result1.records()
        );
@@ -21440,6 +21489,219 @@ public class GroupMetadataManagerTest {
        );
    }

+   @Test
+   public void testStaticConsumerGroupMemberJoinsWithUpdatedRegex() {
+       String groupId = "fooup";
+       String memberId1 = Uuid.randomUuid().toString();
+       String memberId2 = Uuid.randomUuid().toString();
+       String instanceId = "instance-id";
+
+       Uuid fooTopicId = Uuid.randomUuid();
+       String fooTopicName = "foo";
+       Uuid barTopicId = Uuid.randomUuid();
+       String barTopicName = "bar";
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(fooTopicId, fooTopicName, 6)
+           .addTopic(barTopicId, barTopicName, 3)
+           .buildCoordinatorMetadataImage(12345L);
+
+       MockPartitionAssignor assignor = new MockPartitionAssignor("range");
+       GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
+           .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor))
+           .withMetadataImage(metadataImage)
+           .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
+               .withMember(new ConsumerGroupMember.Builder(memberId1)
+                   .setInstanceId(instanceId)
+                   .setState(MemberState.STABLE)
+                   .setMemberEpoch(10)
+                   .setPreviousMemberEpoch(10)
+                   .setClientId(DEFAULT_CLIENT_ID)
+                   .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+                   .setRebalanceTimeoutMs(5000)
+                   .setSubscribedTopicRegex("foo*|bar*")
+                   .setServerAssignorName("range")
+                   .setAssignedPartitions(mkAssignment(
+                       mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+                       mkTopicAssignment(barTopicId, 0, 1, 2)))
+                   .build())
+               .withAssignment(memberId1, mkAssignment(
+                   mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+                   mkTopicAssignment(barTopicId, 0, 1, 2)))
+               .withAssignmentEpoch(10))
+           .build();
+
+       // Static member temporarily leaves the group.
+       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result1 = context.consumerGroupHeartbeat(
+           new ConsumerGroupHeartbeatRequestData()
+               .setGroupId(groupId)
+               .setInstanceId(instanceId)
+               .setMemberId(memberId1)
+               .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH)
+       );
+
+       assertResponseEquals(
+           new ConsumerGroupHeartbeatResponseData()
+               .setMemberId(memberId1)
+               .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH),
+           result1.response()
+       );
+
+       // Static member joins the group with an updated regular expression.
+       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result2 = context.consumerGroupHeartbeat(
+           new ConsumerGroupHeartbeatRequestData()
+               .setGroupId(groupId)
+               .setInstanceId(instanceId)
+               .setMemberId(memberId2)
+               .setMemberEpoch(0)
+               .setRebalanceTimeoutMs(5000)
+               .setSubscribedTopicRegex("foo*")
+               .setServerAssignor("range")
+               .setTopicPartitions(List.of()));
+
+       // The returned assignment does not contain topics not in the current regular expression.
+       assertResponseEquals(
+           new ConsumerGroupHeartbeatResponseData()
+               .setMemberId(memberId2)
+               .setMemberEpoch(10)
+               .setHeartbeatIntervalMs(5000)
+               .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
+                   .setTopicPartitions(List.of())
+               ),
+           result2.response()
+       );
+
+       ConsumerGroupMember expectedCopiedMember = new ConsumerGroupMember.Builder(memberId2)
+           .setState(MemberState.STABLE)
+           .setInstanceId(instanceId)
+           .setMemberEpoch(0)
+           .setPreviousMemberEpoch(0)
+           .setClientId(DEFAULT_CLIENT_ID)
+           .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+           .setRebalanceTimeoutMs(5000)
+           .setSubscribedTopicRegex("foo*|bar*")
+           .setServerAssignorName("range")
+           .setAssignedPartitions(mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+               mkTopicAssignment(barTopicId, 0, 1, 2)))
+           .build();
+
+       ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId2)
+           .setState(MemberState.STABLE)
+           .setInstanceId(instanceId)
+           .setMemberEpoch(10)
+           .setPreviousMemberEpoch(0)
+           .setClientId(DEFAULT_CLIENT_ID)
+           .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+           .setRebalanceTimeoutMs(5000)
+           .setSubscribedTopicRegex("foo*")
+           .setServerAssignorName("range")
+           .build();
+
+       List<CoordinatorRecord> expectedRecords = List.of(
+           // The previous member is deleted.
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1),
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1),
+           GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1),
+           // The previous member is replaced by the new one.
+           GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedCopiedMember),
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
+               mkTopicAssignment(barTopicId, 0, 1, 2)
+           )),
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedCopiedMember),
+           // The member subscription is updated.
+           GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1),
+           // The previous regular expression is deleted.
+           GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*|bar*"),
+           // The member assignment is updated.
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1)
+       );
+
+       assertRecordsEquals(expectedRecords, result2.records());
+
+       // Execute pending tasks.
+       List<MockCoordinatorExecutor.ExecutorResult<CoordinatorRecord>> tasks = context.processTasks();
+       assertEquals(1, tasks.size());
+
+       MockCoordinatorExecutor.ExecutorResult<CoordinatorRecord> task = tasks.get(0);
+       assertEquals(groupId + "-regex", task.key());
+       assertRecordsEquals(
+           List.of(
+               // The resolution of the new regex is persisted.
+               GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord(
+                   groupId,
+                   "foo*",
+                   new ResolvedRegularExpression(
+                       Set.of("foo"),
+                       12345L,
+                       context.time.milliseconds()
+                   )
+               ),
+               // The group epoch is bumped.
+               GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11, computeGroupHash(Map.of(
+                   fooTopicName, computeTopicHash(fooTopicName, metadataImage)
+               )))
+           ),
+           task.result().records()
+       );
+
+       assignor.prepareGroupAssignment(new GroupAssignment(Map.of(
+           memberId2, new MemberAssignmentImpl(mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5)
+           ))
+       )));
+
+       // Member heartbeats again with the same regex.
+       CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result3 = context.consumerGroupHeartbeat(
+           new ConsumerGroupHeartbeatRequestData()
+               .setGroupId(groupId)
+               .setInstanceId(instanceId)
+               .setMemberId(memberId2)
+               .setMemberEpoch(10)
+               .setRebalanceTimeoutMs(5000)
+               .setSubscribedTopicRegex("foo*")
+               .setServerAssignor("range")
+               .setTopicPartitions(List.of()));
+
+       assertResponseEquals(
+           new ConsumerGroupHeartbeatResponseData()
+               .setMemberId(memberId2)
+               .setMemberEpoch(11)
+               .setHeartbeatIntervalMs(5000)
+               .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
+                   .setTopicPartitions(List.of(
+                       new ConsumerGroupHeartbeatResponseData.TopicPartitions()
+                           .setTopicId(fooTopicId)
+                           .setPartitions(List.of(0, 1, 2, 3, 4, 5))))),
+           result3.response()
+       );
+
+       ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2)
+           .setState(MemberState.STABLE)
+           .setInstanceId(instanceId)
+           .setMemberEpoch(11)
+           .setPreviousMemberEpoch(10)
+           .setClientId(DEFAULT_CLIENT_ID)
+           .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+           .setRebalanceTimeoutMs(5000)
+           .setSubscribedTopicRegex("foo*|bar*")
+           .setServerAssignorName("range")
+           .setAssignedPartitions(mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5)))
+           .build();
+
+       expectedRecords = List.of(
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment(
+               mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5)
+           )),
+           GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11),
+           GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)
+       );
+
+       assertRecordsEquals(expectedRecords, result3.records());
+   }
+
    @Test
    public void testResolvedRegularExpressionsRemovedWhenMembersLeaveOrFenced() {
        String groupId = "fooup";

@@ -19,13 +19,19 @@ package org.apache.kafka.coordinator.group.modern.consumer;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.FencedMemberEpochException;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData;
+import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
+import org.apache.kafka.coordinator.common.runtime.MetadataImageBuilder;
import org.apache.kafka.coordinator.group.modern.Assignment;
import org.apache.kafka.coordinator.group.modern.MemberState;

import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;

import java.util.Arrays;
import java.util.List;
+import java.util.Map;
+import java.util.Set;

import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkAssignment;
import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkTopicAssignment;
@@ -36,19 +42,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testStableToStable() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6))))
@@ -60,6 +75,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
@@ -70,19 +86,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testStableToStableWithNewPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3, 4),
                mkTopicAssignment(topicId2, 4, 5, 6, 7))))
@@ -94,6 +119,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3, 4),
                mkTopicAssignment(topicId2, 4, 5, 6, 7)))
@@ -104,19 +130,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testStableToUnrevokedPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3, 4),
                mkTopicAssignment(topicId2, 5, 6, 7))))
@@ -128,6 +163,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -141,19 +177,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testStableToUnreleasedPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3, 4),
                mkTopicAssignment(topicId2, 4, 5, 6, 7))))
@@ -165,6 +210,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
@@ -175,19 +221,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testStableToUnreleasedPartitionsWithOwnedPartitionsNotHavingRevokedPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 7))))
@@ -202,6 +257,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5)))
@@ -212,13 +268,21 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testUnrevokedPartitionsToStable() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -228,6 +292,7 @@ public class CurrentAssignmentBuilderTest {
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6))))
@@ -246,6 +311,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -256,13 +322,21 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testRemainsInUnrevokedPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -272,6 +346,7 @@ public class CurrentAssignmentBuilderTest {
            .build();

        CurrentAssignmentBuilder currentAssignmentBuilder = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(12, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 3),
                mkTopicAssignment(topicId2, 6))))
@@ -311,15 +386,27 @@ public class CurrentAssignmentBuilderTest {
        );
    }

-   @Test
-   public void testUnrevokedPartitionsToUnrevokedPartitions() {
+   @ParameterizedTest
+   @CsvSource({
+       "10, 12, 11",
+       "10, 10, 10", // The member epoch must not advance past the target assignment epoch.
+   })
+   public void testUnrevokedPartitionsToUnrevokedPartitions(int memberEpoch, int targetAssignmentEpoch, int expectedMemberEpoch) {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.UNREVOKED_PARTITIONS)
-           .setMemberEpoch(10)
-           .setPreviousMemberEpoch(10)
+           .setMemberEpoch(memberEpoch)
+           .setPreviousMemberEpoch(memberEpoch)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -329,7 +416,8 @@ public class CurrentAssignmentBuilderTest {
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
-           .withTargetAssignment(12, new Assignment(mkAssignment(
+           .withMetadataImage(metadataImage)
+           .withTargetAssignment(targetAssignmentEpoch, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 3),
                mkTopicAssignment(topicId2, 6))))
            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
@@ -345,8 +433,9 @@ public class CurrentAssignmentBuilderTest {
        assertEquals(
            new ConsumerGroupMember.Builder("member")
                .setState(MemberState.UNREVOKED_PARTITIONS)
-               .setMemberEpoch(11)
-               .setPreviousMemberEpoch(10)
+               .setMemberEpoch(expectedMemberEpoch)
+               .setPreviousMemberEpoch(memberEpoch)
+               .setSubscribedTopicNames(List.of(topic1, topic2))
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(topicId1, 3),
                    mkTopicAssignment(topicId2, 6)))
@@ -360,19 +449,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testUnrevokedPartitionsToUnreleasedPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3, 4),
                mkTopicAssignment(topicId2, 5, 6, 7))))
@@ -391,6 +489,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(11)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -401,19 +500,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testUnreleasedPartitionsToStable() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+           .addTopic(topicId1, topic1, 10)
+           .addTopic(topicId2, topic2, 10)
+           .buildCoordinatorMetadataImage();

        ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(11)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
            .build();

        ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
+           .withMetadataImage(metadataImage)
            .withTargetAssignment(12, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6))))
@@ -425,6 +533,7 @@ public class CurrentAssignmentBuilderTest {
            .setState(MemberState.STABLE)
            .setMemberEpoch(12)
            .setPreviousMemberEpoch(11)
+           .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 2, 3),
                mkTopicAssignment(topicId2, 5, 6)))
@@ -435,19 +544,28 @@ public class CurrentAssignmentBuilderTest {

    @Test
    public void testUnreleasedPartitionsToStableWithNewPartitions() {
+       String topic1 = "topic1";
+       String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
+
+       CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
.setState(MemberState.UNRELEASED_PARTITIONS)
|
.setState(MemberState.UNRELEASED_PARTITIONS)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3),
|
mkTopicAssignment(topicId1, 2, 3),
|
||||||
mkTopicAssignment(topicId2, 5, 6)))
|
mkTopicAssignment(topicId2, 5, 6)))
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(11, new Assignment(mkAssignment(
|
.withTargetAssignment(11, new Assignment(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3, 4),
|
mkTopicAssignment(topicId1, 2, 3, 4),
|
||||||
mkTopicAssignment(topicId2, 5, 6, 7))))
|
mkTopicAssignment(topicId2, 5, 6, 7))))
|
||||||
|
@ -459,6 +577,7 @@ public class CurrentAssignmentBuilderTest {
|
||||||
.setState(MemberState.STABLE)
|
.setState(MemberState.STABLE)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3, 4),
|
mkTopicAssignment(topicId1, 2, 3, 4),
|
||||||
mkTopicAssignment(topicId2, 5, 6, 7)))
|
mkTopicAssignment(topicId2, 5, 6, 7)))
|
||||||
|
@ -469,19 +588,28 @@ public class CurrentAssignmentBuilderTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testUnreleasedPartitionsToUnreleasedPartitions() {
|
public void testUnreleasedPartitionsToUnreleasedPartitions() {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
Uuid topicId1 = Uuid.randomUuid();
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
Uuid topicId2 = Uuid.randomUuid();
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
.setState(MemberState.UNRELEASED_PARTITIONS)
|
.setState(MemberState.UNRELEASED_PARTITIONS)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3),
|
mkTopicAssignment(topicId1, 2, 3),
|
||||||
mkTopicAssignment(topicId2, 5, 6)))
|
mkTopicAssignment(topicId2, 5, 6)))
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(11, new Assignment(mkAssignment(
|
.withTargetAssignment(11, new Assignment(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3, 4),
|
mkTopicAssignment(topicId1, 2, 3, 4),
|
||||||
mkTopicAssignment(topicId2, 5, 6, 7))))
|
mkTopicAssignment(topicId2, 5, 6, 7))))
|
||||||
|
@ -493,19 +621,28 @@ public class CurrentAssignmentBuilderTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testUnreleasedPartitionsToUnrevokedPartitions() {
|
public void testUnreleasedPartitionsToUnrevokedPartitions() {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
Uuid topicId1 = Uuid.randomUuid();
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
Uuid topicId2 = Uuid.randomUuid();
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
.setState(MemberState.UNRELEASED_PARTITIONS)
|
.setState(MemberState.UNRELEASED_PARTITIONS)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 2, 3),
|
mkTopicAssignment(topicId1, 2, 3),
|
||||||
mkTopicAssignment(topicId2, 5, 6)))
|
mkTopicAssignment(topicId2, 5, 6)))
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(12, new Assignment(mkAssignment(
|
.withTargetAssignment(12, new Assignment(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6))))
|
mkTopicAssignment(topicId2, 6))))
|
||||||
|
@ -517,6 +654,7 @@ public class CurrentAssignmentBuilderTest {
|
||||||
.setState(MemberState.UNREVOKED_PARTITIONS)
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6)))
|
mkTopicAssignment(topicId2, 6)))
|
||||||
|
@ -530,13 +668,21 @@ public class CurrentAssignmentBuilderTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testUnknownState() {
|
public void testUnknownState() {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
Uuid topicId1 = Uuid.randomUuid();
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
Uuid topicId2 = Uuid.randomUuid();
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
.setState(MemberState.UNKNOWN)
|
.setState(MemberState.UNKNOWN)
|
||||||
.setMemberEpoch(11)
|
.setMemberEpoch(11)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6)))
|
mkTopicAssignment(topicId2, 6)))
|
||||||
|
@ -548,6 +694,7 @@ public class CurrentAssignmentBuilderTest {
|
||||||
// When the member is in an unknown state, the member is first to force
|
// When the member is in an unknown state, the member is first to force
|
||||||
// a reset of the client side member state.
|
// a reset of the client side member state.
|
||||||
assertThrows(FencedMemberEpochException.class, () -> new CurrentAssignmentBuilder(member)
|
assertThrows(FencedMemberEpochException.class, () -> new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(12, new Assignment(mkAssignment(
|
.withTargetAssignment(12, new Assignment(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6))))
|
mkTopicAssignment(topicId2, 6))))
|
||||||
|
@ -556,6 +703,7 @@ public class CurrentAssignmentBuilderTest {
|
||||||
|
|
||||||
// Then the member rejoins with no owned partitions.
|
// Then the member rejoins with no owned partitions.
|
||||||
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
.withTargetAssignment(12, new Assignment(mkAssignment(
|
.withTargetAssignment(12, new Assignment(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6))))
|
mkTopicAssignment(topicId2, 6))))
|
||||||
|
@ -568,6 +716,7 @@ public class CurrentAssignmentBuilderTest {
|
||||||
.setState(MemberState.STABLE)
|
.setState(MemberState.STABLE)
|
||||||
.setMemberEpoch(12)
|
.setMemberEpoch(12)
|
||||||
.setPreviousMemberEpoch(11)
|
.setPreviousMemberEpoch(11)
|
||||||
|
.setSubscribedTopicNames(List.of(topic1, topic2))
|
||||||
.setAssignedPartitions(mkAssignment(
|
.setAssignedPartitions(mkAssignment(
|
||||||
mkTopicAssignment(topicId1, 3),
|
mkTopicAssignment(topicId1, 3),
|
||||||
mkTopicAssignment(topicId2, 6)))
|
mkTopicAssignment(topicId2, 6)))
|
||||||
|
@ -575,4 +724,355 @@ public class CurrentAssignmentBuilderTest {
|
||||||
updatedMember
|
updatedMember
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ParameterizedTest
|
||||||
|
@CsvSource({
|
||||||
|
"10, 11, 11, false", // When advancing to a new target assignment, the assignment should
|
||||||
|
"10, 11, 11, true", // always take the subscription into account.
|
||||||
|
"10, 10, 10, true",
|
||||||
|
})
|
||||||
|
public void testStableToStableWithAssignmentTopicsNoLongerInSubscription(
|
||||||
|
int memberEpoch,
|
||||||
|
int targetAssignmentEpoch,
|
||||||
|
int expectedMemberEpoch,
|
||||||
|
boolean hasSubscriptionChanged
|
||||||
|
) {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(memberEpoch)
|
||||||
|
.setPreviousMemberEpoch(memberEpoch)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(targetAssignmentEpoch, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6))))
|
||||||
|
.withHasSubscriptionChanged(hasSubscriptionChanged)
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(topicId2)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(expectedMemberEpoch)
|
||||||
|
.setPreviousMemberEpoch(memberEpoch)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@ParameterizedTest
|
||||||
|
@CsvSource({
|
||||||
|
"10, 11, 10, false", // When advancing to a new target assignment, the assignment should always
|
||||||
|
"10, 11, 10, true", // take the subscription into account.
|
||||||
|
"10, 10, 10, true"
|
||||||
|
})
|
||||||
|
public void testStableToUnrevokedPartitionsWithAssignmentTopicsNoLongerInSubscription(
|
||||||
|
int memberEpoch,
|
||||||
|
int targetAssignmentEpoch,
|
||||||
|
int expectedMemberEpoch,
|
||||||
|
boolean hasSubscriptionChanged
|
||||||
|
) {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(memberEpoch)
|
||||||
|
.setPreviousMemberEpoch(memberEpoch)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(targetAssignmentEpoch, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6))))
|
||||||
|
.withHasSubscriptionChanged(hasSubscriptionChanged)
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(topicId1)
|
||||||
|
.setPartitions(Arrays.asList(1, 2, 3)),
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(topicId2)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
|
.setMemberEpoch(expectedMemberEpoch)
|
||||||
|
.setPreviousMemberEpoch(memberEpoch)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId2, 4, 5, 6)))
|
||||||
|
.setPartitionsPendingRevocation(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRemainsInUnrevokedPartitionsWithAssignmentTopicsNoLongerInSubscription() {
|
||||||
|
String topic1 = "topic1";
|
||||||
|
String topic2 = "topic2";
|
||||||
|
Uuid topicId1 = Uuid.randomUuid();
|
||||||
|
Uuid topicId2 = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(topicId1, topic1, 10)
|
||||||
|
.addTopic(topicId2, topic2, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 5, 6)))
|
||||||
|
.setPartitionsPendingRevocation(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1),
|
||||||
|
mkTopicAssignment(topicId2, 4)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(12, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 3, 4),
|
||||||
|
mkTopicAssignment(topicId2, 6, 7))))
|
||||||
|
.withHasSubscriptionChanged(true)
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(topicId1)
|
||||||
|
.setPartitions(Arrays.asList(1, 2, 3)),
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(topicId2)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(topic2))
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId2, 5, 6)))
|
||||||
|
.setPartitionsPendingRevocation(mkAssignment(
|
||||||
|
mkTopicAssignment(topicId1, 1, 2, 3),
|
||||||
|
mkTopicAssignment(topicId2, 4)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSubscribedTopicNameAndUnresolvedRegularExpression() {
|
||||||
|
String fooTopic = "foo";
|
||||||
|
String barTopic = "bar";
|
||||||
|
Uuid fooTopicId = Uuid.randomUuid();
|
||||||
|
Uuid barTopicId = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(fooTopicId, fooTopic, 10)
|
||||||
|
.addTopic(barTopicId, barTopic, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(fooTopic))
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(10, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6))))
|
||||||
|
.withHasSubscriptionChanged(true)
|
||||||
|
.withResolvedRegularExpressions(Map.of())
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(fooTopicId)
|
||||||
|
.setPartitions(Arrays.asList(1, 2, 3)),
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(barTopicId)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(fooTopic))
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3)))
|
||||||
|
.setPartitionsPendingRevocation(mkAssignment(
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUnresolvedRegularExpression() {
|
||||||
|
String fooTopic = "foo";
|
||||||
|
String barTopic = "bar";
|
||||||
|
Uuid fooTopicId = Uuid.randomUuid();
|
||||||
|
Uuid barTopicId = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(fooTopicId, fooTopic, 10)
|
||||||
|
.addTopic(barTopicId, barTopic, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of())
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(10, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6))))
|
||||||
|
.withHasSubscriptionChanged(true)
|
||||||
|
.withResolvedRegularExpressions(Map.of())
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(fooTopicId)
|
||||||
|
.setPartitions(Arrays.asList(1, 2, 3)),
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(barTopicId)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.UNREVOKED_PARTITIONS)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of())
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment())
|
||||||
|
.setPartitionsPendingRevocation(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSubscribedTopicNameAndResolvedRegularExpression() {
|
||||||
|
String fooTopic = "foo";
|
||||||
|
String barTopic = "bar";
|
||||||
|
Uuid fooTopicId = Uuid.randomUuid();
|
||||||
|
Uuid barTopicId = Uuid.randomUuid();
|
||||||
|
|
||||||
|
CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
|
||||||
|
.addTopic(fooTopicId, fooTopic, 10)
|
||||||
|
.addTopic(barTopicId, barTopic, 10)
|
||||||
|
.buildCoordinatorMetadataImage();
|
||||||
|
|
||||||
|
ConsumerGroupMember member = new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(fooTopic))
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
ConsumerGroupMember updatedMember = new CurrentAssignmentBuilder(member)
|
||||||
|
.withMetadataImage(metadataImage)
|
||||||
|
.withTargetAssignment(10, new Assignment(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6))))
|
||||||
|
.withHasSubscriptionChanged(true)
|
||||||
|
.withResolvedRegularExpressions(Map.of(
|
||||||
|
"bar*", new ResolvedRegularExpression(
|
||||||
|
Set.of("bar"),
|
||||||
|
12345L,
|
||||||
|
0L
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.withCurrentPartitionEpoch((topicId, partitionId) -> -1)
|
||||||
|
.withOwnedTopicPartitions(Arrays.asList(
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(fooTopicId)
|
||||||
|
.setPartitions(Arrays.asList(1, 2, 3)),
|
||||||
|
new ConsumerGroupHeartbeatRequestData.TopicPartitions()
|
||||||
|
.setTopicId(barTopicId)
|
||||||
|
.setPartitions(Arrays.asList(4, 5, 6))))
|
||||||
|
.build();
|
||||||
|
|
||||||
|
assertEquals(
|
||||||
|
new ConsumerGroupMember.Builder("member")
|
||||||
|
.setState(MemberState.STABLE)
|
||||||
|
.setMemberEpoch(10)
|
||||||
|
.setPreviousMemberEpoch(10)
|
||||||
|
.setSubscribedTopicNames(List.of(fooTopic))
|
||||||
|
.setSubscribedTopicRegex("bar*")
|
||||||
|
.setAssignedPartitions(mkAssignment(
|
||||||
|
mkTopicAssignment(fooTopicId, 1, 2, 3),
|
||||||
|
mkTopicAssignment(barTopicId, 4, 5, 6)))
|
||||||
|
.build(),
|
||||||
|
updatedMember
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
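The consumer-group test changes above all exercise one idea: when the member's subscription has changed, partitions belonging to topics the member no longer subscribes to must not remain assigned, even if the target assignment still lists them. The following is a minimal, self-contained sketch of that filtering step; the names (filterBySubscription, the topic-name lookup) are illustrative and not the actual CurrentAssignmentBuilder implementation.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;

public final class SubscriptionFilterSketch {
    /**
     * Keeps only the partitions whose topic is still part of the member's
     * subscribed topic names. Topics unknown to the metadata lookup are dropped too.
     */
    static Map<UUID, Set<Integer>> filterBySubscription(
        Map<UUID, Set<Integer>> assignment,
        Set<String> subscribedTopicNames,
        Function<UUID, String> topicNameById // stands in for the metadata image lookup
    ) {
        Map<UUID, Set<Integer>> result = new HashMap<>();
        assignment.forEach((topicId, partitions) -> {
            String name = topicNameById.apply(topicId);
            if (name != null && subscribedTopicNames.contains(name)) {
                result.put(topicId, partitions);
            }
        });
        return result;
    }

    public static void main(String[] args) {
        UUID topic1 = UUID.randomUUID();
        UUID topic2 = UUID.randomUUID();
        Map<UUID, Set<Integer>> target = Map.of(topic1, Set.of(1, 2, 3), topic2, Set.of(4, 5, 6));
        // The member only subscribes to "topic2", so topic1's partitions are filtered
        // out here and would end up pending revocation rather than assigned.
        System.out.println(filterBySubscription(
            target,
            Set.of("topic2"),
            id -> id.equals(topic1) ? "topic1" : "topic2"));
    }
}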
@@ -17,10 +17,16 @@
 package org.apache.kafka.coordinator.group.modern.share;
 
 import org.apache.kafka.common.Uuid;
+import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
+import org.apache.kafka.coordinator.common.runtime.MetadataImageBuilder;
 import org.apache.kafka.coordinator.group.modern.Assignment;
 import org.apache.kafka.coordinator.group.modern.MemberState;
 
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+
+import java.util.List;
 
 import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkAssignment;
 import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkTopicAssignment;
@@ -30,19 +36,28 @@ public class ShareGroupAssignmentBuilderTest {
 
    @Test
    public void testStableToStable() {
+        String topic1 = "topic1";
+        String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
 
+        CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+            .addTopic(topicId1, topic1, 10)
+            .addTopic(topicId2, topic2, 10)
+            .buildCoordinatorMetadataImage();
+
        ShareGroupMember member = new ShareGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+            .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();
 
        ShareGroupMember updatedMember = new ShareGroupAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6))))
@@ -53,6 +68,7 @@ public class ShareGroupAssignmentBuilderTest {
                .setState(MemberState.STABLE)
                .setMemberEpoch(11)
                .setPreviousMemberEpoch(10)
+                .setSubscribedTopicNames(List.of(topic1, topic2))
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(topicId1, 1, 2, 3),
                    mkTopicAssignment(topicId2, 4, 5, 6)))
@@ -63,19 +79,28 @@ public class ShareGroupAssignmentBuilderTest {
 
    @Test
    public void testStableToStableWithNewPartitions() {
+        String topic1 = "topic1";
+        String topic2 = "topic2";
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
 
+        CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+            .addTopic(topicId1, topic1, 10)
+            .addTopic(topicId2, topic2, 10)
+            .buildCoordinatorMetadataImage();
+
        ShareGroupMember member = new ShareGroupMember.Builder("member")
            .setState(MemberState.STABLE)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
+            .setSubscribedTopicNames(List.of(topic1, topic2))
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3),
                mkTopicAssignment(topicId2, 4, 5, 6)))
            .build();
 
        ShareGroupMember updatedMember = new ShareGroupAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
            .withTargetAssignment(11, new Assignment(mkAssignment(
                mkTopicAssignment(topicId1, 1, 2, 3, 4),
                mkTopicAssignment(topicId2, 4, 5, 6, 7))))
@@ -86,6 +111,7 @@ public class ShareGroupAssignmentBuilderTest {
                .setState(MemberState.STABLE)
                .setMemberEpoch(11)
                .setPreviousMemberEpoch(10)
+                .setSubscribedTopicNames(List.of(topic1, topic2))
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(topicId1, 1, 2, 3, 4),
                    mkTopicAssignment(topicId2, 4, 5, 6, 7)))
@@ -93,4 +119,56 @@ public class ShareGroupAssignmentBuilderTest {
            updatedMember
        );
    }
+
+    @ParameterizedTest
+    @CsvSource({
+        "10, 11, false", // When advancing to a new target assignment, the assignment should always
+        "10, 11, true", // take the subscription into account.
+        "10, 10, true"
+    })
+    public void testStableToStableWithAssignmentTopicsNoLongerInSubscription(
+        int memberEpoch,
+        int targetAssignmentEpoch,
+        boolean hasSubscriptionChanged
+    ) {
+        String topic1 = "topic1";
+        String topic2 = "topic2";
+        Uuid topicId1 = Uuid.randomUuid();
+        Uuid topicId2 = Uuid.randomUuid();
+
+        CoordinatorMetadataImage metadataImage = new MetadataImageBuilder()
+            .addTopic(topicId1, topic1, 10)
+            .addTopic(topicId2, topic2, 10)
+            .buildCoordinatorMetadataImage();
+
+        ShareGroupMember member = new ShareGroupMember.Builder("member")
+            .setState(MemberState.STABLE)
+            .setMemberEpoch(memberEpoch)
+            .setPreviousMemberEpoch(memberEpoch)
+            .setSubscribedTopicNames(List.of(topic2))
+            .setAssignedPartitions(mkAssignment(
+                mkTopicAssignment(topicId1, 1, 2, 3),
+                mkTopicAssignment(topicId2, 4, 5, 6)))
+            .build();
+
+        ShareGroupMember updatedMember = new ShareGroupAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
+            .withTargetAssignment(targetAssignmentEpoch, new Assignment(mkAssignment(
+                mkTopicAssignment(topicId1, 1, 2, 3),
+                mkTopicAssignment(topicId2, 4, 5, 6))))
+            .withHasSubscriptionChanged(hasSubscriptionChanged)
+            .build();
+
+        assertEquals(
+            new ShareGroupMember.Builder("member")
+                .setState(MemberState.STABLE)
+                .setMemberEpoch(targetAssignmentEpoch)
+                .setPreviousMemberEpoch(memberEpoch)
+                .setSubscribedTopicNames(List.of(topic2))
+                .setAssignedPartitions(mkAssignment(
+                    mkTopicAssignment(topicId2, 4, 5, 6)))
+                .build(),
+            updatedMember
+        );
+    }
 }
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.jmh.assignor;
+
+import org.apache.kafka.common.Uuid;
+import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
+import org.apache.kafka.coordinator.group.modern.Assignment;
+import org.apache.kafka.coordinator.group.modern.MemberState;
+import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
+import org.apache.kafka.coordinator.group.modern.consumer.CurrentAssignmentBuilder;
+
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 5)
+@Measurement(iterations = 5)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+public class CurrentAssignmentBuilderBenchmark {
+
+    @Param({"5", "50"})
+    private int partitionsPerTopic;
+
+    @Param({"10", "100", "1000"})
+    private int topicCount;
+
+    private List<String> topicNames;
+
+    private List<Uuid> topicIds;
+
+    private CoordinatorMetadataImage metadataImage;
+
+    private ConsumerGroupMember member;
+
+    private ConsumerGroupMember memberWithUnsubscribedTopics;
+
+    private Assignment targetAssignment;
+
+    @Setup(Level.Trial)
+    public void setup() {
+        setupTopics();
+        setupMember();
+        setupTargetAssignment();
+    }
+
+    private void setupTopics() {
+        topicNames = AssignorBenchmarkUtils.createTopicNames(topicCount);
+        topicIds = new ArrayList<>(topicCount);
+        metadataImage = AssignorBenchmarkUtils.createMetadataImage(topicNames, partitionsPerTopic);
+
+        for (String topicName : topicNames) {
+            Uuid topicId = metadataImage.topicMetadata(topicName).get().id();
+            topicIds.add(topicId);
+        }
+    }
+
+    private void setupMember() {
+        Map<Uuid, Set<Integer>> assignedPartitions = new HashMap<>();
+        for (Uuid topicId : topicIds) {
+            Set<Integer> partitions = IntStream.range(0, partitionsPerTopic)
+                .boxed()
+                .collect(Collectors.toSet());
+            assignedPartitions.put(topicId, partitions);
+        }
+
+        ConsumerGroupMember.Builder memberBuilder = new ConsumerGroupMember.Builder("member")
+            .setState(MemberState.STABLE)
+            .setMemberEpoch(10)
+            .setPreviousMemberEpoch(10)
+            .setSubscribedTopicNames(topicNames)
+            .setAssignedPartitions(assignedPartitions);
+
+        member = memberBuilder.build();
+        memberWithUnsubscribedTopics = memberBuilder
+            .setSubscribedTopicNames(topicNames.subList(0, topicNames.size() - 1))
+            .build();
+    }
+
+    private void setupTargetAssignment() {
+        Map<Uuid, Set<Integer>> assignedPartitions = new HashMap<>();
+        for (Uuid topicId : topicIds) {
+            Set<Integer> partitions = IntStream.range(0, partitionsPerTopic)
+                .boxed()
+                .collect(Collectors.toSet());
+            assignedPartitions.put(topicId, partitions);
+        }
+        targetAssignment = new Assignment(assignedPartitions);
+    }
+
+    @Benchmark
+    @Threads(1)
+    @OutputTimeUnit(TimeUnit.MILLISECONDS)
+    public ConsumerGroupMember stableToStableWithNoChange() {
+        return new CurrentAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
+            .withTargetAssignment(member.memberEpoch(), targetAssignment)
+            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
+            .build();
+    }
+
+    @Benchmark
+    @Threads(1)
+    @OutputTimeUnit(TimeUnit.MILLISECONDS)
+    public ConsumerGroupMember stableToStableWithNewTargetAssignment() {
+        return new CurrentAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
+            .withTargetAssignment(member.memberEpoch() + 1, targetAssignment)
+            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
+            .build();
+    }
+
+    @Benchmark
+    @Threads(1)
+    @OutputTimeUnit(TimeUnit.MILLISECONDS)
+    public ConsumerGroupMember stableToStableWithSubscriptionChange() {
+        return new CurrentAssignmentBuilder(member)
+            .withMetadataImage(metadataImage)
+            .withTargetAssignment(member.memberEpoch(), targetAssignment)
+            .withHasSubscriptionChanged(true)
+            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
+            .build();
+    }
+
+    @Benchmark
+    @Threads(1)
+    @OutputTimeUnit(TimeUnit.MILLISECONDS)
+    public ConsumerGroupMember stableToUnrevokedPartitionsWithSubscriptionChange() {
+        return new CurrentAssignmentBuilder(memberWithUnsubscribedTopics)
+            .withMetadataImage(metadataImage)
+            .withTargetAssignment(memberWithUnsubscribedTopics.memberEpoch(), targetAssignment)
+            .withHasSubscriptionChanged(true)
+            .withCurrentPartitionEpoch((topicId, partitionId) -> -1)
+            .build();
+    }
+}
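The new JMH benchmark above measures CurrentAssignmentBuilder for the common member-epoch transitions under different topic and partition counts. It can be launched through JMH's programmatic runner as well; a minimal launcher is sketched below, where the include pattern and the narrowed parameter values are illustrative choices for a quick local run, not part of the benchmark itself.

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class RunCurrentAssignmentBuilderBenchmark {
    public static void main(String[] args) throws RunnerException {
        Options options = new OptionsBuilder()
            // Match the benchmark class by its simple name.
            .include("CurrentAssignmentBuilderBenchmark")
            // Narrow the parameter space so the run finishes quickly.
            .param("topicCount", "100")
            .param("partitionsPerTopic", "5")
            .build();
        new Runner(options).run();
    }
}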
@@ -494,12 +494,10 @@ public class PartitionChangeBuilder {
 
    private void maybeUpdateLastKnownLeader(PartitionChangeRecord record) {
        if (!useLastKnownLeaderInBalancedRecovery || !eligibleLeaderReplicasEnabled) return;
-        if (record.isr() != null && record.isr().isEmpty() && (partition.lastKnownElr.length != 1 ||
-            partition.lastKnownElr[0] != partition.leader)) {
+        if (record.leader() == NO_LEADER && partition.lastKnownElr.length == 0) {
            // Only update the last known leader when the first time the partition becomes leaderless.
            record.setLastKnownElr(List.of(partition.leader));
-        } else if ((record.leader() >= 0 || (partition.leader != NO_LEADER && record.leader() != NO_LEADER))
-            && partition.lastKnownElr.length > 0) {
+        } else if (record.leader() >= 0 && partition.lastKnownElr.length > 0) {
            // Clear the LastKnownElr field if the partition will have or continues to have a valid leader.
            record.setLastKnownElr(List.of());
        }
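The simplified condition above reduces the lastKnownElr bookkeeping to two transitions: remember the previous leader the first time the partition becomes leaderless, and clear the field once a valid leader exists again. A small stand-alone restatement of that rule, as a pure function with illustrative names (not the actual PartitionChangeBuilder code):

import java.util.List;

final class LastKnownElrRule {
    static final int NO_LEADER = -1;

    /**
     * Returns the lastKnownElr value a change record should carry,
     * or null if the field should be left untouched.
     */
    static List<Integer> lastKnownElrUpdate(int newLeader, int previousLeader, int[] lastKnownElr) {
        if (newLeader == NO_LEADER && lastKnownElr.length == 0) {
            // First transition to leaderless: remember the previous leader.
            return List.of(previousLeader);
        } else if (newLeader >= 0 && lastKnownElr.length > 0) {
            // The partition has, or keeps, a valid leader: clear the field.
            return List.of();
        }
        return null; // no change
    }
}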
@@ -406,7 +406,14 @@ public final class QuorumController implements Controller {
 
        KafkaEventQueue queue = null;
        try {
-            queue = new KafkaEventQueue(time, logContext, threadNamePrefix);
+            queue = new KafkaEventQueue(
+                time,
+                logContext,
+                threadNamePrefix,
+                EventQueue.VoidEvent.INSTANCE,
+                controllerMetrics::updateIdleTime
+            );
+
            return new QuorumController(
                nonFatalFaultHandler,
                fatalFaultHandler,
@@ -19,6 +19,7 @@ package org.apache.kafka.controller.metrics;
 
 import org.apache.kafka.common.utils.Time;
 import org.apache.kafka.server.metrics.KafkaYammerMetrics;
+import org.apache.kafka.server.metrics.TimeRatio;
 
 import com.yammer.metrics.core.Gauge;
 import com.yammer.metrics.core.Histogram;
@@ -48,6 +49,8 @@ public class QuorumControllerMetrics implements AutoCloseable {
        "ControllerEventManager", "EventQueueTimeMs");
    private static final MetricName EVENT_QUEUE_PROCESSING_TIME_MS = getMetricName(
        "ControllerEventManager", "EventQueueProcessingTimeMs");
+    private static final MetricName AVERAGE_IDLE_RATIO = getMetricName(
+        "ControllerEventManager", "AvgIdleRatio");
    private static final MetricName LAST_APPLIED_RECORD_OFFSET = getMetricName(
        "KafkaController", "LastAppliedRecordOffset");
    private static final MetricName LAST_COMMITTED_RECORD_OFFSET = getMetricName(
@@ -64,6 +67,7 @@ public class QuorumControllerMetrics implements AutoCloseable {
        "KafkaController", "EventQueueOperationsTimedOutCount");
    private static final MetricName NEW_ACTIVE_CONTROLLERS_COUNT = getMetricName(
        "KafkaController", "NewActiveControllersCount");
+
    private static final String TIME_SINCE_LAST_HEARTBEAT_RECEIVED_METRIC_NAME = "TimeSinceLastHeartbeatReceivedMs";
    private static final String BROKER_ID_TAG = "broker";
 
@@ -75,6 +79,7 @@ public class QuorumControllerMetrics implements AutoCloseable {
    private final AtomicLong lastAppliedRecordTimestamp = new AtomicLong(0);
    private final Consumer<Long> eventQueueTimeUpdater;
    private final Consumer<Long> eventQueueProcessingTimeUpdater;
+    private final TimeRatio avgIdleTimeRatio;
 
    private final AtomicLong timedOutHeartbeats = new AtomicLong(0);
    private final AtomicLong operationsStarted = new AtomicLong(0);
@@ -109,6 +114,7 @@ public class QuorumControllerMetrics implements AutoCloseable {
        this.eventQueueTimeUpdater = newHistogram(EVENT_QUEUE_TIME_MS, true);
        this.eventQueueProcessingTimeUpdater = newHistogram(EVENT_QUEUE_PROCESSING_TIME_MS, true);
        this.sessionTimeoutMs = sessionTimeoutMs;
+        this.avgIdleTimeRatio = new TimeRatio(1);
        registry.ifPresent(r -> r.newGauge(LAST_APPLIED_RECORD_OFFSET, new Gauge<Long>() {
            @Override
            public Long value() {
@@ -157,6 +163,20 @@ public class QuorumControllerMetrics implements AutoCloseable {
                return newActiveControllers();
            }
        }));
+        registry.ifPresent(r -> r.newGauge(AVERAGE_IDLE_RATIO, new Gauge<Double>() {
+            @Override
+            public Double value() {
+                synchronized (avgIdleTimeRatio) {
+                    return avgIdleTimeRatio.measure();
+                }
+            }
+        }));
+    }
+
+    public void updateIdleTime(long idleDurationMs) {
+        synchronized (avgIdleTimeRatio) {
+            avgIdleTimeRatio.record((double) idleDurationMs, time.milliseconds());
+        }
    }
 
    public void addTimeSinceLastHeartbeatMetric(int brokerId) {
@@ -291,7 +311,8 @@ public class QuorumControllerMetrics implements AutoCloseable {
            TIMED_OUT_BROKER_HEARTBEAT_COUNT,
            EVENT_QUEUE_OPERATIONS_STARTED_COUNT,
            EVENT_QUEUE_OPERATIONS_TIMED_OUT_COUNT,
-            NEW_ACTIVE_CONTROLLERS_COUNT
+            NEW_ACTIVE_CONTROLLERS_COUNT,
+            AVERAGE_IDLE_RATIO
        ).forEach(r::removeMetric));
        removeTimeSinceLastHeartbeatMetrics();
    }
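The new AvgIdleRatio gauge is backed by a TimeRatio: the controller event loop reports how long it sat idle, and the gauge divides the accumulated idle time by the wall-clock interval since the last measurement (the first sample only establishes the interval start, which is why the test below discards it). A rough stand-alone sketch of that bookkeeping follows; it matches the arithmetic the test expects (20/40 = 0.5, 1/20 = 0.05) but is an assumed model, not the actual TimeRatio implementation.

/** Accumulates durations and reports them as a fraction of the elapsed interval. */
final class IdleRatioSketch {
    private final double defaultRatio;   // returned before anything has been recorded
    private double accumulatedMs;        // idle millis recorded since the last measure()
    private long intervalStartMs = -1;
    private long lastRecordedMs = -1;

    IdleRatioSketch(double defaultRatio) {
        this.defaultRatio = defaultRatio;
    }

    synchronized void record(double durationMs, long nowMs) {
        if (intervalStartMs < 0) {
            // The first sample only establishes the start of the interval.
            intervalStartMs = nowMs;
        } else {
            accumulatedMs += durationMs;
        }
        lastRecordedMs = nowMs;
    }

    synchronized double measure() {
        if (intervalStartMs < 0 || lastRecordedMs <= intervalStartMs) {
            return defaultRatio;
        }
        double ratio = accumulatedMs / (lastRecordedMs - intervalStartMs);
        // Start a fresh interval from the last recorded timestamp.
        intervalStartMs = lastRecordedMs;
        accumulatedMs = 0;
        return Math.min(1.0, ratio);
    }
}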
@@ -822,6 +822,48 @@ public class PartitionChangeBuilderTest {
        }
    }
 
+    @Test
+    public void testEligibleLeaderReplicas_lastKnownElrShouldBePopulatedWhenNoLeader() {
+        PartitionRegistration partition = new PartitionRegistration.Builder()
+            .setReplicas(new int[] {1, 2, 3})
+            .setDirectories(new Uuid[] {
+                DirectoryId.UNASSIGNED,
+                DirectoryId.UNASSIGNED,
+                DirectoryId.UNASSIGNED
+            })
+            .setIsr(new int[] {1})
+            .setElr(new int[] {2})
+            .setLeader(1)
+            .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED)
+            .setLeaderEpoch(100)
+            .setPartitionEpoch(200)
+            .build();
+
+        short version = 2; // ELR supported
+        Uuid topicId = Uuid.fromString("FbrrdcfiR-KC2CPSTHaJrg");
+
+        // No replica is acceptable as leader, so election yields NO_LEADER.
+        // We intentionally do not change target ISR so record.isr remains null.
+        PartitionChangeBuilder builder = new PartitionChangeBuilder(partition, topicId, 0, r -> false,
+            metadataVersionForPartitionChangeRecordVersion(version), 3)
+            .setElection(Election.PREFERRED)
+            .setEligibleLeaderReplicasEnabled(isElrEnabled(version))
+            .setDefaultDirProvider(DEFAULT_DIR_PROVIDER)
+            .setUseLastKnownLeaderInBalancedRecovery(true);
+
+        ApiMessageAndVersion change = builder.build().get();
+        PartitionChangeRecord record = (PartitionChangeRecord) change.message();
+
+        assertEquals(NO_LEADER, record.leader());
+        // There is no ISR update if we do not perform the leader verification on the ISR members.
+        assertNull(record.isr(), record.toString());
+        assertEquals(1, record.lastKnownElr().size(), record.toString());
+        assertEquals(1, record.lastKnownElr().get(0), record.toString());
+        partition = partition.merge((PartitionChangeRecord) builder.build().get().message());
+        assertArrayEquals(new int[] {1}, partition.lastKnownElr);
+    }
+
+
    @ParameterizedTest
    @MethodSource("partitionChangeRecordVersions")
    public void testEligibleLeaderReplicas_IsrExpandAboveMinISR(short version) {
@@ -45,6 +45,7 @@ public class QuorumControllerMetricsTest {
         Set<String> expected = Set.of(
             "kafka.controller:type=ControllerEventManager,name=EventQueueProcessingTimeMs",
             "kafka.controller:type=ControllerEventManager,name=EventQueueTimeMs",
+            "kafka.controller:type=ControllerEventManager,name=AvgIdleRatio",
             "kafka.controller:type=KafkaController,name=ActiveControllerCount",
             "kafka.controller:type=KafkaController,name=EventQueueOperationsStartedCount",
             "kafka.controller:type=KafkaController,name=EventQueueOperationsTimedOutCount",
@@ -189,6 +190,35 @@ public class QuorumControllerMetricsTest {
         }
     }
 
+    @Test
+    public void testAvgIdleRatio() {
+        final double delta = 0.001;
+        MetricsRegistry registry = new MetricsRegistry();
+        MockTime time = new MockTime();
+        try (QuorumControllerMetrics metrics = new QuorumControllerMetrics(Optional.of(registry), time, 9000)) {
+            Gauge<Double> avgIdleRatio = (Gauge<Double>) registry.allMetrics().get(metricName("ControllerEventManager", "AvgIdleRatio"));
+
+            // No idle time recorded yet; returns default ratio of 1.0
+            assertEquals(1.0, avgIdleRatio.value(), delta);
+
+            // First recording is dropped to establish the interval start time
+            // This is because TimeRatio needs an initial timestamp to measure intervals from
+            metrics.updateIdleTime(10);
+            time.sleep(40);
+            metrics.updateIdleTime(20);
+            // avgIdleRatio = (20ms idle) / (40ms interval) = 0.5
+            assertEquals(0.5, avgIdleRatio.value(), delta);
+
+            time.sleep(20);
+            metrics.updateIdleTime(1);
+            // avgIdleRatio = (1ms idle) / (20ms interval) = 0.05
+            assertEquals(0.05, avgIdleRatio.value(), delta);
+
+        } finally {
+            registry.shutdown();
+        }
+    }
+
     private static void assertMetricHistogram(MetricsRegistry registry, MetricName metricName, long count, double sum) {
         Histogram histogram = (Histogram) registry.allMetrics().get(metricName);
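The test above exercises the new AvgIdleRatio gauge end to end. The exact wiring inside QuorumControllerMetrics is not part of this excerpt, so the following is only an illustrative sketch of the pattern it implies: idle durations are recorded into a TimeRatio and the gauge reports the ratio on demand. The class and field names below are hypothetical; only TimeRatio and its record/measure methods come from this change set, and the single-argument constructor is assumed from the pre-existing class.

import org.apache.kafka.common.utils.Time;
import org.apache.kafka.server.metrics.TimeRatio;

// Hypothetical wrapper showing the pattern: record idle durations, expose the ratio as a gauge value.
public class IdleRatioTracker {
    private final Time time;
    private final TimeRatio avgIdleRatio = new TimeRatio(1.0); // default ratio reported before any samples

    public IdleRatioTracker(Time time) {
        this.time = time;
    }

    // Analogous to QuorumControllerMetrics#updateIdleTime in the test above.
    public void updateIdleTime(long idleDurationMs) {
        avgIdleRatio.record((double) idleDurationMs, time.milliseconds());
    }

    // A gauge's value() callback would simply return this.
    public double avgIdleRatio() {
        return avgIdleRatio.measure();
    }
}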
@@ -29,6 +29,7 @@ import org.apache.kafka.raft.LogOffsetMetadata;
 import org.apache.kafka.raft.QuorumState;
 import org.apache.kafka.raft.ReplicaKey;
 import org.apache.kafka.server.common.OffsetAndEpoch;
+import org.apache.kafka.server.metrics.TimeRatio;
 
 import java.util.List;
 import java.util.OptionalLong;
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.logger;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple class that sets logIdent appropriately depending on whether the state change logger is being used in the
+ * context of the broker (e.g. ReplicaManager and Partition).
+ */
+public class StateChangeLogger {
+    private static final Logger LOGGER = LoggerFactory.getLogger("state.change.logger");
+
+    private final String logIdent;
+
+    public StateChangeLogger(int brokerId) {
+        this.logIdent = String.format("[Broker id=%d] ", brokerId);
+    }
+
+    public void trace(String message) {
+        LOGGER.info("{}{}", logIdent, message);
+    }
+
+    public void info(String message) {
+        LOGGER.info("{}{}", logIdent, message);
+    }
+
+    public void warn(String message) {
+        LOGGER.warn("{}{}", logIdent, message);
+    }
+
+    public void error(String message) {
+        LOGGER.error("{}{}", logIdent, message);
+    }
+
+    public void error(String message, Throwable e) {
+        LOGGER.error("{}{}", logIdent, message, e);
+    }
+}
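A minimal usage sketch of the new class; the call site below is illustrative and not part of this commit, only the StateChangeLogger API shown in the hunk above is real.

import org.apache.kafka.logger.StateChangeLogger;

public class StateChangeLoggerExample {
    public static void main(String[] args) {
        // Assumed call site: a broker-side component that knows its broker id.
        StateChangeLogger stateChangeLogger = new StateChangeLogger(1);
        stateChangeLogger.info("Completed transition to leader for partition foo-0");
        // Output goes to the dedicated "state.change.logger" logger, prefixed with "[Broker id=1] ".
    }
}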
@@ -33,6 +33,7 @@ import java.util.TreeMap;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.UnaryOperator;
 
@@ -278,22 +279,22 @@ public final class KafkaEventQueue implements EventQueue {
                     remove(toRun);
                     continue;
                 }
+
+                long startIdleMs = time.milliseconds();
+                try {
                     if (awaitNs == Long.MAX_VALUE) {
-                        try {
                         cond.await();
-                        } catch (InterruptedException e) {
-                            log.warn("Interrupted while waiting for a new event. " +
-                                "Shutting down event queue");
-                            interrupted = true;
-                        }
                     } else {
-                        try {
                         cond.awaitNanos(awaitNs);
-                        } catch (InterruptedException e) {
-                            log.warn("Interrupted while waiting for a deferred event. " +
-                                "Shutting down event queue");
-                            interrupted = true;
-                        }
                     }
+                } catch (InterruptedException e) {
+                    log.warn(
+                        "Interrupted while waiting for a {} event. Shutting down event queue",
+                        (awaitNs == Long.MAX_VALUE) ? "new" : "deferred"
+                    );
+                    interrupted = true;
+                } finally {
+                    idleTimeCallback.accept(Math.max(time.milliseconds() - startIdleMs, 0));
                 }
             } finally {
                 lock.unlock();
@@ -440,12 +441,18 @@ public final class KafkaEventQueue implements EventQueue {
      */
     private boolean interrupted;
 
+    /**
+     * Optional callback for queue idle time tracking.
+     */
+    private final Consumer<Long> idleTimeCallback;
+
+
     public KafkaEventQueue(
         Time time,
         LogContext logContext,
         String threadNamePrefix
     ) {
-        this(time, logContext, threadNamePrefix, VoidEvent.INSTANCE);
+        this(time, logContext, threadNamePrefix, VoidEvent.INSTANCE, __ -> { });
     }
 
     public KafkaEventQueue(
@@ -453,6 +460,16 @@ public final class KafkaEventQueue implements EventQueue {
         LogContext logContext,
         String threadNamePrefix,
         Event cleanupEvent
+    ) {
+        this(time, logContext, threadNamePrefix, cleanupEvent, __ -> { });
+    }
+
+    public KafkaEventQueue(
+        Time time,
+        LogContext logContext,
+        String threadNamePrefix,
+        Event cleanupEvent,
+        Consumer<Long> idleTimeCallback
     ) {
         this.time = time;
         this.cleanupEvent = Objects.requireNonNull(cleanupEvent);
@@ -463,6 +480,7 @@ public final class KafkaEventQueue implements EventQueue {
             this.eventHandler, false);
         this.shuttingDown = false;
         this.interrupted = false;
+        this.idleTimeCallback = Objects.requireNonNull(idleTimeCallback);
         this.eventHandlerThread.start();
     }
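A sketch of how a caller can wire the new constructor parameter; the call site is assumed (it is not shown in this excerpt), and the usual org.apache.kafka.queue packaging of KafkaEventQueue is assumed. The callback receives the milliseconds the handler thread spent waiting for work, as measured in the finally block above.

import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.queue.EventQueue;
import org.apache.kafka.queue.KafkaEventQueue;

public class IdleTrackingQueueExample {
    public static void main(String[] args) throws Exception {
        // Here the idle time is just printed; a real caller would feed it into a metric
        // such as the controller's AvgIdleRatio gauge.
        KafkaEventQueue queue = new KafkaEventQueue(
            Time.SYSTEM,
            new LogContext("[example] "),
            "example-queue-",
            EventQueue.VoidEvent.INSTANCE,
            idleMs -> System.out.println("queue was idle for " + idleMs + " ms"));
        queue.append(() -> System.out.println("handled an event"));
        queue.close();
    }
}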
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.kafka.raft.internals;
+package org.apache.kafka.server.metrics;
 
 import org.apache.kafka.common.metrics.MeasurableStat;
 import org.apache.kafka.common.metrics.MetricConfig;
@@ -46,11 +46,26 @@ public class TimeRatio implements MeasurableStat {
 
     @Override
     public double measure(MetricConfig config, long currentTimestampMs) {
+        return measure();
+    }
+
+    @Override
+    public void record(MetricConfig config, double value, long currentTimestampMs) {
+        record(value, currentTimestampMs);
+    }
+
+    /**
+     * Measures the ratio of recorded duration to the interval duration
+     * since the last measurement.
+     *
+     * @return The measured ratio value between 0.0 and 1.0
+     */
+    public double measure() {
         if (lastRecordedTimestampMs < 0) {
             // Return the default value if no recordings have been captured.
             return defaultRatio;
         } else {
-            // We measure the ratio over the
+            // We measure the ratio over the interval
             double intervalDurationMs = Math.max(lastRecordedTimestampMs - intervalStartTimestampMs, 0);
             final double ratio;
             if (intervalDurationMs == 0) {
@@ -61,15 +76,20 @@ public class TimeRatio implements MeasurableStat {
                 ratio = totalRecordedDurationMs / intervalDurationMs;
             }
 
-            // The next interval begins at the
+            // The next interval begins at the last recorded timestamp
             intervalStartTimestampMs = lastRecordedTimestampMs;
             totalRecordedDurationMs = 0;
             return ratio;
         }
     }
 
-    @Override
-    public void record(MetricConfig config, double value, long currentTimestampMs) {
+    /**
+     * Records a duration value at the specified timestamp.
+     *
+     * @param value The duration value to record
+     * @param currentTimestampMs The current timestamp in milliseconds
+     */
+    public void record(double value, long currentTimestampMs) {
         if (intervalStartTimestampMs < 0) {
             // Discard the initial value since the value occurred prior to the interval start
             intervalStartTimestampMs = currentTimestampMs;
@@ -78,5 +98,4 @@ public class TimeRatio implements MeasurableStat {
         lastRecordedTimestampMs = currentTimestampMs;
         }
     }
-
 }
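A short, self-contained sketch of the arithmetic the relocated class implements; the values mirror the controller metrics test earlier in this diff. The single-argument constructor taking a default ratio is assumed from the pre-existing class; record and measure are the methods shown in the hunk above.

import org.apache.kafka.server.metrics.TimeRatio;

public class TimeRatioExample {
    public static void main(String[] args) {
        TimeRatio ratio = new TimeRatio(1.0); // assumed existing constructor taking the default ratio

        System.out.println(ratio.measure());  // 1.0 -- nothing recorded yet, so the default is returned

        ratio.record(10, 0);                  // first sample only establishes the interval start (t = 0 ms)
        ratio.record(20, 40);                 // 20 ms of recorded time at t = 40 ms
        System.out.println(ratio.measure());  // 20 / 40 = 0.5, and a new interval starts at t = 40 ms

        ratio.record(1, 60);                  // 1 ms recorded at t = 60 ms
        System.out.println(ratio.measure());  // 1 / 20 = 0.05
    }
}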
@@ -424,4 +424,48 @@ public class KafkaEventQueueTest {
         assertEquals(InterruptedException.class, ieTrapper2.exception.get().getClass());
         }
     }
+
+    @Test
+    public void testIdleTimeCallback() throws Exception {
+        MockTime time = new MockTime();
+        AtomicLong lastIdleTimeMs = new AtomicLong(0);
+
+        try (KafkaEventQueue queue = new KafkaEventQueue(
+                time,
+                logContext,
+                "testIdleTimeCallback",
+                EventQueue.VoidEvent.INSTANCE,
+                lastIdleTimeMs::set)) {
+            time.sleep(2);
+            assertEquals(0, lastIdleTimeMs.get(), "Last idle time should be 0ms");
+
+            // Test 1: Two events with a wait in between using FutureEvent
+            CompletableFuture<String> event1 = new CompletableFuture<>();
+            queue.append(new FutureEvent<>(event1, () -> {
+                time.sleep(1);
+                return "event1-processed";
+            }));
+            assertEquals("event1-processed", event1.get());
+
+            long waitTime5Ms = 5;
+            time.sleep(waitTime5Ms);
+            CompletableFuture<String> event2 = new CompletableFuture<>();
+            queue.append(new FutureEvent<>(event2, () -> {
+                time.sleep(1);
+                return "event2-processed";
+            }));
+            assertEquals("event2-processed", event2.get());
+            assertEquals(waitTime5Ms, lastIdleTimeMs.get(), "Idle time should be " + waitTime5Ms + "ms, was: " + lastIdleTimeMs.get());
+
+            // Test 2: Deferred event
+            long waitTime2Ms = 2;
+            CompletableFuture<Void> deferredEvent2 = new CompletableFuture<>();
+            queue.scheduleDeferred("deferred2",
+                __ -> OptionalLong.of(time.nanoseconds() + TimeUnit.MILLISECONDS.toNanos(waitTime2Ms)),
+                () -> deferredEvent2.complete(null));
+            time.sleep(waitTime2Ms);
+            deferredEvent2.get();
+            assertEquals(waitTime2Ms, lastIdleTimeMs.get(), "Idle time should be " + waitTime2Ms + "ms, was: " + lastIdleTimeMs.get());
+        }
+    }
 }
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.kafka.raft.internals;
+package org.apache.kafka.server.metrics;
 
 import org.apache.kafka.common.metrics.MetricConfig;
 import org.apache.kafka.common.utils.MockTime;
@@ -72,9 +72,10 @@ import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.junit.jupiter.params.provider.ValueSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -169,6 +170,15 @@ public class EosIntegrationTest {
 
     private String stateTmpDir;
 
+    private static java.util.stream.Stream<Arguments> groupProtocolAndProcessingThreadsParameters() {
+        return java.util.stream.Stream.of(
+            Arguments.of("classic", true),
+            Arguments.of("classic", false),
+            Arguments.of("streams", true),
+            Arguments.of("streams", false)
+        );
+    }
+
     @BeforeEach
     public void createTopics() throws Exception {
         applicationId = "appId-" + TEST_NUMBER.getAndIncrement();
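For readers less familiar with JUnit 5 parameter providers, the sketch below shows how a provider like the one above drives test invocations; the class and method names here are illustrative, while the real usages appear in the hunks that follow.

import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

class MatrixExampleTest {
    private static Stream<Arguments> groupProtocolAndProcessingThreadsParameters() {
        return Stream.of(
            Arguments.of("classic", true),
            Arguments.of("streams", false));
    }

    // Invoked once per Arguments entry, so a test runs for each (protocol, threading) combination.
    @ParameterizedTest
    @MethodSource("groupProtocolAndProcessingThreadsParameters")
    void runsForEachCombination(final String groupProtocol, final boolean processingThreadsEnabled) {
        // test body would use groupProtocol and processingThreadsEnabled
    }
}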
@@ -181,16 +191,19 @@ public class EosIntegrationTest {
         CLUSTER.createTopic(MULTI_PARTITION_INPUT_TOPIC, NUM_TOPIC_PARTITIONS, 1);
         CLUSTER.createTopic(MULTI_PARTITION_THROUGH_TOPIC, NUM_TOPIC_PARTITIONS, 1);
         CLUSTER.createTopic(MULTI_PARTITION_OUTPUT_TOPIC, NUM_TOPIC_PARTITIONS, 1);
+        CLUSTER.setGroupStandbyReplicas(applicationId, 1);
     }
 
-    @Test
-    public void shouldBeAbleToRunWithEosEnabled() throws Exception {
-        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToRunWithEosEnabled(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
-    @Test
-    public void shouldCommitCorrectOffsetIfInputTopicIsTransactional() throws Exception {
-        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, true);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldCommitCorrectOffsetIfInputTopicIsTransactional(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, true, groupProtocol);
 
         try (final Admin adminClient = Admin.create(mkMap(mkEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
              final Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(mkMap(
@@ -215,36 +228,42 @@ public class EosIntegrationTest {
         }
     }
 
-    @Test
-    public void shouldBeAbleToRestartAfterClose() throws Exception {
-        runSimpleCopyTest(2, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToRestartAfterClose(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(2, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
-    @Test
-    public void shouldBeAbleToCommitToMultiplePartitions() throws Exception {
-        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, MULTI_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToCommitToMultiplePartitions(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, MULTI_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
-    @Test
-    public void shouldBeAbleToCommitMultiplePartitionOffsets() throws Exception {
-        runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToCommitMultiplePartitionOffsets(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
-    @Test
-    public void shouldBeAbleToRunWithTwoSubtopologies() throws Exception {
-        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, SINGLE_PARTITION_THROUGH_TOPIC, SINGLE_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToRunWithTwoSubtopologies(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, SINGLE_PARTITION_THROUGH_TOPIC, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
-    @Test
-    public void shouldBeAbleToRunWithTwoSubtopologiesAndMultiplePartitions() throws Exception {
-        runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, MULTI_PARTITION_THROUGH_TOPIC, MULTI_PARTITION_OUTPUT_TOPIC, false);
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToRunWithTwoSubtopologiesAndMultiplePartitions(final String groupProtocol) throws Exception {
+        runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, MULTI_PARTITION_THROUGH_TOPIC, MULTI_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
     }
 
     private void runSimpleCopyTest(final int numberOfRestarts,
                                    final String inputTopic,
                                    final String throughTopic,
                                    final String outputTopic,
-                                   final boolean inputTopicTransactional) throws Exception {
+                                   final boolean inputTopicTransactional,
+                                   final String groupProtocol) throws Exception {
         final StreamsBuilder builder = new StreamsBuilder();
         final KStream<Long, Long> input = builder.stream(inputTopic);
         KStream<Long, Long> output = input;
@@ -263,6 +282,7 @@ public class EosIntegrationTest {
         properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
         properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), MAX_POLL_INTERVAL_MS - 1);
         properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
+        properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
 
         for (int i = 0; i < numberOfRestarts; ++i) {
             final Properties config = StreamsTestUtils.getStreamsConfig(
@@ -326,8 +346,9 @@ public class EosIntegrationTest {
         return recordsPerKey;
     }
 
-    @Test
-    public void shouldBeAbleToPerformMultipleTransactions() throws Exception {
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldBeAbleToPerformMultipleTransactions(final String groupProtocol) throws Exception {
         final StreamsBuilder builder = new StreamsBuilder();
         builder.stream(SINGLE_PARTITION_INPUT_TOPIC).to(SINGLE_PARTITION_OUTPUT_TOPIC);
 
@@ -337,6 +358,7 @@ public class EosIntegrationTest {
         properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
         properties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
         properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
 
         final Properties config = StreamsTestUtils.getStreamsConfig(
             applicationId,
@@ -374,8 +396,8 @@ public class EosIntegrationTest {
     }
 
     @ParameterizedTest
-    @ValueSource(booleans = {true, false})
-    public void shouldNotViolateEosIfOneTaskFails(final boolean processingThreadsEnabled) throws Exception {
+    @MethodSource("groupProtocolAndProcessingThreadsParameters")
+    public void shouldNotViolateEosIfOneTaskFails(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
 
         // this test writes 10 + 5 + 5 records per partition (running with 2 partitions)
         // the app is supposed to copy all 40 records into the output topic
@@ -386,7 +408,7 @@ public class EosIntegrationTest {
         // -> the failure only kills one thread
         // after fail over, we should read 40 committed records (even if 50 record got written)
 
-        try (final KafkaStreams streams = getKafkaStreams("dummy", false, "appDir", 2, processingThreadsEnabled)) {
+        try (final KafkaStreams streams = getKafkaStreams("dummy", false, "appDir", 2, groupProtocol, processingThreadsEnabled)) {
             startApplicationAndWaitUntilRunning(streams);
 
             final List<KeyValue<Long, Long>> committedDataBeforeFailure = prepareData(0L, 10L, 0L, 1L);
@@ -476,8 +498,8 @@ public class EosIntegrationTest {
     }
 
     @ParameterizedTest
-    @ValueSource(booleans = {true, false})
-    public void shouldNotViolateEosIfOneTaskFailsWithState(final boolean processingThreadsEnabled) throws Exception {
+    @MethodSource("groupProtocolAndProcessingThreadsParameters")
+    public void shouldNotViolateEosIfOneTaskFailsWithState(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
 
         // this test updates a store with 10 + 5 + 5 records per partition (running with 2 partitions)
         // the app is supposed to emit all 40 update records into the output topic
@@ -493,7 +515,7 @@ public class EosIntegrationTest {
 
         // We need more processing time under "with state" situation, so increasing the max.poll.interval.ms
         // to avoid unexpected rebalance during test, which will cause unexpected fail over triggered
-        try (final KafkaStreams streams = getKafkaStreams("dummy", true, "appDir", 2, processingThreadsEnabled)) {
+        try (final KafkaStreams streams = getKafkaStreams("dummy", true, "appDir", 2, groupProtocol, processingThreadsEnabled)) {
             startApplicationAndWaitUntilRunning(streams);
 
             final List<KeyValue<Long, Long>> committedDataBeforeFailure = prepareData(0L, 10L, 0L, 1L);
@@ -594,8 +616,8 @@ public class EosIntegrationTest {
     }
 
     @ParameterizedTest
-    @ValueSource(booleans = {true, false})
-    public void shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances(final boolean processingThreadsEnabled) throws Exception {
+    @MethodSource("groupProtocolAndProcessingThreadsParameters")
+    public void shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
         // this test writes 10 + 5 + 5 + 10 records per partition (running with 2 partitions)
         // the app is supposed to copy all 60 records into the output topic
         //
@@ -607,10 +629,9 @@ public class EosIntegrationTest {
         //
         // afterward, the "stalling" thread resumes, and another rebalance should get triggered
         // we write the remaining 20 records and verify to read 60 result records
-
         try (
-            final KafkaStreams streams1 = getKafkaStreams("streams1", false, "appDir1", 1, processingThreadsEnabled);
-            final KafkaStreams streams2 = getKafkaStreams("streams2", false, "appDir2", 1, processingThreadsEnabled)
+            final KafkaStreams streams1 = getKafkaStreams("streams1", false, "appDir1", 1, groupProtocol, processingThreadsEnabled);
+            final KafkaStreams streams2 = getKafkaStreams("streams2", false, "appDir2", 1, groupProtocol, processingThreadsEnabled)
         ) {
             startApplicationAndWaitUntilRunning(streams1);
             startApplicationAndWaitUntilRunning(streams2);
@@ -667,13 +688,10 @@ public class EosIntegrationTest {
                 "Expected a host to start stalling"
             );
             final String observedStallingHost = stallingHost.get();
-            final KafkaStreams stallingInstance;
             final KafkaStreams remainingInstance;
             if ("streams1".equals(observedStallingHost)) {
-                stallingInstance = streams1;
                 remainingInstance = streams2;
             } else if ("streams2".equals(observedStallingHost)) {
-                stallingInstance = streams2;
                 remainingInstance = streams1;
             } else {
                 throw new IllegalArgumentException("unexpected host name: " + observedStallingHost);
@@ -683,8 +701,7 @@ public class EosIntegrationTest {
             // the assignment is. We only really care that the remaining instance only sees one host
             // that owns both partitions.
             waitForCondition(
-                () -> stallingInstance.metadataForAllStreamsClients().size() == 2
-                    && remainingInstance.metadataForAllStreamsClients().size() == 1
+                () -> remainingInstance.metadataForAllStreamsClients().size() == 1
                     && remainingInstance.metadataForAllStreamsClients().iterator().next().topicPartitions().size() == 2,
                 MAX_WAIT_TIME_MS,
                 () -> "Should have rebalanced.\n" +
@@ -755,12 +772,12 @@ public class EosIntegrationTest {
     }
 
     @ParameterizedTest
-    @ValueSource(booleans = {true, false})
-    public void shouldWriteLatestOffsetsToCheckpointOnShutdown(final boolean processingThreadsEnabled) throws Exception {
+    @MethodSource("groupProtocolAndProcessingThreadsParameters")
+    public void shouldWriteLatestOffsetsToCheckpointOnShutdown(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
         final List<KeyValue<Long, Long>> writtenData = prepareData(0L, 10, 0L, 1L);
         final List<KeyValue<Long, Long>> expectedResult = computeExpectedResult(writtenData);
 
-        try (final KafkaStreams streams = getKafkaStreams("streams", true, "appDir", 1, processingThreadsEnabled)) {
+        try (final KafkaStreams streams = getKafkaStreams("streams", true, "appDir", 1, groupProtocol, processingThreadsEnabled)) {
             writeInputData(writtenData);
 
             startApplicationAndWaitUntilRunning(streams);
@@ -787,9 +804,9 @@ public class EosIntegrationTest {
     }
 
     @ParameterizedTest
-    @ValueSource(booleans = {true, false})
+    @MethodSource("groupProtocolAndProcessingThreadsParameters")
     public void shouldCheckpointRestoredOffsetsWhenClosingCleanDuringRestoring(
-        final boolean processingThreadsEnabled) throws Exception {
+        final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
 
         final Properties streamsConfiguration = new Properties();
         streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
@@ -801,6 +818,7 @@ public class EosIntegrationTest {
         streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
         streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(applicationId).getPath());
         streamsConfiguration.put(InternalConfig.PROCESSING_THREADS_ENABLED, processingThreadsEnabled);
+        streamsConfiguration.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
         streamsConfiguration.put(StreamsConfig.restoreConsumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);
         final String stateStoreName = "stateStore";
 
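Across all of the hunks above, the behavioral difference between the two parameterized runs comes down to one configuration entry. The sketch below isolates it; only the two StreamsConfig property names are taken from the diff, the remaining setup is illustrative.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class GroupProtocolConfigExample {
    public static void main(String[] args) {
        final String groupProtocol = "streams"; // or "classic", as injected by the parameterized tests
        final Properties properties = new Properties();
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-example-app");
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
        System.out.println(properties);
    }
}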
@@ -934,8 +952,13 @@ public class EosIntegrationTest {
     static final AtomicReference<TaskId> TASK_WITH_DATA = new AtomicReference<>();
     static final AtomicBoolean DID_REVOKE_IDLE_TASK = new AtomicBoolean(false);
 
-    @Test
-    public void shouldNotCommitActiveTasksWithPendingInputIfRevokedTaskDidNotMakeProgress() throws Exception {
+    @ParameterizedTest
+    @ValueSource(strings = {"classic", "streams"})
+    public void shouldNotCommitActiveTasksWithPendingInputIfRevokedTaskDidNotMakeProgress(final String groupProtocol) throws Exception {
+        // Reset static variables to ensure test isolation
+        TASK_WITH_DATA.set(null);
+        DID_REVOKE_IDLE_TASK.set(false);
+
         final AtomicBoolean requestCommit = new AtomicBoolean(false);
 
         final StreamsBuilder builder = new StreamsBuilder();
@@ -970,6 +993,7 @@ public class EosIntegrationTest {
         properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
         properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), Integer.MAX_VALUE);
         properties.put(StreamsConfig.TASK_ASSIGNOR_CLASS_CONFIG, TestTaskAssignor.class.getName());
+        properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
 
         final Properties config = StreamsTestUtils.getStreamsConfig(
             applicationId,
@@ -1003,9 +1027,9 @@ public class EosIntegrationTest {
             // add second thread, to trigger rebalance
             // expect idle task to get revoked -- this should not trigger a TX commit
             streams.addStreamThread();
+            if (groupProtocol.equals("classic")) {
                 waitForCondition(DID_REVOKE_IDLE_TASK::get, "Idle Task was not revoked as expected.");
+            }
             // best-effort sanity check (might pass and not detect issue in slow environments)
             try {
                 readResult(SINGLE_PARTITION_OUTPUT_TOPIC, 1, "consumer", 10_000L);
@@ -1104,6 +1128,7 @@ public class EosIntegrationTest {
                                           final boolean withState,
                                           final String appDir,
                                           final int numberOfStreamsThreads,
                                           final String groupProtocol,
                                           final boolean processingThreadsEnabled) {
         commitRequested = new AtomicInteger(0);
         errorInjected = new AtomicBoolean(false);
@@ -1212,6 +1237,7 @@ public class EosIntegrationTest {
         properties.put(StreamsConfig.STATE_DIR_CONFIG, stateTmpDir + appDir);
         properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, dummyHostName + ":2142");
         properties.put(InternalConfig.PROCESSING_THREADS_ENABLED, processingThreadsEnabled);
+        properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
 
         final Properties config = StreamsTestUtils.getStreamsConfig(
             applicationId,
@@ -112,6 +112,9 @@ import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.wa
 import static org.apache.kafka.streams.utils.TestUtils.safeUniqueTestName;
 import static org.apache.kafka.test.TestUtils.waitForCondition;
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.core.IsEqual.equalTo;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
@@ -685,6 +688,52 @@ public class RestoreIntegrationTest {
         }
     }
 
+    @ParameterizedTest
+    @ValueSource(booleans = {true, false})
+    public void shouldRecordRestoreMetrics(final boolean useNewProtocol) throws Exception {
+        final AtomicInteger numReceived = new AtomicInteger(0);
+        final StreamsBuilder builder = new StreamsBuilder();
+
+        final Properties props = props();
+
+        if (useNewProtocol) {
+            props.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.STREAMS.name());
+        }
+
+        props.put(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG");
+
+        createStateForRestoration(inputStream, 10000);
+
+        final CountDownLatch shutdownLatch = new CountDownLatch(1);
+        builder.table(inputStream, Consumed.with(Serdes.Integer(), Serdes.Integer()), Materialized.as("store"))
+            .toStream()
+            .foreach((key, value) -> {
+                if (numReceived.incrementAndGet() == numberOfKeys) {
+                    shutdownLatch.countDown();
+                }
+            });
+
+        kafkaStreams = new KafkaStreams(builder.build(), props);
+
+        final AtomicLong restored = new AtomicLong(0);
+        final TrackingStateRestoreListener restoreListener = new TrackingStateRestoreListener(restored);
+        kafkaStreams.setGlobalStateRestoreListener(restoreListener);
+        kafkaStreams.start();
+
+        assertTrue(shutdownLatch.await(30, TimeUnit.SECONDS));
+        assertThat(numReceived.get(), equalTo(numberOfKeys));
+
+        final Map<String, Long> taskIdToMetricValue = kafkaStreams.metrics().entrySet().stream()
+            .filter(e -> e.getKey().name().equals("restore-latency-max"))
+            .collect(Collectors.toMap(e -> e.getKey().tags().get("task-id"), e -> ((Double) e.getValue().metricValue()).longValue()));
+
+        for (final Map.Entry<TopicPartition, Long> entry : restoreListener.changelogToRestoreTime().entrySet()) {
+            final long lowerBound = entry.getValue() - TimeUnit.NANOSECONDS.convert(1, TimeUnit.SECONDS);
+            final long upperBound = entry.getValue() + TimeUnit.NANOSECONDS.convert(1, TimeUnit.SECONDS);
+            assertThat(taskIdToMetricValue.get("0_" + entry.getKey().partition()), allOf(greaterThanOrEqualTo(lowerBound), lessThanOrEqualTo(upperBound)));
+        }
+    }
+
     private void validateReceivedMessages(final List<KeyValue<Integer, Integer>> expectedRecords,
                                           final String outputTopic) throws Exception {
         final Properties consumerProperties = new Properties();
@@ -1337,6 +1337,8 @@ public class IntegrationTestUtils {
         public final Map<TopicPartition, AtomicLong> changelogToStartOffset = new ConcurrentHashMap<>();
         public final Map<TopicPartition, AtomicLong> changelogToEndOffset = new ConcurrentHashMap<>();
         public final Map<TopicPartition, AtomicLong> changelogToTotalNumRestored = new ConcurrentHashMap<>();
+        private final Map<TopicPartition, AtomicLong> changelogToRestoreStartTime = new ConcurrentHashMap<>();
+        private final Map<TopicPartition, AtomicLong> changelogToRestoreEndTime = new ConcurrentHashMap<>();
         private final AtomicLong restored;
 
         public TrackingStateRestoreListener() {
@@ -1355,6 +1357,7 @@ public class IntegrationTestUtils {
             changelogToStartOffset.put(topicPartition, new AtomicLong(startingOffset));
             changelogToEndOffset.put(topicPartition, new AtomicLong(endingOffset));
             changelogToTotalNumRestored.put(topicPartition, new AtomicLong(0L));
+            changelogToRestoreStartTime.put(topicPartition, new AtomicLong(System.nanoTime()));
         }
 
         @Override
@@ -1372,6 +1375,7 @@ public class IntegrationTestUtils {
             if (restored != null) {
                 restored.addAndGet(totalRestored);
             }
+            changelogToRestoreEndTime.put(topicPartition, new AtomicLong(System.nanoTime()));
         }
 
         public long totalNumRestored() {
@@ -1381,6 +1385,11 @@ public class IntegrationTestUtils {
             }
             return totalNumRestored;
         }
+
+        public Map<TopicPartition, Long> changelogToRestoreTime() {
+            return changelogToRestoreStartTime.entrySet().stream()
+                .collect(Collectors.toMap(Map.Entry::getKey, e -> changelogToRestoreEndTime.get(e.getKey()).get() - e.getValue().get()));
+        }
     }
 
     public static class TrackingStandbyUpdateListener implements StandbyUpdateListener {
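The new test verifies task-level restore latency by cross-checking the listener-measured restore time against the corresponding client metric. As a minimal sketch of that lookup pattern (the metric name comes from the test above; the helper class here is illustrative), a caller can read the value through the public metrics map:

import java.util.Map;

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.streams.KafkaStreams;

public class RestoreLatencyMetricExample {
    // Task-level restore latency is exposed through KafkaStreams#metrics() and is
    // identified by the metric name plus its "task-id" tag.
    public static void printRestoreLatency(KafkaStreams kafkaStreams) {
        for (Map.Entry<MetricName, ? extends Metric> entry : kafkaStreams.metrics().entrySet()) {
            if (entry.getKey().name().equals("restore-latency-max")) {
                System.out.println(entry.getKey().tags().get("task-id") + " -> " + entry.getValue().metricValue());
            }
        }
    }
}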
@@ -19,9 +19,6 @@ package org.apache.kafka.streams;
 import org.apache.kafka.clients.CommonClientConfigs;
 import org.apache.kafka.clients.admin.Admin;
 import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
-import org.apache.kafka.clients.admin.MemberToRemove;
-import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions;
-import org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.common.KafkaFuture;
@@ -491,7 +488,7 @@ public class KafkaStreams implements AutoCloseable {
                 closeToError();
             }
             final StreamThread deadThread = (StreamThread) Thread.currentThread();
-            deadThread.shutdown();
+            deadThread.shutdown(false);
             addStreamThread();
             if (throwable instanceof RuntimeException) {
                 throw (RuntimeException) throwable;
@@ -1139,7 +1136,7 @@ public class KafkaStreams implements AutoCloseable {
                     return Optional.of(streamThread.getName());
                 } else {
                     log.warn("Terminating the new thread because the Kafka Streams client is in state {}", state);
-                    streamThread.shutdown();
+                    streamThread.shutdown(true);
                     threads.remove(streamThread);
                     final long cacheSizePerThread = cacheSizePerThread(numLiveStreamThreads());
                     log.info("Resizing thread cache due to terminating added thread, new cache size per thread is {}", cacheSizePerThread);
@@ -1159,6 +1156,10 @@ public class KafkaStreams implements AutoCloseable {
     * The removed stream thread is gracefully shut down. This method does not specify which stream
     * thread is shut down.
     * <p>
+     * The consumer associated with the stream thread is closed using consumer.close() during the shutdown process.
+     * Note that this method does not guarantee immediate removal of the consumer from the consumer group.
+     * The consumer is only kicked off from the group after the stream thread completes its run function.
+     * <p>
     * Since the number of stream threads decreases, the sizes of the caches in the remaining stream
     * threads are adapted so that the sum of the cache sizes over all stream threads equals the total
     * cache size specified in configuration {@link StreamsConfig#STATESTORE_CACHE_MAX_BYTES_CONFIG}.
@@ -1171,17 +1172,15 @@ public class KafkaStreams implements AutoCloseable {
     }
 
     /**
-     * Removes one stream thread out of the running stream threads from this Kafka Streams client.
-     * <p>
-     * The removed stream thread is gracefully shut down. This method does not specify which stream
-     * thread is shut down.
+     * The consumer associated with the stream thread is closed using consumer.close() during the shutdown process.
+     * Note that this method does not guarantee immediate removal of the consumer from the consumer group.
+     * The consumer is only kicked off from the group after the stream thread completes its run function.
     * <p>
     * Since the number of stream threads decreases, the sizes of the caches in the remaining stream
     * threads are adapted so that the sum of the cache sizes over all stream threads equals the total
     * cache size specified in configuration {@link StreamsConfig#STATESTORE_CACHE_MAX_BYTES_CONFIG}.
     *
     * @param timeout The length of time to wait for the thread to shut down
-     * @throws org.apache.kafka.common.errors.TimeoutException if the thread does not stop in time
     * @return name of the removed stream thread or empty if a stream thread could not be removed because
     * no stream threads are alive
     */
@@ -1201,10 +1200,8 @@ public class KafkaStreams implements AutoCloseable {
            final boolean callingThreadIsNotCurrentStreamThread = !streamThread.getName().equals(Thread.currentThread().getName());
            if (streamThread.isThreadAlive() && (callingThreadIsNotCurrentStreamThread || numLiveStreamThreads() == 1)) {
                log.info("Removing StreamThread {}", streamThread.getName());
-               final Optional<String> groupInstanceID = streamThread.groupInstanceID();
-               streamThread.requestLeaveGroupDuringShutdown();
-               streamThread.shutdown();
-               if (!streamThread.getName().equals(Thread.currentThread().getName())) {
+               streamThread.shutdown(true);
+               if (callingThreadIsNotCurrentStreamThread) {
                    final long remainingTimeMs = timeoutMs - (time.milliseconds() - startMs);
                    if (remainingTimeMs <= 0 || !streamThread.waitOnThreadState(StreamThread.State.DEAD, remainingTimeMs)) {
                        log.warn("{} did not shutdown in the allotted time.", streamThread.getName());
@@ -1224,46 +1221,6 @@ public class KafkaStreams implements AutoCloseable {
                final long cacheSizePerThread = cacheSizePerThread(numLiveStreamThreads());
                log.info("Resizing thread cache due to thread removal, new cache size per thread is {}", cacheSizePerThread);
                resizeThreadCache(cacheSizePerThread);
-               if (groupInstanceID.isPresent() && callingThreadIsNotCurrentStreamThread) {
-                   final MemberToRemove memberToRemove = new MemberToRemove(groupInstanceID.get());
-                   final Collection<MemberToRemove> membersToRemove = Collections.singletonList(memberToRemove);
-                   final RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroupResult =
-                       adminClient.removeMembersFromConsumerGroup(
-                           applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG),
-                           new RemoveMembersFromConsumerGroupOptions(membersToRemove)
-                       );
-                   try {
-                       final long remainingTimeMs = timeoutMs - (time.milliseconds() - startMs);
-                       removeMembersFromConsumerGroupResult.memberResult(memberToRemove).get(remainingTimeMs, TimeUnit.MILLISECONDS);
-                   } catch (final java.util.concurrent.TimeoutException exception) {
-                       log.error(
-                           String.format(
-                               "Could not remove static member %s from consumer group %s due to a timeout:",
-                               groupInstanceID.get(),
-                               applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG)
-                           ),
-                           exception
-                       );
-                       throw new TimeoutException(exception.getMessage(), exception);
-                   } catch (final InterruptedException e) {
-                       Thread.currentThread().interrupt();
-                   } catch (final ExecutionException exception) {
-                       log.error(
-                           String.format(
-                               "Could not remove static member %s from consumer group %s due to:",
-                               groupInstanceID.get(),
-                               applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG)
-                           ),
-                           exception
-                       );
-                       throw new StreamsException(
-                           "Could not remove static member " + groupInstanceID.get()
-                               + " from consumer group " + applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG)
-                               + " for the following reason: ",
-                           exception.getCause()
-                       );
-                   }
-               }
                final long remainingTimeMs = timeoutMs - (time.milliseconds() - startMs);
                if (remainingTimeMs <= 0) {
                    throw new TimeoutException("Thread " + streamThread.getName() + " did not stop in the allotted time");
@@ -1496,7 +1453,7 @@ public class KafkaStreams implements AutoCloseable {
        return new Thread(() -> {
            // notify all the threads to stop; avoid deadlocks by stopping any
            // further state reports from the thread since we're shutting down
-           int numStreamThreads = processStreamThread(StreamThread::shutdown);
+           int numStreamThreads = processStreamThread(streamThread -> streamThread.shutdown(leaveGroup));
 
            log.info("Shutting down {} stream threads", numStreamThreads);
 
@@ -1516,10 +1473,6 @@ public class KafkaStreams implements AutoCloseable {
                }
            });
 
-           if (leaveGroup) {
-               processStreamThread(streamThreadLeaveConsumerGroup(timeoutMs));
-           }
-
            log.info("Shutdown {} stream threads complete", numStreamThreads);
 
            if (globalStreamThread != null) {
@@ -1659,33 +1612,6 @@ public class KafkaStreams implements AutoCloseable {
        return close(Optional.of(timeoutMs), options.leaveGroup);
    }
 
-    private Consumer<StreamThread> streamThreadLeaveConsumerGroup(final long remainingTimeMs) {
-        return thread -> {
-            final Optional<String> groupInstanceId = thread.groupInstanceID();
-            if (groupInstanceId.isPresent()) {
-                log.debug("Sending leave group trigger to removing instance from consumer group: {}.",
-                    groupInstanceId.get());
-                final MemberToRemove memberToRemove = new MemberToRemove(groupInstanceId.get());
-                final Collection<MemberToRemove> membersToRemove = Collections.singletonList(memberToRemove);
-
-                final RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroupResult = adminClient
-                    .removeMembersFromConsumerGroup(
-                        applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG),
-                        new RemoveMembersFromConsumerGroupOptions(membersToRemove)
-                    );
-
-                try {
-                    removeMembersFromConsumerGroupResult.memberResult(memberToRemove)
-                        .get(remainingTimeMs, TimeUnit.MILLISECONDS);
-                } catch (final Exception e) {
-                    final String msg = String.format("Could not remove static member %s from consumer group %s.",
-                        groupInstanceId.get(), applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG));
-                    log.error(msg, e);
-                }
-            }
-        };
-    }
-
     /**
     * Do a cleanup of the local {@link StateStore} directory ({@link StreamsConfig#STATE_DIR_CONFIG}) by deleting all
|
* Do a cleanup of the local {@link StateStore} directory ({@link StreamsConfig#STATE_DIR_CONFIG}) by deleting all
|
||||||
* data with regard to the {@link StreamsConfig#APPLICATION_ID_CONFIG application ID}.
|
* data with regard to the {@link StreamsConfig#APPLICATION_ID_CONFIG application ID}.
|
||||||
|
|
|
@@ -287,66 +287,6 @@ public class StreamsConfig extends AbstractConfig {
     OPTIMIZE, NO_OPTIMIZATION, REUSE_KTABLE_SOURCE_TOPICS, MERGE_REPARTITION_TOPICS,
     SINGLE_STORE_SELF_JOIN);

-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 0.10.0.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_0100 = UpgradeFromValues.UPGRADE_FROM_0100.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 0.10.1.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_0101 = UpgradeFromValues.UPGRADE_FROM_0101.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 0.10.2.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_0102 = UpgradeFromValues.UPGRADE_FROM_0102.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 0.11.0.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_0110 = UpgradeFromValues.UPGRADE_FROM_0110.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 1.0.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_10 = UpgradeFromValues.UPGRADE_FROM_10.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 1.1.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_11 = UpgradeFromValues.UPGRADE_FROM_11.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.0.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_20 = UpgradeFromValues.UPGRADE_FROM_20.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.1.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_21 = UpgradeFromValues.UPGRADE_FROM_21.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.2.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_22 = UpgradeFromValues.UPGRADE_FROM_22.toString();
-
-/**
- * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.3.x}.
- */
-@SuppressWarnings("WeakerAccess")
-public static final String UPGRADE_FROM_23 = UpgradeFromValues.UPGRADE_FROM_23.toString();
-
 /**
  * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.4.x}.
  */

@@ -1315,7 +1255,6 @@ public class StreamsConfig extends AbstractConfig {
     ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000",
     ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
     ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false",
-    "internal.leave.group.on.close", false,
     ConsumerConfig.GROUP_PROTOCOL_CONFIG, "classic"
 );
@@ -17,16 +17,6 @@
 package org.apache.kafka.streams.internals;

 public enum UpgradeFromValues {
-    UPGRADE_FROM_0100("0.10.0"),
-    UPGRADE_FROM_0101("0.10.1"),
-    UPGRADE_FROM_0102("0.10.2"),
-    UPGRADE_FROM_0110("0.11.0"),
-    UPGRADE_FROM_10("1.0"),
-    UPGRADE_FROM_11("1.1"),
-    UPGRADE_FROM_20("2.0"),
-    UPGRADE_FROM_21("2.1"),
-    UPGRADE_FROM_22("2.2"),
-    UPGRADE_FROM_23("2.3"),
     UPGRADE_FROM_24("2.4"),
     UPGRADE_FROM_25("2.5"),
     UPGRADE_FROM_26("2.6"),
@@ -62,7 +62,7 @@ public class OpenIterators {

     public void remove(final MeteredIterator iterator) {
         if (openIterators.size() == 1) {
-            streamsMetrics.removeMetric(metricName);
+            streamsMetrics.removeStoreLevelMetric(metricName);
         }
         openIterators.remove(iterator);
         updateOldestStartTimestamp();
@@ -58,16 +58,6 @@ public class ChangedSerializer<T> implements Serializer<Change<T>>, WrappingNull
         }

         switch (UpgradeFromValues.fromString((String) upgradeFrom)) {
-            case UPGRADE_FROM_0100:
-            case UPGRADE_FROM_0101:
-            case UPGRADE_FROM_0102:
-            case UPGRADE_FROM_0110:
-            case UPGRADE_FROM_10:
-            case UPGRADE_FROM_11:
-            case UPGRADE_FROM_20:
-            case UPGRADE_FROM_21:
-            case UPGRADE_FROM_22:
-            case UPGRADE_FROM_23:
             case UPGRADE_FROM_24:
             case UPGRADE_FROM_25:
             case UPGRADE_FROM_26:

@@ -100,16 +100,6 @@ public class KTableRepartitionMap<K, V, K1, V1> implements KTableRepartitionMapS
         }

         switch (UpgradeFromValues.fromString((String) upgradeFrom)) {
-            case UPGRADE_FROM_0100:
-            case UPGRADE_FROM_0101:
-            case UPGRADE_FROM_0102:
-            case UPGRADE_FROM_0110:
-            case UPGRADE_FROM_10:
-            case UPGRADE_FROM_11:
-            case UPGRADE_FROM_20:
-            case UPGRADE_FROM_21:
-            case UPGRADE_FROM_22:
-            case UPGRADE_FROM_23:
             case UPGRADE_FROM_24:
             case UPGRADE_FROM_25:
             case UPGRADE_FROM_26:

@@ -77,16 +77,6 @@ public class SubscriptionWrapperSerde<KLeft> extends WrappingNullableSerde<Subsc
         }

         switch (UpgradeFromValues.fromString((String) upgradeFrom)) {
-            case UPGRADE_FROM_0100:
-            case UPGRADE_FROM_0101:
-            case UPGRADE_FROM_0102:
-            case UPGRADE_FROM_0110:
-            case UPGRADE_FROM_10:
-            case UPGRADE_FROM_11:
-            case UPGRADE_FROM_20:
-            case UPGRADE_FROM_21:
-            case UPGRADE_FROM_22:
-            case UPGRADE_FROM_23:
             case UPGRADE_FROM_24:
             case UPGRADE_FROM_25:
             case UPGRADE_FROM_26:
@@ -41,6 +41,7 @@ import org.apache.kafka.streams.processor.StateRestoreListener;
 import org.apache.kafka.streams.processor.TaskId;
 import org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata;
 import org.apache.kafka.streams.processor.internals.Task.TaskType;
+import org.apache.kafka.streams.state.internals.MeteredStateStore;

 import org.slf4j.Logger;

@@ -138,6 +139,8 @@ public class StoreChangelogReader implements ChangelogReader {
         // either due to limit offset (standby) or committed end offset (active)
         private int bufferedLimitIndex;

+        private long restoreStartTimeNs;
+
         private ChangelogMetadata(final StateStoreMetadata storeMetadata, final ProcessorStateManager stateManager) {
             this.changelogState = ChangelogState.REGISTERED;
             this.storeMetadata = storeMetadata;

@@ -188,6 +191,10 @@ public class StoreChangelogReader implements ChangelogReader {
         int bufferedLimitIndex() {
             return bufferedLimitIndex;
         }
+
+        long calculateRestoreTime(final long restoreEndTimeNs) {
+            return restoreEndTimeNs - restoreStartTimeNs;
+        }
     }

     private static final long DEFAULT_OFFSET_UPDATE_MS = Duration.ofMinutes(5L).toMillis();

@@ -695,6 +702,9 @@ public class StoreChangelogReader implements ChangelogReader {

         changelogMetadata.transitTo(ChangelogState.COMPLETED);
         pauseChangelogsFromRestoreConsumer(Collections.singleton(partition));
+        if (storeMetadata.store() instanceof MeteredStateStore) {
+            ((MeteredStateStore) storeMetadata.store()).recordRestoreTime(changelogMetadata.calculateRestoreTime(time.nanoseconds()));
+        }

         try {
             stateRestoreListener.onRestoreEnd(partition, storeName, changelogMetadata.totalRestored);

@@ -1026,6 +1036,7 @@ public class StoreChangelogReader implements ChangelogReader {
             // no records to restore; in this case we just initialize the sensor to zero
             final long recordsToRestore = Math.max(changelogMetadata.restoreEndOffset - startOffset, 0L);
             task.recordRestoration(time, recordsToRestore, true);
+            changelogMetadata.restoreStartTimeNs = time.nanoseconds();
         } else if (changelogMetadata.stateManager.taskType() == TaskType.STANDBY) {
             try {
                 standbyUpdateListener.onUpdateStart(partition, storeName, startOffset);
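The restore-timing change above follows a small, self-contained pattern: the changelog reader stamps a start time when restoration of a partition begins, computes the elapsed nanoseconds when the changelog is completed, and hands that value to the store. The sketch below illustrates the same bookkeeping in isolation; the StoreRestoreTimer class and its method names are assumptions made for this example and are not part of the Kafka codebase.

import java.util.concurrent.TimeUnit;

// Illustrative sketch only: mirrors the start/stop bookkeeping performed around a restore.
public final class StoreRestoreTimer {

    private long restoreStartTimeNs;

    // called when restoration of a changelog partition starts
    public void markRestoreStart() {
        restoreStartTimeNs = System.nanoTime();
    }

    // called when restoration completes; returns the elapsed time in nanoseconds
    public long calculateRestoreTime() {
        return System.nanoTime() - restoreStartTimeNs;
    }

    public static void main(final String[] args) throws InterruptedException {
        final StoreRestoreTimer timer = new StoreRestoreTimer();
        timer.markRestoreStart();
        TimeUnit.MILLISECONDS.sleep(10); // stand-in for replaying changelog records
        System.out.println("restore took " + timer.calculateRestoreTime() + " ns");
    }
}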
@@ -17,6 +17,8 @@
 package org.apache.kafka.streams.processor.internals;

 import org.apache.kafka.clients.admin.Admin;
+import org.apache.kafka.clients.consumer.CloseOptions;
+import org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;

@@ -95,6 +97,8 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;

+import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.LEAVE_GROUP;
+import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP;
 import static org.apache.kafka.streams.internals.StreamsConfigUtils.eosEnabled;
 import static org.apache.kafka.streams.processor.internals.ClientUtils.adminClientId;
 import static org.apache.kafka.streams.processor.internals.ClientUtils.consumerClientId;

@@ -894,7 +898,7 @@ public class StreamThread extends Thread implements ProcessingThread {
             cleanRun = runLoop();
         } catch (final Throwable e) {
             failedStreamThreadSensor.record();
-            requestLeaveGroupDuringShutdown();
+            leaveGroupRequested.set(true);
             streamsUncaughtExceptionHandler.accept(e, false);
             // Note: the above call currently rethrows the exception, so nothing below this line will be executed
         } finally {

@@ -1874,10 +1878,13 @@ public class StreamThread extends Thread implements ProcessingThread {
      * <p>
      * Note that there is nothing to prevent this function from being called multiple times
      * (e.g., in testing), hence the state is set only the first time
+     *
+     * @param leaveGroup this flag will control whether the consumer will leave the group on close or not
      */
-    public void shutdown() {
+    public void shutdown(final boolean leaveGroup) {
         log.info("Informed to shut down");
         final State oldState = setState(State.PENDING_SHUTDOWN);
+        leaveGroupRequested.set(leaveGroup);
         if (oldState == State.CREATED) {
             // The thread may not have been started. Take responsibility for shutting down
             completeShutdown(true);

@@ -1910,18 +1917,13 @@ public class StreamThread extends Thread implements ProcessingThread {
             log.error("Failed to close changelog reader due to the following error:", e);
         }
         try {
-            if (leaveGroupRequested.get()) {
-                mainConsumer.unsubscribe();
-            }
-        } catch (final Throwable e) {
-            log.error("Failed to unsubscribe due to the following error: ", e);
-        }
-        try {
-            mainConsumer.close();
+            final GroupMembershipOperation membershipOperation = leaveGroupRequested.get() ? LEAVE_GROUP : REMAIN_IN_GROUP;
+            mainConsumer.close(CloseOptions.groupMembershipOperation(membershipOperation));
         } catch (final Throwable e) {
             log.error("Failed to close consumer due to the following error:", e);
         }
         try {
+            // restore consumer isn't part of a consumer group so we use REMAIN_IN_GROUP to skip any leaveGroup checks
             restoreConsumer.close();
         } catch (final Throwable e) {
             log.error("Failed to close restore consumer due to the following error:", e);

@@ -2039,10 +2041,6 @@ public class StreamThread extends Thread implements ProcessingThread {
         return groupInstanceID;
     }

-    public void requestLeaveGroupDuringShutdown() {
-        leaveGroupRequested.set(true);
-    }
-
     public Map<MetricName, Metric> producerMetrics() {
         return taskManager.producerMetrics();
     }
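The shutdown change above replaces the unsubscribe-then-close sequence with a single close call that carries the group-membership decision. A minimal, stand-alone sketch of that call pattern follows; the helper name closeQuietly is an assumption made for the example, while CloseOptions and GroupMembershipOperation are the consumer APIs used in the diff itself.

import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.Consumer;
import org.slf4j.Logger;

// Illustrative helper only (not part of StreamThread): close a consumer and let the
// caller decide whether the member should leave its group, as the new shutdown path does.
final class ConsumerCloseExample {

    static void closeQuietly(final Consumer<byte[], byte[]> consumer, final boolean leaveGroup, final Logger log) {
        final CloseOptions.GroupMembershipOperation operation = leaveGroup
            ? CloseOptions.GroupMembershipOperation.LEAVE_GROUP
            : CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP;
        try {
            consumer.close(CloseOptions.groupMembershipOperation(operation));
        } catch (final Throwable e) {
            log.error("Failed to close consumer due to the following error:", e);
        }
    }
}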
@@ -23,7 +23,6 @@ import org.apache.kafka.common.utils.LogContext;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.StreamsConfig.InternalConfig;
-import org.apache.kafka.streams.internals.UpgradeFromValues;
 import org.apache.kafka.streams.processor.assignment.AssignmentConfigs;
 import org.apache.kafka.streams.processor.internals.ClientUtils;
 import org.apache.kafka.streams.processor.internals.InternalTopicManager;

@@ -59,8 +58,6 @@ public final class AssignorConfiguration {
         final LogContext logContext = new LogContext(logPrefix);
         log = logContext.logger(getClass());

-        validateUpgradeFrom();
-
         {
             final Object o = configs.get(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR);
             if (o == null) {

@@ -94,32 +91,6 @@ public final class AssignorConfiguration {
         return referenceContainer;
     }

-    // cooperative rebalancing was introduced in 2.4 and the old protocol (eager rebalancing) was removed
-    // in 4.0, meaning live upgrades from 2.3 or below to 4.0+ are no longer possible without a bridge release
-    public void validateUpgradeFrom() {
-        final String upgradeFrom = streamsConfig.getString(StreamsConfig.UPGRADE_FROM_CONFIG);
-        if (upgradeFrom != null) {
-            switch (UpgradeFromValues.fromString(upgradeFrom)) {
-                case UPGRADE_FROM_0100:
-                case UPGRADE_FROM_0101:
-                case UPGRADE_FROM_0102:
-                case UPGRADE_FROM_0110:
-                case UPGRADE_FROM_10:
-                case UPGRADE_FROM_11:
-                case UPGRADE_FROM_20:
-                case UPGRADE_FROM_21:
-                case UPGRADE_FROM_22:
-                case UPGRADE_FROM_23:
-                    final String errMsg = String.format(
-                        "The eager rebalancing protocol is no longer supported in 4.0 which means live upgrades from 2.3 or below are not possible."
-                            + " Please see the Streams upgrade guide for the bridge releases and recommended upgrade path. Got upgrade.from='%s'", upgradeFrom);
-                    log.error(errMsg);
-                    throw new ConfigException(errMsg);
-
-            }
-        }
-    }
-
     public String logPrefix() {
         return logPrefix;
     }
@@ -41,12 +41,14 @@ import java.util.Deque;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;

 public class StreamsMetricsImpl implements StreamsMetrics {

@@ -339,6 +341,30 @@ public class StreamsMetricsImpl implements StreamsMetrics {
         metrics.removeMetric(metricName);
     }

+    public void removeStoreLevelMetric(final MetricName metricName) {
+        metrics.removeMetric(metricName);
+
+        final List<String> metricsScopeCandidates = metricName.tags().keySet().stream()
+            .filter(tag -> !tag.equals(THREAD_ID_TAG) && !tag.equals(TASK_ID_TAG))
+            .collect(Collectors.toList());
+        if (metricsScopeCandidates.size() != 1) {
+            // should never happen
+            throw new IllegalStateException("Expected exactly one metric scope tag, but found " + metricsScopeCandidates);
+        }
+
+        final Deque<MetricName> metricsForStore = storeLevelMetrics.get(
+            storeSensorPrefix(
+                metricName.tags().get(THREAD_ID_TAG),
+                metricName.tags().get(TASK_ID_TAG),
+                metricName.tags().get(metricsScopeCandidates.get(0))
+            )
+        );
+
+        if (metricsForStore != null) {
+            metricsForStore.remove(metricName);
+        }
+    }
+
     public Map<String, String> taskLevelTagMap(final String threadId, final String taskId) {
         final Map<String, String> tagMap = new LinkedHashMap<>();
         tagMap.put(THREAD_ID_TAG, threadId);
@@ -69,7 +69,7 @@ import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetric
  */
 public class MeteredKeyValueStore<K, V>
     extends WrappedStateStore<KeyValueStore<Bytes, byte[]>, K, V>
-    implements KeyValueStore<K, V> {
+    implements KeyValueStore<K, V>, MeteredStateStore {

     final Serde<K> keySerde;
     final Serde<V> valueSerde;

@@ -91,6 +91,7 @@ public class MeteredKeyValueStore<K, V>
     protected InternalProcessorContext<?, ?> internalContext;
     private StreamsMetricsImpl streamsMetrics;
     private TaskId taskId;
+    private Sensor restoreSensor;

     protected OpenIterators openIterators;

@@ -128,11 +129,10 @@ public class MeteredKeyValueStore<K, V>
         streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics();

         registerMetrics();
-        final Sensor restoreSensor =
-            StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);

-        // register and possibly restore the state from the logs
-        maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor);
+        restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
+        super.init(stateStoreContext, root);
     }

     private void registerMetrics() {

@@ -152,6 +152,11 @@ public class MeteredKeyValueStore<K, V>
         openIterators = new OpenIterators(taskId, metricsScope, name(), streamsMetrics);
     }

+    @Override
+    public void recordRestoreTime(final long restoreTimeNs) {
+        restoreSensor.record(restoreTimeNs);
+    }
+
     protected Serde<V> prepareValueSerdeForStore(final Serde<V> valueSerde, final SerdeGetter getter) {
         return WrappingNullableUtils.prepareValueSerde(valueSerde, getter);
     }

@@ -482,9 +487,11 @@ public class MeteredKeyValueStore<K, V>
         private final long startTimestamp;
         private final Function<byte[], V> valueDeserializer;

-        private MeteredKeyValueTimestampedIterator(final KeyValueIterator<Bytes, byte[]> iter,
+        private MeteredKeyValueTimestampedIterator(
+            final KeyValueIterator<Bytes, byte[]> iter,
             final Sensor sensor,
-            final Function<byte[], V> valueDeserializer) {
+            final Function<byte[], V> valueDeserializer
+        ) {
             this.iter = iter;
             this.sensor = sensor;
             this.valueDeserializer = valueDeserializer;
@@ -57,7 +57,7 @@ import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetric

 public class MeteredSessionStore<K, V>
     extends WrappedStateStore<SessionStore<Bytes, byte[]>, Windowed<K>, V>
-    implements SessionStore<K, V> {
+    implements SessionStore<K, V>, MeteredStateStore {

     private final String metricsScope;
     private final Serde<K> keySerde;

@@ -73,6 +73,7 @@ public class MeteredSessionStore<K, V>
     private Sensor iteratorDurationSensor;
     private InternalProcessorContext<?, ?> internalContext;
     private TaskId taskId;
+    private Sensor restoreSensor;

     private final LongAdder numOpenIterators = new LongAdder();
     private final NavigableSet<MeteredIterator> openIterators = new ConcurrentSkipListSet<>(Comparator.comparingLong(MeteredIterator::startTimestamp));

@@ -108,11 +109,9 @@ public class MeteredSessionStore<K, V>
         streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics();

         registerMetrics();
-        final Sensor restoreSensor =
-            StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
+        restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);

-        // register and possibly restore the state from the logs
-        maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor);
+        super.init(stateStoreContext, root);
     }

     private void registerMetrics() {

@@ -132,6 +131,11 @@ public class MeteredSessionStore<K, V>
         );
     }

+    @Override
+    public void recordRestoreTime(final long restoreTimeNs) {
+        restoreSensor.record(restoreTimeNs);
+    }
+
     private void initStoreSerde(final StateStoreContext context) {
         final String storeName = name();
         final String changelogTopic = ProcessorContextUtils.changelogFor(context, storeName, Boolean.FALSE);
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+public interface MeteredStateStore {
+
+    void recordRestoreTime(final long restoreTimeNs);
+}
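The interface added above is deliberately tiny: anything wrapping a state store can implement it and forward the measured restore duration to whatever metric it owns. The sketch below is an assumed, simplified implementation used only to illustrate the contract; it is not one of the metered store classes from the diff.

import java.util.concurrent.atomic.AtomicLong;

// Assumed example implementation: records the last restore duration in a plain counter
// instead of a Kafka Sensor, purely to show how recordRestoreTime is meant to be called.
public class RecordingMeteredStateStore implements MeteredStateStore {

    private final AtomicLong lastRestoreTimeNs = new AtomicLong();

    @Override
    public void recordRestoreTime(final long restoreTimeNs) {
        lastRestoreTimeNs.set(restoreTimeNs);
    }

    public long lastRestoreTimeNs() {
        return lastRestoreTimeNs.get();
    }
}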
@@ -60,7 +60,7 @@ import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetric

 public class MeteredWindowStore<K, V>
     extends WrappedStateStore<WindowStore<Bytes, byte[]>, Windowed<K>, V>
-    implements WindowStore<K, V> {
+    implements WindowStore<K, V>, MeteredStateStore {

     private final long windowSizeMs;
     private final String metricsScope;

@@ -76,6 +76,7 @@ public class MeteredWindowStore<K, V>
     private Sensor iteratorDurationSensor;
     private InternalProcessorContext<?, ?> internalContext;
     private TaskId taskId;
+    private Sensor restoreSensor;

     private final LongAdder numOpenIterators = new LongAdder();
     private final NavigableSet<MeteredIterator> openIterators = new ConcurrentSkipListSet<>(Comparator.comparingLong(MeteredIterator::startTimestamp));

@@ -124,8 +125,8 @@ public class MeteredWindowStore<K, V>
         streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics();

         registerMetrics();
-        final Sensor restoreSensor =
-            StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
+        restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);

         // register and possibly restore the state from the logs
         maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor);

@@ -150,6 +151,11 @@ public class MeteredWindowStore<K, V>
         );
     }

+    @Override
+    public void recordRestoreTime(final long restoreTimeNs) {
+        restoreSensor.record(restoreTimeNs);
+    }
+
     private void initStoreSerde(final StateStoreContext context) {
         final String storeName = name();
         final String changelogTopic = ProcessorContextUtils.changelogFor(context, storeName, Boolean.FALSE);
@@ -19,6 +19,7 @@ package org.apache.kafka.streams;
 import org.apache.kafka.clients.admin.Admin;
 import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
 import org.apache.kafka.clients.admin.MockAdminClient;
+import org.apache.kafka.clients.consumer.CloseOptions;
 import org.apache.kafka.clients.producer.MockProducer;
 import org.apache.kafka.common.Cluster;
 import org.apache.kafka.common.KafkaFuture;

@@ -309,8 +310,8 @@ public class KafkaStreamsTest {

     private void prepareConsumer(final StreamThread thread, final AtomicReference<StreamThread.State> state) {
         doAnswer(invocation -> {
-            supplier.consumer.close();
-            supplier.restoreConsumer.close();
+            supplier.consumer.close(CloseOptions.groupMembershipOperation(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP));
+            supplier.restoreConsumer.close(CloseOptions.groupMembershipOperation(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP));
             for (final MockProducer<byte[], byte[]> producer : supplier.producers) {
                 producer.close();
             }

@@ -319,7 +320,7 @@ public class KafkaStreamsTest {
             threadStateListenerCapture.getValue().onChange(thread, StreamThread.State.PENDING_SHUTDOWN, StreamThread.State.RUNNING);
             threadStateListenerCapture.getValue().onChange(thread, StreamThread.State.DEAD, StreamThread.State.PENDING_SHUTDOWN);
             return null;
-        }).when(thread).shutdown();
+        }).when(thread).shutdown(false);
     }

     private void prepareThreadLock(final StreamThread thread) {

@@ -570,7 +571,7 @@ public class KafkaStreamsTest {

         for (int i = 0; i < NUM_THREADS; i++) {
             final StreamThread tmpThread = streams.threads.get(i);
-            tmpThread.shutdown();
+            tmpThread.shutdown(false);
             waitForCondition(() -> tmpThread.state() == StreamThread.State.DEAD,
                 "Thread never stopped.");
             streams.threads.get(i).join();

@@ -789,7 +790,7 @@ public class KafkaStreamsTest {
         prepareThreadLock(streamThreadTwo);
         try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
             streams.start();
-            streamThreadOne.shutdown();
+            streamThreadOne.shutdown(true);
             final Set<ThreadMetadata> threads = streams.metadataForLocalThreads();
             assertThat(threads.size(), equalTo(1));
             assertThat(threads, hasItem(streamThreadTwo.threadMetadata()));
@@ -597,13 +597,6 @@ public class StreamsConfigTest {
         assertEquals("50", returnedProps.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG));
     }

-    @Test
-    public void shouldSetInternalLeaveGroupOnCloseConfigToFalseInConsumer() {
-        final StreamsConfig streamsConfig = new StreamsConfig(props);
-        final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
-        assertThat(consumerConfigs.get("internal.leave.group.on.close"), is(false));
-    }
-
     @Test
     public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalseInConsumerForEosDisabled() {
         final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
@@ -58,11 +58,11 @@ public class OpenIteratorsTest {
         assertThat(gauge.value(null, 0), is(2L));

         openIterators.remove(meteredIterator2);
-        verify(streamsMetrics, never()).removeMetric(any());
+        verify(streamsMetrics, never()).removeStoreLevelMetric(any());
         assertThat(gauge.value(null, 0), is(5L));

         openIterators.remove(meteredIterator1);
-        verify(streamsMetrics).removeMetric(any());
+        verify(streamsMetrics).removeStoreLevelMetric(any());
         assertThat(gauge.value(null, 0), is(5L));

         openIterators.add(meteredIterator3);
@@ -41,6 +41,7 @@ import org.apache.kafka.streams.errors.StreamsException;
 import org.apache.kafka.streams.processor.StateStore;
 import org.apache.kafka.streams.processor.TaskId;
 import org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata;
+import org.apache.kafka.streams.state.internals.MeteredKeyValueStore;
 import org.apache.kafka.test.MockStandbyUpdateListener;
 import org.apache.kafka.test.MockStateRestoreListener;
 import org.apache.kafka.test.StreamsTestUtils;

@@ -89,7 +90,9 @@ import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

 @ExtendWith(MockitoExtension.class)

@@ -1364,6 +1367,58 @@ public class StoreChangelogReaderTest {
         }
     }

+    @Test
+    public void shouldCallRecordRestoreTimeAtTheEndOfRestore() {
+        setupActiveStateManager();
+
+        final MeteredKeyValueStore<?, ?> meteredStateStore = mock(MeteredKeyValueStore.class);
+
+        when(storeMetadata.changelogPartition()).thenReturn(tp);
+        when(storeMetadata.store()).thenReturn(meteredStateStore);
+        when(meteredStateStore.name()).thenReturn(storeName);
+        final TaskId taskId = new TaskId(0, 0);
+
+        when(storeMetadata.offset()).thenReturn(0L);
+        when(activeStateManager.taskId()).thenReturn(taskId);
+
+        setupConsumer(2, tp);
+        consumer.updateEndOffsets(Collections.singletonMap(tp, 2L));
+        adminClient.updateEndOffsets(Collections.singletonMap(tp, 2L));
+
+        changelogReader.register(tp, activeStateManager);
+
+        changelogReader.restore(Collections.singletonMap(taskId, mock(Task.class)));
+
+        assertEquals(1L, changelogReader.changelogMetadata(tp).totalRestored());
+        verify(meteredStateStore).recordRestoreTime(anyLong());
+    }
+
+    @Test
+    public void shouldNotCallRecordRestoreTimeIfRestoreDoesNotComplete() {
+        setupActiveStateManager();
+
+        final MeteredKeyValueStore<?, ?> meteredStateStore = mock(MeteredKeyValueStore.class);
+
+        when(storeMetadata.changelogPartition()).thenReturn(tp);
+        when(storeMetadata.store()).thenReturn(meteredStateStore);
+        when(meteredStateStore.name()).thenReturn(storeName);
+        final TaskId taskId = new TaskId(0, 0);
+
+        when(storeMetadata.offset()).thenReturn(0L);
+        when(activeStateManager.taskId()).thenReturn(taskId);
+
+        setupConsumer(2, tp);
+        consumer.updateEndOffsets(Collections.singletonMap(tp, 3L));
+        adminClient.updateEndOffsets(Collections.singletonMap(tp, 3L));
+
+        changelogReader.register(tp, activeStateManager);
+
+        changelogReader.restore(Collections.singletonMap(taskId, mock(Task.class)));
+
+        assertEquals(1L, changelogReader.changelogMetadata(tp).totalRestored());
+        verify(meteredStateStore, never()).recordRestoreTime(anyLong());
+    }
+
     private void setupConsumer(final long messages, final TopicPartition topicPartition) {
         assignPartition(messages, topicPartition);
         addRecords(messages, topicPartition);
@@ -247,7 +247,7 @@ public class StreamThreadTest {
             if (thread.state() != State.CREATED) {
                 thread.taskManager().shutdown(false);
             }
-            thread.shutdown();
+            thread.shutdown(true);
             thread = null;
         }
         final Set<Thread> t = Collections.unmodifiableSet(Thread.getAllStackTraces().keySet());

@@ -409,7 +409,7 @@ public class StreamThreadTest {
         assertEquals(4, stateListener.numChanges);
         assertEquals(StreamThread.State.PARTITIONS_ASSIGNED, stateListener.oldState);

-        thread.shutdown();
+        thread.shutdown(true);
         assertSame(StreamThread.State.PENDING_SHUTDOWN, thread.state());
     }

@@ -427,13 +427,13 @@ public class StreamThreadTest {
             10 * 1000,
             "Thread never started.");

-        thread.shutdown();
+        thread.shutdown(true);
         TestUtils.waitForCondition(
             () -> thread.state() == StreamThread.State.DEAD,
             10 * 1000,
             "Thread never shut down.");

-        thread.shutdown();
+        thread.shutdown(true);
         assertEquals(thread.state(), StreamThread.State.DEAD);
     }

@@ -812,7 +812,7 @@ public class StreamThreadTest {
             10 * 1000,
             "Thread never started.");

-        thread.shutdown();
+        thread.shutdown(true);
         TestUtils.waitForCondition(
             () -> thread.state() == StreamThread.State.DEAD,
             10 * 1000,

@@ -880,7 +880,7 @@ public class StreamThreadTest {
             () -> { }
         );

-        thread.shutdown();
+        thread.shutdown(true);

         // Validate that the scheduled rebalance wasn't reset then set to MAX_VALUE so we
         // don't trigger one before we can shut down, since the rebalance must be ended

@@ -1390,7 +1390,7 @@ public class StreamThreadTest {
             10 * 1000,
             "Thread never started.");

-        thread.shutdown();
+        thread.shutdown(true);

         // even if thread is no longer running, it should still be polling
         // as long as the rebalance is still ongoing

@@ -1426,7 +1426,7 @@ public class StreamThreadTest {
         thread.setStateListener(
             (t, newState, oldState) -> {
                 if (oldState == StreamThread.State.CREATED && newState == StreamThread.State.STARTING) {
-                    thread.shutdown();
+                    thread.shutdown(true);
                 }
             });
         thread.run();

@@ -1524,7 +1524,7 @@ public class StreamThreadTest {
         topologyMetadata.buildAndRewriteTopology();
         thread = buildStreamThread(consumer, taskManager, config, topologyMetadata)
             .updateThreadMetadata(adminClientId(CLIENT_ID));
-        thread.shutdown();
+        thread.shutdown(true);

         verify(taskManager).shutdown(true);
     }

@@ -1542,7 +1542,7 @@ public class StreamThreadTest {
         topologyMetadata.buildAndRewriteTopology();
         thread = buildStreamThread(consumer, taskManager, config, topologyMetadata)
             .updateThreadMetadata(adminClientId(CLIENT_ID));
-        thread.shutdown();
+        thread.shutdown(true);
         // Execute the run method. Verification of the mock will check that shutdown was only done once
         thread.run();

@@ -2812,7 +2812,6 @@ public class StreamThreadTest {
         assertThat(exceptionHandlerInvoked.get(), is(true));

         verify(consumer).subscribe((Collection<String>) any(), any());
-        verify(consumer).unsubscribe();
     }

     @ParameterizedTest
@@ -570,28 +570,6 @@ public class StreamsPartitionAssignorTest {
         assertThat(interleavedTaskIds, equalTo(assignment));
     }

-    @ParameterizedTest
-    @MethodSource("parameter")
-    public void shouldThrowOnEagerSubscription(final Map<String, Object> parameterizedConfig) {
-        setUp(parameterizedConfig, false);
-        builder.addSource(null, "source1", null, null, null, "topic1");
-        builder.addSource(null, "source2", null, null, null, "topic2");
-        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
-
-        final Set<TaskId> prevTasks = Set.of(
-            new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1)
-        );
-        final Set<TaskId> standbyTasks = Set.of(
-            new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2)
-        );
-
-        createMockTaskManager(prevTasks, standbyTasks);
-        assertThrows(
-            ConfigException.class,
-            () -> configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.UPGRADE_FROM_CONFIG, StreamsConfig.UPGRADE_FROM_23), parameterizedConfig)
-        );
-    }
-
     @ParameterizedTest
     @MethodSource("parameter")
     public void testCooperativeSubscription(final Map<String, Object> parameterizedConfig) {
@@ -58,7 +58,6 @@ import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.nullValue;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.empty;
-import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.not;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -210,16 +209,19 @@ public class MeteredKeyValueStoreTest {
     }
 
     @Test
-    public void shouldRecordRestoreLatencyOnInit() {
+    public void shouldRecordRestoreLatencyOnRecordRestoreTime() {
         setUp();
         doNothing().when(inner).init(context, metered);
 
         init();
 
+        final long restoreTimeNs = 1000L;
+        metered.recordRestoreTime(restoreTimeNs);
+
         // it suffices to verify one restore metric since all restore metrics are recorded by the same sensor
         // and the sensor is tested elsewhere
-        final KafkaMetric metric = metric("restore-rate");
-        assertThat((Double) metric.metricValue(), greaterThan(0.0));
+        final KafkaMetric metric = metric("restore-latency-max");
+        assertThat((Double) metric.metricValue(), equalTo((double) restoreTimeNs));
     }
 
     @Test
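Note: the test above (and the session- and window-store tests below) no longer relies on init() to time restoration; it calls recordRestoreTime(...) explicitly and asserts on the restore-latency-max metric. A minimal caller-side sketch of that pattern, assuming only a store type that exposes recordRestoreTime(long); the RestorableStore interface and the doRestore runnable are illustrative placeholders, not APIs introduced by this change:

// Sketch: time the restore phase once and hand the elapsed nanoseconds to the
// store, which records them against its restore sensor (e.g. restore-latency-max).
interface RestorableStore {
    void recordRestoreTime(long elapsedNanos);   // the hook exercised by the updated tests
}

final class RestoreTimingSketch {
    static void restoreAndRecord(final RestorableStore store, final Runnable doRestore) {
        final long startNs = System.nanoTime();
        doRestore.run();                                      // replay the changelog / rebuild state
        store.recordRestoreTime(System.nanoTime() - startNs); // explicit reporting, no timing inside init()
    }
}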
@@ -458,14 +458,17 @@ public class MeteredSessionStoreTest {
     }
 
     @Test
-    public void shouldRecordRestoreTimeOnInit() {
+    public void shouldRecordRestoreLatencyOnRecordRestoreTime() {
         setUp();
         init();
 
+        final long restoreTimeNs = 1000L;
+        store.recordRestoreTime(restoreTimeNs);
+
         // it suffices to verify one restore metric since all restore metrics are recorded by the same sensor
         // and the sensor is tested elsewhere
-        final KafkaMetric metric = metric("restore-rate");
-        assertTrue((Double) metric.metricValue() > 0);
+        final KafkaMetric metric = metric("restore-latency-max");
+        assertThat((Double) metric.metricValue(), equalTo((double) restoreTimeNs));
     }
 
     @Test
@@ -183,13 +183,6 @@ public class MeteredVersionedKeyValueStoreTest {
         verify(valueSerializer).serialize(changelogTopicName, VALUE);
     }
 
-    @Test
-    public void shouldRecordMetricsOnInit() {
-        // init is called in setUp(). it suffices to verify one restore metric since all restore
-        // metrics are recorded by the same sensor, and the sensor is tested elsewhere.
-        assertThat((Double) getMetric("restore-rate").metricValue(), greaterThan(0.0));
-    }
-
     @Test
     public void shouldDelegateAndRecordMetricsOnPut() {
         when(inner.put(RAW_KEY, RAW_VALUE, TIMESTAMP)).thenReturn(PUT_RETURN_CODE_VALID_TO_UNDEFINED);
@@ -210,14 +210,19 @@ public class MeteredWindowStoreTest {
     }
 
     @Test
-    public void shouldRecordRestoreLatencyOnInit() {
+    public void shouldRecordRestoreLatencyOnRecordRestoreTime() {
+        setUp();
         doNothing().when(innerStoreMock).init(context, store);
 
         store.init(context, store);
+
+        final long restoreTimeNs = 1000L;
+        store.recordRestoreTime(restoreTimeNs);
+
         // it suffices to verify one restore metric since all restore metrics are recorded by the same sensor
         // and the sensor is tested elsewhere
-        final KafkaMetric metric = metric("restore-rate");
-        assertThat((Double) metric.metricValue(), greaterThan(0.0));
+        final KafkaMetric metric = metric("restore-latency-max");
+        assertThat((Double) metric.metricValue(), equalTo((double) restoreTimeNs));
     }
 
     @Test
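For reference, a minimal sketch of how a single restore sensor backed by a Max stat surfaces a restore-latency-max value through the public org.apache.kafka.common.metrics API; the sensor name, metric group, and description below are illustrative, not the ones registered by the Streams metrics layer:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Max;

public final class RestoreSensorSketch {
    public static void main(final String[] args) throws Exception {
        try (Metrics metrics = new Metrics()) {
            // All restore stats hang off one sensor; only a Max is attached here.
            final Sensor restoreSensor = metrics.sensor("restore");
            final MetricName latencyMax = metrics.metricName(
                "restore-latency-max", "example-store-metrics", "maximum observed restore time");
            restoreSensor.add(latencyMax, new Max());

            restoreSensor.record(1000L);   // the measured restore time, e.g. in nanoseconds

            final KafkaMetric metric = metrics.metric(latencyMax);
            System.out.println(metric.metricValue());   // prints 1000.0
        }
    }
}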