mirror of https://github.com/apache/kafka.git
KAFKA-15517: Improve MirrorMaker logging in case of authorization errors (#15558)
Reviewers: Mickael Maison <mickael.maison@gmail.com>
commit a1ca788c99 (parent f7eb96230d)
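In short: every blocking AdminClient call in the MirrorMaker connectors is now routed through a new MirrorUtils.adminCall helper, which, when the failure cause is a TopicAuthorizationException, ClusterAuthorizationException, or GroupAuthorizationException, logs which operation failed and against which cluster before rethrowing. The shape of the change at a typical call site, condensed from the diff below:

    // Before: an ACL failure surfaced only as a bare ExecutionException from get().
    Set<String> topics = adminClient.listTopics().names().get();

    // After: the same call wrapped in adminCall, so the log names the operation and cluster.
    Set<String> topics = adminCall(
            () -> adminClient.listTopics().names().get(),
            () -> "list topics on " + actualClusterAlias(adminClient) + " cluster");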
MirrorCheckpointConnector.java

@@ -43,6 +43,7 @@ import java.util.stream.IntStream;
 import static org.apache.kafka.connect.mirror.Checkpoint.CONSUMER_GROUP_ID_KEY;
 import static org.apache.kafka.connect.mirror.MirrorUtils.TOPIC_KEY;
+import static org.apache.kafka.connect.mirror.MirrorUtils.adminCall;
 
 /** Replicate consumer group state between clusters. Emits checkpoint records.
  *
@@ -216,7 +217,10 @@ public class MirrorCheckpointConnector extends SourceConnector {
     Collection<ConsumerGroupListing> listConsumerGroups()
             throws InterruptedException, ExecutionException {
-        return sourceAdminClient.listConsumerGroups().valid().get();
+        return adminCall(
+                () -> sourceAdminClient.listConsumerGroups().valid().get(),
+                () -> "list consumer groups on " + config.sourceClusterAlias() + " cluster"
+        );
     }
 
     private void createInternalTopics() {
@@ -229,7 +233,11 @@ public class MirrorCheckpointConnector extends SourceConnector {
     Map<TopicPartition, OffsetAndMetadata> listConsumerGroupOffsets(String group)
             throws InterruptedException, ExecutionException {
-        return sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get();
+        return adminCall(
+                () -> sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get(),
+                () -> String.format("list offsets for consumer group %s on %s cluster", group,
+                        config.sourceClusterAlias())
+        );
     }
 
     boolean shouldReplicateByGroupFilter(String group) {
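With these call sites wrapped, a missing ACL on the source cluster now produces an error log that names the operation before the exception propagates, along the lines of (illustrative; the alias comes from the connector's source.cluster.alias setting):

    ERROR GroupAuthorizationException occurred while trying to list consumer groups on source1 cluster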
MirrorCheckpointTask.java

@@ -17,7 +17,6 @@
 package org.apache.kafka.connect.mirror;
 
 import org.apache.kafka.clients.admin.Admin;
-import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsResult;
 import org.apache.kafka.clients.admin.ConsumerGroupDescription;
 import org.apache.kafka.common.ConsumerGroupState;
 import org.apache.kafka.common.KafkaFuture;
@@ -48,6 +47,8 @@ import java.util.concurrent.ExecutionException;
 import java.time.Duration;
 import java.util.stream.Stream;
 
+import static org.apache.kafka.connect.mirror.MirrorUtils.adminCall;
+
 /** Emits checkpoints for upstream consumer groups. */
 public class MirrorCheckpointTask extends SourceTask {
 
@@ -228,7 +229,10 @@ public class MirrorCheckpointTask extends SourceTask {
             // short circuit if stopping
             return Collections.emptyMap();
         }
-        return sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get();
+        return adminCall(
+                () -> sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get(),
+                () -> String.format("list offsets for consumer group %s on %s cluster", group, sourceClusterAlias)
+        );
     }
 
     Optional<Checkpoint> checkpoint(String group, TopicPartition topicPartition,
@@ -277,9 +281,11 @@ public class MirrorCheckpointTask extends SourceTask {
                 System.currentTimeMillis() - record.timestamp());
     }
 
-    private void refreshIdleConsumerGroupOffset() {
-        Map<String, KafkaFuture<ConsumerGroupDescription>> consumerGroupsDesc = targetAdminClient
-                .describeConsumerGroups(consumerGroups).describedGroups();
+    private void refreshIdleConsumerGroupOffset() throws ExecutionException, InterruptedException {
+        Map<String, KafkaFuture<ConsumerGroupDescription>> consumerGroupsDesc = adminCall(
+                () -> targetAdminClient.describeConsumerGroups(consumerGroups).describedGroups(),
+                () -> String.format("describe consumer groups %s on %s cluster", consumerGroups, targetClusterAlias)
+        );
 
         for (String group : consumerGroups) {
             try {
@@ -289,8 +295,13 @@ public class MirrorCheckpointTask extends SourceTask {
                 // (1) idle: because the consumer at target is not actively consuming the mirrored topic
                 // (2) dead: the new consumer that is recently created at source and never existed at target
                 if (consumerGroupState == ConsumerGroupState.EMPTY) {
-                    idleConsumerGroupsOffset.put(group, targetAdminClient.listConsumerGroupOffsets(group)
-                            .partitionsToOffsetAndMetadata().get());
+                    idleConsumerGroupsOffset.put(
+                            group,
+                            adminCall(
+                                    () -> targetAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get(),
+                                    () -> String.format("list offsets for consumer group %s on %s cluster", group, targetClusterAlias)
+                            )
+                    );
                 }
                 // new consumer upstream has state "DEAD" and will be identified during the offset sync-up
             } catch (InterruptedException | ExecutionException e) {
@@ -299,7 +310,7 @@ public class MirrorCheckpointTask extends SourceTask {
         }
     }
 
-    Map<String, Map<TopicPartition, OffsetAndMetadata>> syncGroupOffset() {
+    Map<String, Map<TopicPartition, OffsetAndMetadata>> syncGroupOffset() throws ExecutionException, InterruptedException {
         Map<String, Map<TopicPartition, OffsetAndMetadata>> offsetToSyncAll = new HashMap<>();
 
         // first, sync offsets for the idle consumers at target
@@ -360,20 +371,24 @@ public class MirrorCheckpointTask extends SourceTask {
         return offsetToSyncAll;
     }
 
-    void syncGroupOffset(String consumerGroupId, Map<TopicPartition, OffsetAndMetadata> offsetToSync) {
+    void syncGroupOffset(String consumerGroupId, Map<TopicPartition, OffsetAndMetadata> offsetToSync) throws ExecutionException, InterruptedException {
         if (targetAdminClient != null) {
-            AlterConsumerGroupOffsetsResult result = targetAdminClient.alterConsumerGroupOffsets(consumerGroupId, offsetToSync);
-            result.all().whenComplete((v, throwable) -> {
-                if (throwable != null) {
-                    if (throwable.getCause() instanceof UnknownMemberIdException) {
-                        log.warn("Unable to sync offsets for consumer group {}. This is likely caused by consumers currently using this group in the target cluster.", consumerGroupId);
-                    } else {
-                        log.error("Unable to sync offsets for consumer group {}.", consumerGroupId, throwable);
-                    }
-                } else {
-                    log.trace("Sync-ed {} offsets for consumer group {}.", offsetToSync.size(), consumerGroupId);
-                }
-            });
+            adminCall(
+                    () -> targetAdminClient.alterConsumerGroupOffsets(consumerGroupId, offsetToSync).all()
+                            .whenComplete((v, throwable) -> {
+                                if (throwable != null) {
+                                    if (throwable.getCause() instanceof UnknownMemberIdException) {
+                                        log.warn("Unable to sync offsets for consumer group {}. This is likely caused " +
+                                                "by consumers currently using this group in the target cluster.", consumerGroupId);
+                                    } else {
+                                        log.error("Unable to sync offsets for consumer group {}.", consumerGroupId, throwable);
+                                    }
+                                } else {
+                                    log.trace("Sync-ed {} offsets for consumer group {}.", offsetToSync.size(), consumerGroupId);
+                                }
+                            }),
+                    () -> String.format("alter offsets for consumer group %s on %s cluster", consumerGroupId, targetClusterAlias)
+            );
         }
     }
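Note that syncGroupOffset still reports per-group outcomes through the whenComplete callback; since the wrapped lambda never blocks on the returned future, adminCall here only catches exceptions thrown synchronously by alterConsumerGroupOffsets itself.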
MirrorSourceConnector.java

@@ -76,6 +76,7 @@ import org.slf4j.LoggerFactory;
 import static org.apache.kafka.connect.mirror.MirrorSourceConfig.SYNC_TOPIC_ACLS_ENABLED;
 import static org.apache.kafka.connect.mirror.MirrorUtils.SOURCE_CLUSTER_KEY;
 import static org.apache.kafka.connect.mirror.MirrorUtils.TOPIC_KEY;
+import static org.apache.kafka.connect.mirror.MirrorUtils.adminCall;
 
 /** Replicate data, configuration, and ACLs between clusters.
  *
@@ -139,9 +140,10 @@ public class MirrorSourceConnector extends SourceConnector {
     }
 
     // visible for testing
-    MirrorSourceConnector(Admin sourceAdminClient, Admin targetAdminClient) {
+    MirrorSourceConnector(Admin sourceAdminClient, Admin targetAdminClient, MirrorSourceConfig config) {
         this.sourceAdminClient = sourceAdminClient;
         this.targetAdminClient = targetAdminClient;
+        this.config = config;
     }
 
     @Override
@@ -495,59 +497,83 @@ public class MirrorSourceConnector extends SourceConnector {
     }
 
     // visible for testing
-    void createNewTopics(Map<String, NewTopic> newTopics) {
-        targetAdminClient.createTopics(newTopics.values(), new CreateTopicsOptions()).values().forEach((k, v) -> v.whenComplete((x, e) -> {
-            if (e != null) {
-                log.warn("Could not create topic {}.", k, e);
-            } else {
-                log.info("Created remote topic {} with {} partitions.", k, newTopics.get(k).numPartitions());
-            }
-        }));
+    void createNewTopics(Map<String, NewTopic> newTopics) throws ExecutionException, InterruptedException {
+        adminCall(
+                () -> {
+                    targetAdminClient.createTopics(newTopics.values(), new CreateTopicsOptions()).values()
+                            .forEach((k, v) -> v.whenComplete((x, e) -> {
+                                if (e != null) {
+                                    log.warn("Could not create topic {}.", k, e);
+                                } else {
+                                    log.info("Created remote topic {} with {} partitions.", k, newTopics.get(k).numPartitions());
+                                }
+                            }));
+                    return null;
+                },
+                () -> String.format("create topics %s on %s cluster", newTopics, config.targetClusterAlias())
+        );
     }
 
-    void createNewPartitions(Map<String, NewPartitions> newPartitions) {
-        targetAdminClient.createPartitions(newPartitions).values().forEach((k, v) -> v.whenComplete((x, e) -> {
-            if (e instanceof InvalidPartitionsException) {
-                // swallow, this is normal
-            } else if (e != null) {
-                log.warn("Could not create topic-partitions for {}.", k, e);
-            } else {
-                log.info("Increased size of {} to {} partitions.", k, newPartitions.get(k).totalCount());
-            }
-        }));
+    void createNewPartitions(Map<String, NewPartitions> newPartitions) throws ExecutionException, InterruptedException {
+        adminCall(
+                () -> {
+                    targetAdminClient.createPartitions(newPartitions).values().forEach((k, v) -> v.whenComplete((x, e) -> {
+                        if (e instanceof InvalidPartitionsException) {
+                            // swallow, this is normal
+                        } else if (e != null) {
+                            log.warn("Could not create topic-partitions for {}.", k, e);
+                        } else {
+                            log.info("Increased size of {} to {} partitions.", k, newPartitions.get(k).totalCount());
+                        }
+                    }));
+                    return null;
+                },
+                () -> String.format("create partitions %s on %s cluster", newPartitions, config.targetClusterAlias())
+        );
    }
 
     private Set<String> listTopics(Admin adminClient)
             throws InterruptedException, ExecutionException {
-        return adminClient.listTopics().names().get();
+        return adminCall(
+                () -> adminClient.listTopics().names().get(),
+                () -> "list topics on " + actualClusterAlias(adminClient) + " cluster"
+        );
     }
 
     private Optional<Collection<AclBinding>> listTopicAclBindings()
             throws InterruptedException, ExecutionException {
-        Collection<AclBinding> bindings;
-        try {
-            bindings = sourceAdminClient.describeAcls(ANY_TOPIC_ACL).values().get();
-        } catch (ExecutionException e) {
-            if (e.getCause() instanceof SecurityDisabledException) {
-                if (noAclAuthorizer.compareAndSet(false, true)) {
-                    log.info(
-                            "No ACL authorizer is configured on the source Kafka cluster, so no topic ACL syncing will take place. "
-                                    + "Consider disabling topic ACL syncing by setting " + SYNC_TOPIC_ACLS_ENABLED + " to 'false'."
-                    );
-                } else {
-                    log.debug("Source-side ACL authorizer still not found; skipping topic ACL sync");
-                }
-                return Optional.empty();
-            } else {
-                throw e;
-            }
-        }
-        return Optional.of(bindings);
+        return adminCall(
+                () -> {
+                    Collection<AclBinding> bindings;
+                    try {
+                        bindings = sourceAdminClient.describeAcls(ANY_TOPIC_ACL).values().get();
+                    } catch (ExecutionException e) {
+                        if (e.getCause() instanceof SecurityDisabledException) {
+                            if (noAclAuthorizer.compareAndSet(false, true)) {
+                                log.info(
+                                        "No ACL authorizer is configured on the source Kafka cluster, so no topic ACL syncing will take place. "
+                                                + "Consider disabling topic ACL syncing by setting " + SYNC_TOPIC_ACLS_ENABLED + " to 'false'."
+                                );
+                            } else {
+                                log.debug("Source-side ACL authorizer still not found; skipping topic ACL sync");
+                            }
+                            return Optional.empty();
+                        } else {
+                            throw e;
+                        }
+                    }
+                    return Optional.of(bindings);
+                },
+                () -> "describe ACLs on " + config.sourceClusterAlias() + " cluster"
+        );
     }
 
-    private static Collection<TopicDescription> describeTopics(Admin adminClient, Collection<String> topics)
+    private Collection<TopicDescription> describeTopics(Admin adminClient, Collection<String> topics)
             throws InterruptedException, ExecutionException {
-        return adminClient.describeTopics(topics).allTopicNames().get().values();
+        return adminCall(
+                () -> adminClient.describeTopics(topics).allTopicNames().get().values(),
+                () -> String.format("describe topics %s on %s cluster", topics, actualClusterAlias(adminClient))
+        );
     }
 
     static Map<String, String> configToMap(Config config) {
@@ -558,20 +584,26 @@ public class MirrorSourceConnector extends SourceConnector {
     // visible for testing
     // use deprecated alterConfigs API for broker compatibility back to 0.11.0
     @SuppressWarnings("deprecation")
-    void deprecatedAlterConfigs(Map<String, Config> topicConfigs) {
+    void deprecatedAlterConfigs(Map<String, Config> topicConfigs) throws ExecutionException, InterruptedException {
         Map<ConfigResource, Config> configs = topicConfigs.entrySet().stream()
                 .collect(Collectors.toMap(x ->
                         new ConfigResource(ConfigResource.Type.TOPIC, x.getKey()), Entry::getValue));
         log.trace("Syncing configs for {} topics.", configs.size());
-        targetAdminClient.alterConfigs(configs).values().forEach((k, v) -> v.whenComplete((x, e) -> {
-            if (e != null) {
-                log.warn("Could not alter configuration of topic {}.", k.name(), e);
-            }
-        }));
+        adminCall(
+                () -> {
+                    targetAdminClient.alterConfigs(configs).values().forEach((k, v) -> v.whenComplete((x, e) -> {
+                        if (e != null) {
+                            log.warn("Could not alter configuration of topic {}.", k.name(), e);
+                        }
+                    }));
+                    return null;
+                },
+                () -> String.format("alter topic configs %s on %s cluster", topicConfigs, config.targetClusterAlias())
+        );
     }
 
     // visible for testing
-    void incrementalAlterConfigs(Map<String, Config> topicConfigs) {
+    void incrementalAlterConfigs(Map<String, Config> topicConfigs) throws ExecutionException, InterruptedException {
         Map<ConfigResource, Collection<AlterConfigOp>> configOps = new HashMap<>();
         for (Map.Entry<String, Config> topicConfig : topicConfigs.entrySet()) {
             Collection<AlterConfigOp> ops = new ArrayList<>();
@@ -587,36 +619,48 @@ public class MirrorSourceConnector extends SourceConnector {
         }
         log.trace("Syncing configs for {} topics.", configOps.size());
         AtomicReference<Boolean> encounteredError = new AtomicReference<>(false);
-        targetAdminClient.incrementalAlterConfigs(configOps).values().forEach((k, v) -> v.whenComplete((x, e) -> {
-            if (e != null) {
-                if (config.useIncrementalAlterConfigs().equals(MirrorSourceConfig.REQUEST_INCREMENTAL_ALTER_CONFIGS)
-                        && e instanceof UnsupportedVersionException && !encounteredError.get()) {
-                    //Fallback logic
-                    log.warn("The target cluster {} is not compatible with IncrementalAlterConfigs API. "
-                            + "Therefore using deprecated AlterConfigs API for syncing configs for topic {}",
-                            sourceAndTarget.target(), k.name(), e);
-                    encounteredError.set(true);
-                    useIncrementalAlterConfigs = false;
-                } else if (config.useIncrementalAlterConfigs().equals(MirrorSourceConfig.REQUIRE_INCREMENTAL_ALTER_CONFIGS)
-                        && e instanceof UnsupportedVersionException && !encounteredError.get()) {
-                    log.error("Failed to sync configs for topic {} on cluster {} with IncrementalAlterConfigs API", k.name(), sourceAndTarget.target(), e);
-                    encounteredError.set(true);
-                    context.raiseError(new ConnectException("use.incremental.alter.configs was set to \"required\", but the target cluster '"
-                            + sourceAndTarget.target() + "' is not compatible with IncrementalAlterConfigs API", e));
-                } else {
-                    log.warn("Could not alter configuration of topic {}.", k.name(), e);
-                }
-            }
-        }));
+        adminCall(
+                () -> {
+                    targetAdminClient.incrementalAlterConfigs(configOps).values().forEach((k, v) -> v.whenComplete((x, e) -> {
+                        if (e != null) {
+                            if (config.useIncrementalAlterConfigs().equals(MirrorSourceConfig.REQUEST_INCREMENTAL_ALTER_CONFIGS)
+                                    && e instanceof UnsupportedVersionException && !encounteredError.get()) {
+                                //Fallback logic
+                                log.warn("The target cluster {} is not compatible with IncrementalAlterConfigs API. "
+                                                + "Therefore using deprecated AlterConfigs API for syncing configs for topic {}",
+                                        sourceAndTarget.target(), k.name(), e);
+                                encounteredError.set(true);
+                                useIncrementalAlterConfigs = false;
+                            } else if (config.useIncrementalAlterConfigs().equals(MirrorSourceConfig.REQUIRE_INCREMENTAL_ALTER_CONFIGS)
+                                    && e instanceof UnsupportedVersionException && !encounteredError.get()) {
+                                log.error("Failed to sync configs for topic {} on cluster {} with IncrementalAlterConfigs API", k.name(), sourceAndTarget.target(), e);
+                                encounteredError.set(true);
+                                context.raiseError(new ConnectException("use.incremental.alter.configs was set to \"required\", but the target cluster '"
+                                        + sourceAndTarget.target() + "' is not compatible with IncrementalAlterConfigs API", e));
+                            } else {
+                                log.warn("Could not alter configuration of topic {}.", k.name(), e);
+                            }
+                        }
+                    }));
+                    return null;
+                },
+                () -> String.format("incremental alter topic configs %s on %s cluster", topicConfigs, config.targetClusterAlias())
+        );
     }
 
-    private void updateTopicAcls(List<AclBinding> bindings) {
+    private void updateTopicAcls(List<AclBinding> bindings) throws ExecutionException, InterruptedException {
         log.trace("Syncing {} topic ACL bindings.", bindings.size());
-        targetAdminClient.createAcls(bindings).values().forEach((k, v) -> v.whenComplete((x, e) -> {
-            if (e != null) {
-                log.warn("Could not sync ACL of topic {}.", k.pattern().name(), e);
-            }
-        }));
+        adminCall(
+                () -> {
+                    targetAdminClient.createAcls(bindings).values().forEach((k, v) -> v.whenComplete((x, e) -> {
+                        if (e != null) {
+                            log.warn("Could not sync ACL of topic {}.", k.pattern().name(), e);
+                        }
+                    }));
+                    return null;
+                },
+                () -> String.format("create ACLs %s on %s cluster", bindings, config.targetClusterAlias())
+        );
     }
 
     private static Stream<TopicPartition> expandTopicDescription(TopicDescription description) {
@@ -630,8 +674,11 @@ public class MirrorSourceConnector extends SourceConnector {
         Set<ConfigResource> resources = topics.stream()
                 .map(x -> new ConfigResource(ConfigResource.Type.TOPIC, x))
                 .collect(Collectors.toSet());
-        return sourceAdminClient.describeConfigs(resources).all().get().entrySet().stream()
-                .collect(Collectors.toMap(x -> x.getKey().name(), Entry::getValue));
+        return adminCall(
+                () -> sourceAdminClient.describeConfigs(resources).all().get().entrySet().stream()
+                        .collect(Collectors.toMap(x -> x.getKey().name(), Entry::getValue)),
+                () -> String.format("describe configs for topics %s on %s cluster", topics, config.sourceClusterAlias())
+        );
     }
 
     Config targetConfig(Config sourceConfig, boolean incremental) {
@@ -701,4 +748,8 @@ public class MirrorSourceConnector extends SourceConnector {
     String formatRemoteTopic(String topic) {
         return replicationPolicy.formatRemoteTopic(sourceAndTarget.source(), topic);
     }
+
+    private String actualClusterAlias(Admin adminClient) {
+        return adminClient.equals(sourceAdminClient) ? config.sourceClusterAlias() : config.targetClusterAlias();
+    }
 }
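The recurring "return null;" in the void-valued call sites above falls out of adminCall taking a Callable<T> rather than a Runnable: a block-bodied lambda must return a value to be a Callable<Void>. A minimal illustration (a hypothetical call, not part of this diff):

    adminCall(
            () -> {
                targetAdminClient.deleteTopics(Collections.singleton("obsolete-topic"));
                return null;  // satisfies Callable<Void>; a Runnable would not compile here
            },
            () -> "delete topics on " + config.targetClusterAlias() + " cluster");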
MirrorUtils.java

@@ -22,6 +22,7 @@ import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.errors.ClusterAuthorizationException;
+import org.apache.kafka.common.errors.GroupAuthorizationException;
 import org.apache.kafka.common.errors.InvalidConfigurationException;
 import org.apache.kafka.common.errors.TimeoutException;
 import org.apache.kafka.common.errors.TopicAuthorizationException;
@@ -42,7 +43,9 @@ import java.util.List;
 import java.util.HashMap;
 import java.util.Collections;
 import java.util.Objects;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
+import java.util.function.Supplier;
 import java.util.regex.Pattern;
 
 import static java.util.Collections.singleton;
@@ -320,4 +323,21 @@
     static void createSinglePartitionCompactedTopic(String topicName, short replicationFactor, Admin admin) {
         createCompactedTopic(topicName, (short) 1, replicationFactor, admin);
     }
+
+    static <T> T adminCall(Callable<T> callable, Supplier<String> errMsg)
+            throws ExecutionException, InterruptedException {
+        try {
+            return callable.call();
+        } catch (ExecutionException | InterruptedException e) {
+            Throwable cause = e.getCause();
+            if (cause instanceof TopicAuthorizationException ||
+                    cause instanceof ClusterAuthorizationException ||
+                    cause instanceof GroupAuthorizationException) {
+                log.error("{} occurred while trying to {}", cause.getClass().getSimpleName(), errMsg.get());
+            }
+            throw e;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
 }
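Two details of adminCall are worth noting. The description is a Supplier<String>, so potentially expensive messages (stringified topic or config maps) are only built when an authorization error actually occurs. And non-authorization failures pass through unchanged: ExecutionException and InterruptedException are rethrown as-is, while any other checked exception from the Callable is wrapped in a RuntimeException.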
MirrorCheckpointTaskTest.java

@@ -21,6 +21,7 @@ import java.util.Map;
 import java.util.Collections;
 import java.util.Optional;
 import java.util.OptionalLong;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
@@ -116,7 +117,7 @@ public class MirrorCheckpointTaskTest {
     }
 
     @Test
-    public void testSyncOffset() {
+    public void testSyncOffset() throws ExecutionException, InterruptedException {
         Map<String, Map<TopicPartition, OffsetAndMetadata>> idleConsumerGroupsOffset = new HashMap<>();
         Map<String, Map<TopicPartition, Checkpoint>> checkpointsPerConsumerGroup = new HashMap<>();
 
@@ -170,7 +171,7 @@ public class MirrorCheckpointTaskTest {
     }
 
     @Test
-    public void testSyncOffsetForTargetGroupWithNullOffsetAndMetadata() {
+    public void testSyncOffsetForTargetGroupWithNullOffsetAndMetadata() throws ExecutionException, InterruptedException {
         Map<String, Map<TopicPartition, OffsetAndMetadata>> idleConsumerGroupsOffset = new HashMap<>();
         Map<String, Map<TopicPartition, Checkpoint>> checkpointsPerConsumerGroup = new HashMap<>();
 
MirrorSourceConnectorTest.java

@@ -19,6 +19,7 @@ package org.apache.kafka.connect.mirror;
 import org.apache.kafka.clients.admin.AlterConfigOp;
 import org.apache.kafka.clients.admin.Admin;
 import org.apache.kafka.clients.admin.DescribeAclsResult;
+import org.apache.kafka.clients.admin.DescribeConfigsResult;
 import org.apache.kafka.common.KafkaFuture;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.acl.AccessControlEntry;
@@ -27,6 +28,7 @@ import org.apache.kafka.common.acl.AclOperation;
 import org.apache.kafka.common.acl.AclPermissionType;
 import org.apache.kafka.common.config.ConfigResource;
 import org.apache.kafka.common.config.ConfigValue;
+import org.apache.kafka.common.errors.TopicAuthorizationException;
 import org.apache.kafka.common.errors.UnsupportedVersionException;
 import org.apache.kafka.common.errors.SecurityDisabledException;
 import org.apache.kafka.common.resource.PatternType;
@@ -78,9 +80,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -183,7 +187,8 @@
     public void testNoBrokerAclAuthorizer() throws Exception {
         Admin sourceAdmin = mock(Admin.class);
         Admin targetAdmin = mock(Admin.class);
-        MirrorSourceConnector connector = new MirrorSourceConnector(sourceAdmin, targetAdmin);
+        MirrorSourceConnector connector = new MirrorSourceConnector(sourceAdmin, targetAdmin,
+                new MirrorSourceConfig(makeProps()));
 
         ExecutionException describeAclsFailure = new ExecutionException(
                 "Failed to describe ACLs",
@@ -224,6 +229,39 @@
         verifyNoInteractions(targetAdmin);
     }
 
+    @Test
+    public void testMissingDescribeConfigsAcl() throws Exception {
+        Admin sourceAdmin = mock(Admin.class);
+        MirrorSourceConnector connector = new MirrorSourceConnector(
+                sourceAdmin,
+                mock(Admin.class),
+                new MirrorSourceConfig(makeProps())
+        );
+
+        ExecutionException describeConfigsFailure = new ExecutionException(
+                "Failed to describe topic configs",
+                new TopicAuthorizationException("Topic authorization failed")
+        );
+        @SuppressWarnings("unchecked")
+        KafkaFuture<Map<ConfigResource, Config>> describeConfigsFuture = mock(KafkaFuture.class);
+        when(describeConfigsFuture.get()).thenThrow(describeConfigsFailure);
+        DescribeConfigsResult describeConfigsResult = mock(DescribeConfigsResult.class);
+        when(describeConfigsResult.all()).thenReturn(describeConfigsFuture);
+        when(sourceAdmin.describeConfigs(any())).thenReturn(describeConfigsResult);
+
+        try (LogCaptureAppender connectorLogs = LogCaptureAppender.createAndRegister(MirrorUtils.class)) {
+            connectorLogs.setClassLoggerToTrace(MirrorUtils.class);
+            Set<String> topics = new HashSet<>();
+            topics.add("topic1");
+            topics.add("topic2");
+            ExecutionException exception = assertThrows(ExecutionException.class, () -> connector.describeTopicConfigs(topics));
+            assertEquals(
+                    exception.getCause().getClass().getSimpleName() + " occurred while trying to describe configs for topics [topic1, topic2] on source1 cluster",
+                    connectorLogs.getMessages().get(0)
+            );
+        }
+    }
+
     @Test
     public void testConfigPropertyFiltering() {
         MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
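The new test pins down the exact message format: it routes a TopicAuthorizationException through a mocked Admin client and asserts that MirrorUtils logs "<exception simple name> occurred while trying to describe configs for topics [topic1, topic2] on source1 cluster", where source1 is evidently the source cluster alias configured by the makeProps() test helper.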