mirror of https://github.com/apache/kafka.git
KAFKA-8804: Secure internal Connect REST endpoints (#7310)
Implemented KIP-507 to secure the internal Connect REST endpoints that are only for intra-cluster communication. A new V2 of the Connect subprotocol enables this feature, where the leader generates a new session key, shares it with the other workers via the configuration topic, and workers send and validate requests to these internal endpoints using the shared key. Currently the internal `POST /connectors/<connector>/tasks` endpoint is the only one that is secured. This change adds unit tests and makes some small alterations to system tests to target the new `sessioned` Connect subprotocol. A new integration test ensures that the endpoint is actually secured (i.e., requests with missing/invalid signatures are rejected with a 400 BAD RESPONSE status). Author: Chris Egerton <chrise@confluent.io> Reviewed: Konstantine Karantasis <konstantine@confluent.io>, Randall Hauch <rhauch@gmail.com>
This commit is contained in:
parent
16edc54048
commit
791d0d61bf
|
|
@ -346,6 +346,7 @@
|
|||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.reflections"/>
|
||||
<allow pkg="org.reflections.util"/>
|
||||
<allow pkg="javax.crypto"/>
|
||||
|
||||
<subpackage name="rest">
|
||||
<allow pkg="org.eclipse.jetty" />
|
||||
|
|
@ -363,6 +364,10 @@
|
|||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.maven.artifact.versioning" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="distributed">
|
||||
<allow pkg="javax.ws.rs.core" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="cli">
|
||||
|
|
@ -376,6 +381,7 @@
|
|||
<subpackage name="storage">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.apache.kafka.common.serialization" />
|
||||
<allow pkg="javax.crypto.spec"/>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="util">
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@
|
|||
files="Values.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(DistributedHerder|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask).java"/>
|
||||
files="(DistributedHerder|RestClient|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask).java"/>
|
||||
|
||||
<suppress checks="MethodLength"
|
||||
files="Values.java"/>
|
||||
|
|
|
|||
|
|
@ -87,6 +87,7 @@ public class ConnectMetricsRegistry {
|
|||
public final MetricNameTemplate taskStartupSuccessPercentage;
|
||||
public final MetricNameTemplate taskStartupFailureTotal;
|
||||
public final MetricNameTemplate taskStartupFailurePercentage;
|
||||
public final MetricNameTemplate connectProtocol;
|
||||
public final MetricNameTemplate leaderName;
|
||||
public final MetricNameTemplate epoch;
|
||||
public final MetricNameTemplate rebalanceCompletedTotal;
|
||||
|
|
@ -291,6 +292,7 @@ public class ConnectMetricsRegistry {
|
|||
/***** Worker rebalance level *****/
|
||||
Set<String> rebalanceTags = new LinkedHashSet<>(tags);
|
||||
|
||||
connectProtocol = createTemplate("connect-protocol", WORKER_REBALANCE_GROUP_NAME, "The Connect protocol used by this cluster", rebalanceTags);
|
||||
leaderName = createTemplate("leader-name", WORKER_REBALANCE_GROUP_NAME, "The name of the group leader.", rebalanceTags);
|
||||
epoch = createTemplate("epoch", WORKER_REBALANCE_GROUP_NAME, "The epoch or generation number of this worker.", rebalanceTags);
|
||||
rebalanceCompletedTotal = createTemplate("completed-rebalances-total", WORKER_REBALANCE_GROUP_NAME,
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
|
|
@ -120,8 +121,10 @@ public interface Herder {
|
|||
* @param connName connector to update
|
||||
* @param configs list of configurations
|
||||
* @param callback callback to invoke upon completion
|
||||
* @param requestSignature the signature of the request made for this task (re-)configuration;
|
||||
* may be null if no signature was provided
|
||||
*/
|
||||
void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback);
|
||||
void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback, InternalRequestSignature requestSignature);
|
||||
|
||||
/**
|
||||
* Get a list of connectors currently running in this cluster.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime;
|
||||
|
||||
import javax.crypto.SecretKey;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A session key, which can be used to validate internal REST requests between workers.
|
||||
*/
|
||||
public class SessionKey {
|
||||
|
||||
private final SecretKey key;
|
||||
private final long creationTimestamp;
|
||||
|
||||
/**
|
||||
* Create a new session key with the given key value and creation timestamp
|
||||
* @param key the actual cryptographic key to use for request validation; may not be null
|
||||
* @param creationTimestamp the time at which the key was generated
|
||||
*/
|
||||
public SessionKey(SecretKey key, long creationTimestamp) {
|
||||
this.key = Objects.requireNonNull(key, "Key may not be null");
|
||||
this.creationTimestamp = creationTimestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cryptographic key to use for request validation.
|
||||
*
|
||||
* @return the cryptographic key; may not be null
|
||||
*/
|
||||
public SecretKey key() {
|
||||
return key;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the time at which the key was generated.
|
||||
*
|
||||
* @return the time at which the key was generated
|
||||
*/
|
||||
public long creationTimestamp() {
|
||||
return creationTimestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
SessionKey that = (SessionKey) o;
|
||||
return creationTimestamp == that.creationTimestamp
|
||||
&& key.equals(that.key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(key, creationTimestamp);
|
||||
}
|
||||
}
|
||||
|
|
@ -17,6 +17,7 @@
|
|||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.config.provider.ConfigProvider;
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
|
@ -37,6 +38,7 @@ public class ClusterConfigState {
|
|||
public static final long NO_OFFSET = -1;
|
||||
public static final ClusterConfigState EMPTY = new ClusterConfigState(
|
||||
NO_OFFSET,
|
||||
null,
|
||||
Collections.<String, Integer>emptyMap(),
|
||||
Collections.<String, Map<String, String>>emptyMap(),
|
||||
Collections.<String, TargetState>emptyMap(),
|
||||
|
|
@ -44,6 +46,7 @@ public class ClusterConfigState {
|
|||
Collections.<String>emptySet());
|
||||
|
||||
private final long offset;
|
||||
private final SessionKey sessionKey;
|
||||
private final Map<String, Integer> connectorTaskCounts;
|
||||
private final Map<String, Map<String, String>> connectorConfigs;
|
||||
private final Map<String, TargetState> connectorTargetStates;
|
||||
|
|
@ -52,12 +55,14 @@ public class ClusterConfigState {
|
|||
private final WorkerConfigTransformer configTransformer;
|
||||
|
||||
public ClusterConfigState(long offset,
|
||||
SessionKey sessionKey,
|
||||
Map<String, Integer> connectorTaskCounts,
|
||||
Map<String, Map<String, String>> connectorConfigs,
|
||||
Map<String, TargetState> connectorTargetStates,
|
||||
Map<ConnectorTaskId, Map<String, String>> taskConfigs,
|
||||
Set<String> inconsistentConnectors) {
|
||||
this(offset,
|
||||
sessionKey,
|
||||
connectorTaskCounts,
|
||||
connectorConfigs,
|
||||
connectorTargetStates,
|
||||
|
|
@ -67,6 +72,7 @@ public class ClusterConfigState {
|
|||
}
|
||||
|
||||
public ClusterConfigState(long offset,
|
||||
SessionKey sessionKey,
|
||||
Map<String, Integer> connectorTaskCounts,
|
||||
Map<String, Map<String, String>> connectorConfigs,
|
||||
Map<String, TargetState> connectorTargetStates,
|
||||
|
|
@ -74,6 +80,7 @@ public class ClusterConfigState {
|
|||
Set<String> inconsistentConnectors,
|
||||
WorkerConfigTransformer configTransformer) {
|
||||
this.offset = offset;
|
||||
this.sessionKey = sessionKey;
|
||||
this.connectorTaskCounts = connectorTaskCounts;
|
||||
this.connectorConfigs = connectorConfigs;
|
||||
this.connectorTargetStates = connectorTargetStates;
|
||||
|
|
@ -91,6 +98,14 @@ public class ClusterConfigState {
|
|||
return offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the latest session key from the config state
|
||||
* @return the {@link SessionKey session key}; may be null if no key has been read yet
|
||||
*/
|
||||
public SessionKey sessionKey() {
|
||||
return sessionKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether this snapshot contains configuration for a connector.
|
||||
* @param connector name of the connector
|
||||
|
|
@ -229,6 +244,7 @@ public class ClusterConfigState {
|
|||
public String toString() {
|
||||
return "ClusterConfigState{" +
|
||||
"offset=" + offset +
|
||||
", sessionKey=" + (sessionKey != null ? "[hidden]" : "null") +
|
||||
", connectorTaskCounts=" + connectorTaskCounts +
|
||||
", connectorConfigs=" + connectorConfigs +
|
||||
", taskConfigs=" + taskConfigs +
|
||||
|
|
@ -242,6 +258,7 @@ public class ClusterConfigState {
|
|||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ClusterConfigState that = (ClusterConfigState) o;
|
||||
return offset == that.offset &&
|
||||
Objects.equals(sessionKey, that.sessionKey) &&
|
||||
Objects.equals(connectorTaskCounts, that.connectorTaskCounts) &&
|
||||
Objects.equals(connectorConfigs, that.connectorConfigs) &&
|
||||
Objects.equals(connectorTargetStates, that.connectorTargetStates) &&
|
||||
|
|
@ -254,6 +271,7 @@ public class ClusterConfigState {
|
|||
public int hashCode() {
|
||||
return Objects.hash(
|
||||
offset,
|
||||
sessionKey,
|
||||
connectorTaskCounts,
|
||||
connectorConfigs,
|
||||
connectorTargetStates,
|
||||
|
|
|
|||
|
|
@ -19,6 +19,10 @@ package org.apache.kafka.connect.runtime.distributed;
|
|||
import java.util.Arrays;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
|
||||
/**
|
||||
* An enumeration of the modes available to the worker to signal which Connect protocols are
|
||||
* enabled at any time.
|
||||
|
|
@ -28,7 +32,11 @@ import java.util.Locale;
|
|||
*
|
||||
* {@code COMPATIBLE} signifies that this worker supports both eager and incremental cooperative
|
||||
* Connect protocols and will use the version that is elected by the Kafka broker coordinator
|
||||
* during rebalancing.
|
||||
* during rebalance.
|
||||
*
|
||||
* {@code SESSIONED} signifies that this worker supports all of the above protocols in addition to
|
||||
* a protocol that uses incremental cooperative rebalancing for worker assignment and uses session
|
||||
* keys distributed via the config topic to verify internal REST requests
|
||||
*/
|
||||
public enum ConnectProtocolCompatibility {
|
||||
EAGER {
|
||||
|
|
@ -36,6 +44,11 @@ public enum ConnectProtocolCompatibility {
|
|||
public String protocol() {
|
||||
return "default";
|
||||
}
|
||||
|
||||
@Override
|
||||
public short protocolVersion() {
|
||||
return CONNECT_PROTOCOL_V0;
|
||||
}
|
||||
},
|
||||
|
||||
COMPATIBLE {
|
||||
|
|
@ -43,11 +56,28 @@ public enum ConnectProtocolCompatibility {
|
|||
public String protocol() {
|
||||
return "compatible";
|
||||
}
|
||||
|
||||
@Override
|
||||
public short protocolVersion() {
|
||||
return CONNECT_PROTOCOL_V1;
|
||||
}
|
||||
},
|
||||
|
||||
SESSIONED {
|
||||
@Override
|
||||
public String protocol() {
|
||||
return "sessioned";
|
||||
}
|
||||
|
||||
@Override
|
||||
public short protocolVersion() {
|
||||
return CONNECT_PROTOCOL_V2;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Return the enum that corresponds to the name that is given as an argument;
|
||||
* if the no mapping is found {@code IllegalArgumentException} is thrown.
|
||||
* if no mapping is found {@code IllegalArgumentException} is thrown.
|
||||
*
|
||||
* @param name the name of the protocol compatibility mode
|
||||
* @return the enum that corresponds to the protocol compatibility mode
|
||||
|
|
@ -60,11 +90,39 @@ public enum ConnectProtocolCompatibility {
|
|||
"Unknown Connect protocol compatibility mode: " + name));
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the enum that corresponds to the Connect protocol version that is given as an argument;
|
||||
* if no mapping is found {@code IllegalArgumentException} is thrown.
|
||||
*
|
||||
* @param protocolVersion the version of the protocol; for example,
|
||||
* {@link ConnectProtocol#CONNECT_PROTOCOL_V0 CONNECT_PROTOCOL_V0}. May not be null
|
||||
* @return the enum that corresponds to the protocol compatibility mode
|
||||
*/
|
||||
public static ConnectProtocolCompatibility fromProtocolVersion(short protocolVersion) {
|
||||
switch (protocolVersion) {
|
||||
case CONNECT_PROTOCOL_V0:
|
||||
return EAGER;
|
||||
case CONNECT_PROTOCOL_V1:
|
||||
return COMPATIBLE;
|
||||
case CONNECT_PROTOCOL_V2:
|
||||
return SESSIONED;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown Connect protocol version: " + protocolVersion);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the version of the protocol for this mode.
|
||||
*
|
||||
* @return the protocol version
|
||||
*/
|
||||
public abstract short protocolVersion();
|
||||
|
||||
/**
|
||||
* Return the name of the protocol that this mode will use in {@code ProtocolMetadata}.
|
||||
*
|
||||
|
|
@ -74,7 +132,7 @@ public enum ConnectProtocolCompatibility {
|
|||
|
||||
/**
|
||||
* Return the enum that corresponds to the protocol name that is given as an argument;
|
||||
* if the no mapping is found {@code IllegalArgumentException} is thrown.
|
||||
* if no mapping is found {@code IllegalArgumentException} is thrown.
|
||||
*
|
||||
* @param protocolName the name of the connect protocol
|
||||
* @return the enum that corresponds to the protocol compatibility mode that supports the
|
||||
|
|
|
|||
|
|
@ -22,15 +22,20 @@ import org.apache.kafka.common.config.ConfigException;
|
|||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.Mac;
|
||||
import java.security.InvalidParameterException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
|
||||
import static org.apache.kafka.common.config.ConfigDef.Range.between;
|
||||
|
||||
public class DistributedConfig extends WorkerConfig {
|
||||
private static final ConfigDef CONFIG;
|
||||
|
||||
/*
|
||||
* NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS
|
||||
* THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
|
||||
|
|
@ -138,7 +143,7 @@ public class DistributedConfig extends WorkerConfig {
|
|||
*/
|
||||
public static final String CONNECT_PROTOCOL_CONFIG = "connect.protocol";
|
||||
public static final String CONNECT_PROTOCOL_DOC = "Compatibility mode for Kafka Connect Protocol";
|
||||
public static final String CONNECT_PROTOCOL_DEFAULT = ConnectProtocolCompatibility.COMPATIBLE.toString();
|
||||
public static final String CONNECT_PROTOCOL_DEFAULT = ConnectProtocolCompatibility.SESSIONED.toString();
|
||||
|
||||
/**
|
||||
* <code>connect.protocol</code>
|
||||
|
|
@ -150,162 +155,220 @@ public class DistributedConfig extends WorkerConfig {
|
|||
+ "period the connectors and tasks of the departed workers remain unassigned";
|
||||
public static final int SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT = Math.toIntExact(TimeUnit.SECONDS.toMillis(300));
|
||||
|
||||
static {
|
||||
CONFIG = baseConfigDef()
|
||||
.define(GROUP_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
GROUP_ID_DOC)
|
||||
.define(SESSION_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
10000,
|
||||
ConfigDef.Importance.HIGH,
|
||||
SESSION_TIMEOUT_MS_DOC)
|
||||
.define(REBALANCE_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
60000,
|
||||
ConfigDef.Importance.HIGH,
|
||||
REBALANCE_TIMEOUT_MS_DOC)
|
||||
.define(HEARTBEAT_INTERVAL_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
3000,
|
||||
ConfigDef.Importance.HIGH,
|
||||
HEARTBEAT_INTERVAL_MS_DOC)
|
||||
.define(CommonClientConfigs.METADATA_MAX_AGE_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
5 * 60 * 1000,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.METADATA_MAX_AGE_DOC)
|
||||
.define(CommonClientConfigs.CLIENT_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
"",
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.CLIENT_ID_DOC)
|
||||
.define(CommonClientConfigs.SEND_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
128 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SEND_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECEIVE_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
32 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.RECEIVE_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
50L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
1000L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC)
|
||||
.define(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
100L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RETRY_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
40 * 1000,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC)
|
||||
/* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */
|
||||
.define(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
9 * 60 * 1000,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC)
|
||||
// security support
|
||||
.define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SECURITY_PROTOCOL_DOC)
|
||||
.withClientSslSupport()
|
||||
.withClientSaslSupport()
|
||||
.define(WORKER_SYNC_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
3000,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_SYNC_TIMEOUT_MS_DOC)
|
||||
.define(WORKER_UNSYNC_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DEFAULT,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DOC)
|
||||
.define(OFFSET_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
OFFSET_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
25,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONFIG_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
CONFIG_TOPIC_CONFIG_DOC)
|
||||
.define(CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
STATUS_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
5,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONNECT_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CONNECT_PROTOCOL_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> {
|
||||
try {
|
||||
ConnectProtocolCompatibility.compatibility((String) value);
|
||||
} catch (Throwable t) {
|
||||
throw new ConfigException(name, value, "Invalid Connect protocol "
|
||||
+ "compatibility");
|
||||
}
|
||||
},
|
||||
() -> "[" + Utils.join(ConnectProtocolCompatibility.values(), ", ") + "]"),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONNECT_PROTOCOL_DOC)
|
||||
.define(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT,
|
||||
between(0, Integer.MAX_VALUE),
|
||||
ConfigDef.Importance.LOW,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC);
|
||||
}
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG = "inter.worker.key.generation.algorithm";
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC = "The algorithm to use for generating internal request keys";
|
||||
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT = "HmacSHA256";
|
||||
|
||||
public static final String INTER_WORKER_KEY_SIZE_CONFIG = "inter.worker.key.size";
|
||||
public static final String INTER_WORKER_KEY_SIZE_DOC = "The size of the key to use for signing internal requests, in bits. "
|
||||
+ "If null, the default key size for the key generation algorithm will be used.";
|
||||
public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null;
|
||||
|
||||
public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms";
|
||||
public static final String INTER_WORKER_KEY_TTL_MS_MS_DOC = "The TTL of generated session keys used for "
|
||||
+ "internal request validation (in milliseconds)";
|
||||
public static final int INTER_WORKER_KEY_TTL_MS_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1));
|
||||
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm";
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests";
|
||||
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256";
|
||||
|
||||
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms";
|
||||
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests";
|
||||
public static final List<String> INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = Collections.singletonList(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT);
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static final ConfigDef CONFIG = baseConfigDef()
|
||||
.define(GROUP_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
GROUP_ID_DOC)
|
||||
.define(SESSION_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(10)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
SESSION_TIMEOUT_MS_DOC)
|
||||
.define(REBALANCE_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.MINUTES.toMillis(1)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
REBALANCE_TIMEOUT_MS_DOC)
|
||||
.define(HEARTBEAT_INTERVAL_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(3)),
|
||||
ConfigDef.Importance.HIGH,
|
||||
HEARTBEAT_INTERVAL_MS_DOC)
|
||||
.define(CommonClientConfigs.METADATA_MAX_AGE_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.MINUTES.toMillis(5),
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.METADATA_MAX_AGE_DOC)
|
||||
.define(CommonClientConfigs.CLIENT_ID_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
"",
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.CLIENT_ID_DOC)
|
||||
.define(CommonClientConfigs.SEND_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
128 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SEND_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECEIVE_BUFFER_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
32 * 1024,
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.RECEIVE_BUFFER_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
50L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.SECONDS.toMillis(1),
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC)
|
||||
.define(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
100L,
|
||||
atLeast(0L),
|
||||
ConfigDef.Importance.LOW,
|
||||
CommonClientConfigs.RETRY_BACKOFF_MS_DOC)
|
||||
.define(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
Math.toIntExact(TimeUnit.SECONDS.toMillis(40)),
|
||||
atLeast(0),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC)
|
||||
/* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */
|
||||
.define(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG,
|
||||
ConfigDef.Type.LONG,
|
||||
TimeUnit.MINUTES.toMillis(9),
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC)
|
||||
// security support
|
||||
.define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
CommonClientConfigs.SECURITY_PROTOCOL_DOC)
|
||||
.withClientSslSupport()
|
||||
.withClientSaslSupport()
|
||||
.define(WORKER_SYNC_TIMEOUT_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
3000,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_SYNC_TIMEOUT_MS_DOC)
|
||||
.define(WORKER_UNSYNC_BACKOFF_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DEFAULT,
|
||||
ConfigDef.Importance.MEDIUM,
|
||||
WORKER_UNSYNC_BACKOFF_MS_DOC)
|
||||
.define(OFFSET_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
OFFSET_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
25,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONFIG_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
CONFIG_TOPIC_CONFIG_DOC)
|
||||
.define(CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_TOPIC_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
ConfigDef.Importance.HIGH,
|
||||
STATUS_STORAGE_TOPIC_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_PARTITIONS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
5,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_PARTITIONS_CONFIG_DOC)
|
||||
.define(STATUS_STORAGE_REPLICATION_FACTOR_CONFIG,
|
||||
ConfigDef.Type.SHORT,
|
||||
(short) 3,
|
||||
atLeast(1),
|
||||
ConfigDef.Importance.LOW,
|
||||
STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC)
|
||||
.define(CONNECT_PROTOCOL_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
CONNECT_PROTOCOL_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> {
|
||||
try {
|
||||
ConnectProtocolCompatibility.compatibility((String) value);
|
||||
} catch (Throwable t) {
|
||||
throw new ConfigException(name, value, "Invalid Connect protocol "
|
||||
+ "compatibility");
|
||||
}
|
||||
},
|
||||
() -> "[" + Utils.join(ConnectProtocolCompatibility.values(), ", ") + "]"),
|
||||
ConfigDef.Importance.LOW,
|
||||
CONNECT_PROTOCOL_DOC)
|
||||
.define(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT,
|
||||
between(0, Integer.MAX_VALUE),
|
||||
ConfigDef.Importance.LOW,
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC)
|
||||
.define(INTER_WORKER_KEY_TTL_MS_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
INTER_WORKER_KEY_TTL_MS_MS_DEFAULT,
|
||||
between(0, Integer.MAX_VALUE),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_TTL_MS_MS_DOC)
|
||||
.define(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateKeyAlgorithm(name, (String) value),
|
||||
() -> "Any KeyGenerator algorithm supported by the worker JVM"
|
||||
),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC)
|
||||
.define(INTER_WORKER_KEY_SIZE_CONFIG,
|
||||
ConfigDef.Type.INT,
|
||||
INTER_WORKER_KEY_SIZE_DEFAULT,
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_KEY_SIZE_DOC)
|
||||
.define(INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG,
|
||||
ConfigDef.Type.STRING,
|
||||
INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateSignatureAlgorithm(name, (String) value),
|
||||
() -> "Any MAC algorithm supported by the worker JVM"),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_SIGNATURE_ALGORITHM_DOC)
|
||||
.define(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG,
|
||||
ConfigDef.Type.LIST,
|
||||
INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT,
|
||||
ConfigDef.LambdaValidator.with(
|
||||
(name, value) -> validateSignatureAlgorithms(name, (List<String>) value),
|
||||
() -> "A list of one or more MAC algorithms, each supported by the worker JVM"
|
||||
),
|
||||
ConfigDef.Importance.LOW,
|
||||
INTER_WORKER_VERIFICATION_ALGORITHMS_DOC);
|
||||
|
||||
@Override
|
||||
public Integer getRebalanceTimeout() {
|
||||
|
|
@ -314,9 +377,65 @@ public class DistributedConfig extends WorkerConfig {
|
|||
|
||||
public DistributedConfig(Map<String, String> props) {
|
||||
super(CONFIG, props);
|
||||
getInternalRequestKeyGenerator(); // Check here for a valid key size + key algorithm to fail fast if either are invalid
|
||||
validateKeyAlgorithmAndVerificationAlgorithms();
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
System.out.println(CONFIG.toHtml());
|
||||
}
|
||||
|
||||
public KeyGenerator getInternalRequestKeyGenerator() {
|
||||
try {
|
||||
KeyGenerator result = KeyGenerator.getInstance(getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG));
|
||||
Optional.ofNullable(getInt(INTER_WORKER_KEY_SIZE_CONFIG)).ifPresent(result::init);
|
||||
return result;
|
||||
} catch (NoSuchAlgorithmException | InvalidParameterException e) {
|
||||
throw new ConfigException(String.format(
|
||||
"Unable to create key generator with algorithm %s and key size %d: %s",
|
||||
getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG),
|
||||
getInt(INTER_WORKER_KEY_SIZE_CONFIG),
|
||||
e.getMessage()
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
private void validateKeyAlgorithmAndVerificationAlgorithms() {
|
||||
String keyAlgorithm = getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG);
|
||||
List<String> verificationAlgorithms = getList(INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG);
|
||||
if (!verificationAlgorithms.contains(keyAlgorithm)) {
|
||||
throw new ConfigException(
|
||||
INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG,
|
||||
keyAlgorithm,
|
||||
String.format("Key generation algorithm must be present in %s list", INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private static void validateSignatureAlgorithms(String configName, List<String> algorithms) {
|
||||
if (algorithms.isEmpty()) {
|
||||
throw new ConfigException(
|
||||
configName,
|
||||
algorithms,
|
||||
"At least one signature verification algorithm must be provided"
|
||||
);
|
||||
}
|
||||
algorithms.forEach(algorithm -> validateSignatureAlgorithm(configName, algorithm));
|
||||
}
|
||||
|
||||
private static void validateSignatureAlgorithm(String configName, String algorithm) {
|
||||
try {
|
||||
Mac.getInstance(algorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new ConfigException(configName, algorithm, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private static void validateKeyAlgorithm(String configName, String algorithm) {
|
||||
try {
|
||||
KeyGenerator.getInstance(algorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new ConfigException(configName, algorithm, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,14 +42,18 @@ import org.apache.kafka.connect.runtime.ConnectMetricsRegistry;
|
|||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.HerderConnectorContext;
|
||||
import org.apache.kafka.connect.runtime.HerderRequest;
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.SinkConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.SourceConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.runtime.Worker;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.RestClient;
|
||||
import org.apache.kafka.connect.runtime.rest.RestServer;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException;
|
||||
import org.apache.kafka.connect.sink.SinkConnector;
|
||||
import org.apache.kafka.connect.storage.ConfigBackingStore;
|
||||
import org.apache.kafka.connect.storage.StatusBackingStore;
|
||||
|
|
@ -58,6 +62,9 @@ import org.apache.kafka.connect.util.ConnectorTaskId;
|
|||
import org.apache.kafka.connect.util.SinkUtils;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.ws.rs.core.Response;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
|
@ -84,6 +91,7 @@ import java.util.concurrent.atomic.AtomicReference;
|
|||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
|
|
@ -132,6 +140,10 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
private final int workerSyncTimeoutMs;
|
||||
private final long workerTasksShutdownTimeoutMs;
|
||||
private final int workerUnsyncBackoffMs;
|
||||
private final int keyRotationIntervalMs;
|
||||
private final String requestSignatureAlgorithm;
|
||||
private final List<String> keySignatureVerificationAlgorithms;
|
||||
private final KeyGenerator keyGenerator;
|
||||
|
||||
private final ExecutorService herderExecutor;
|
||||
private final ExecutorService forwardRequestExecutor;
|
||||
|
|
@ -161,6 +173,9 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
private boolean needsReconfigRebalance;
|
||||
private volatile int generation;
|
||||
private volatile long scheduledRebalance;
|
||||
private SecretKey sessionKey;
|
||||
private volatile long keyExpiration;
|
||||
private short currentProtocolVersion;
|
||||
|
||||
private final DistributedConfig config;
|
||||
|
||||
|
|
@ -197,6 +212,10 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
this.workerSyncTimeoutMs = config.getInt(DistributedConfig.WORKER_SYNC_TIMEOUT_MS_CONFIG);
|
||||
this.workerTasksShutdownTimeoutMs = config.getLong(DistributedConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG);
|
||||
this.workerUnsyncBackoffMs = config.getInt(DistributedConfig.WORKER_UNSYNC_BACKOFF_MS_CONFIG);
|
||||
this.requestSignatureAlgorithm = config.getString(DistributedConfig.INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG);
|
||||
this.keyRotationIntervalMs = config.getInt(DistributedConfig.INTER_WORKER_KEY_TTL_MS_CONFIG);
|
||||
this.keySignatureVerificationAlgorithms = config.getList(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG);
|
||||
this.keyGenerator = config.getInternalRequestKeyGenerator();
|
||||
|
||||
String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
|
||||
String clientId = clientIdConfig.length() <= 0 ? "connect-" + CONNECT_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
|
||||
|
|
@ -225,6 +244,24 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
needsReconfigRebalance = false;
|
||||
canReadConfigs = true; // We didn't try yet, but Configs are readable until proven otherwise
|
||||
scheduledRebalance = Long.MAX_VALUE;
|
||||
keyExpiration = Long.MAX_VALUE;
|
||||
sessionKey = null;
|
||||
|
||||
currentProtocolVersion = ConnectProtocolCompatibility.compatibility(
|
||||
config.getString(DistributedConfig.CONNECT_PROTOCOL_CONFIG)
|
||||
).protocolVersion();
|
||||
if (!internalRequestValidationEnabled(currentProtocolVersion)) {
|
||||
log.warn(
|
||||
"Internal request verification will be disabled for this cluster as this worker's {} configuration has been set to '{}'. "
|
||||
+ "If this is not intentional, either remove the '{}' configuration from the worker config file or change its value "
|
||||
+ "to '{}'. If this configuration is left as-is, the cluster will be insecure; for more information, see KIP-507: "
|
||||
+ "https://cwiki.apache.org/confluence/display/KAFKA/KIP-507%3A+Securing+Internal+Connect+REST+Endpoints",
|
||||
DistributedConfig.CONNECT_PROTOCOL_CONFIG,
|
||||
config.getString(DistributedConfig.CONNECT_PROTOCOL_CONFIG),
|
||||
DistributedConfig.CONNECT_PROTOCOL_CONFIG,
|
||||
ConnectProtocolCompatibility.SESSIONED.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -278,8 +315,17 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
return;
|
||||
}
|
||||
|
||||
long now = time.milliseconds();
|
||||
|
||||
if (checkForKeyRotation(now)) {
|
||||
keyExpiration = Long.MAX_VALUE;
|
||||
configBackingStore.putSessionKey(new SessionKey(
|
||||
keyGenerator.generateKey(),
|
||||
now
|
||||
));
|
||||
}
|
||||
|
||||
// Process any external requests
|
||||
final long now = time.milliseconds();
|
||||
long nextRequestTimeoutMs = Long.MAX_VALUE;
|
||||
while (true) {
|
||||
final DistributedHerderRequest next = peekWithoutException();
|
||||
|
|
@ -306,6 +352,11 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
log.debug("Scheduled rebalance at: {} (now: {} nextRequestTimeoutMs: {}) ",
|
||||
scheduledRebalance, now, nextRequestTimeoutMs);
|
||||
}
|
||||
if (internalRequestValidationEnabled() && keyExpiration < Long.MAX_VALUE) {
|
||||
nextRequestTimeoutMs = Math.min(nextRequestTimeoutMs, Math.max(keyExpiration - now, 0));
|
||||
log.debug("Scheduled next key rotation at: {} (now: {} nextRequestTimeoutMs: {}) ",
|
||||
keyExpiration, now, nextRequestTimeoutMs);
|
||||
}
|
||||
|
||||
// Process any configuration updates
|
||||
AtomicReference<Set<String>> connectorConfigUpdatesCopy = new AtomicReference<>();
|
||||
|
|
@ -360,6 +411,31 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
}
|
||||
}
|
||||
|
||||
private synchronized boolean checkForKeyRotation(long now) {
|
||||
if (internalRequestValidationEnabled()) {
|
||||
if (isLeader()) {
|
||||
if (sessionKey == null) {
|
||||
log.debug("Internal request signing is enabled but no session key has been distributed yet. "
|
||||
+ "Distributing new key now.");
|
||||
return true;
|
||||
} else if (keyExpiration <= now) {
|
||||
log.debug("Existing key has expired. Distributing new key now.");
|
||||
return true;
|
||||
} else if (!sessionKey.getAlgorithm().equals(keyGenerator.getAlgorithm())
|
||||
|| sessionKey.getEncoded().length != keyGenerator.generateKey().getEncoded().length) {
|
||||
log.debug("Previously-distributed key uses different algorithm/key size "
|
||||
+ "than required by current worker configuration. Distributing new key now.");
|
||||
return true;
|
||||
}
|
||||
} else if (sessionKey == null) {
|
||||
// This happens on startup for follower workers; the snapshot contains the session key,
|
||||
// but no callback in the config update listener has been fired for it yet.
|
||||
sessionKey = configState.sessionKey().key();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private synchronized boolean updateConfigsWithEager(AtomicReference<Set<String>> connectorConfigUpdatesCopy,
|
||||
AtomicReference<Set<String>> connectorTargetStateChangesCopy) {
|
||||
// This branch is here to avoid creating a snapshot if not needed
|
||||
|
|
@ -481,7 +557,7 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
// additionally, if the worker is running the connector itself, then we need to
|
||||
// request reconfiguration to ensure that config changes while paused take effect
|
||||
if (targetState == TargetState.STARTED)
|
||||
reconfigureConnectorTasksWithRetry(connector);
|
||||
reconfigureConnectorTasksWithRetry(time.milliseconds(), connector);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -706,7 +782,7 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
new Callable<Void>() {
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
reconfigureConnectorTasksWithRetry(connName);
|
||||
reconfigureConnectorTasksWithRetry(time.milliseconds(), connName);
|
||||
return null;
|
||||
}
|
||||
},
|
||||
|
|
@ -751,8 +827,36 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback) {
|
||||
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback, InternalRequestSignature requestSignature) {
|
||||
log.trace("Submitting put task configuration request {}", connName);
|
||||
if (internalRequestValidationEnabled()) {
|
||||
ConnectRestException requestValidationError = null;
|
||||
if (requestSignature == null) {
|
||||
requestValidationError = new BadRequestException("Internal request missing required signature");
|
||||
} else if (!keySignatureVerificationAlgorithms.contains(requestSignature.keyAlgorithm())) {
|
||||
requestValidationError = new BadRequestException(String.format(
|
||||
"This worker does not support the '%s' key signing algorithm used by other workers. "
|
||||
+ "This worker is currently configured to use: %s. "
|
||||
+ "Check that all workers' configuration files permit the same set of signature algorithms, "
|
||||
+ "and correct any misconfigured worker and restart it.",
|
||||
requestSignature.keyAlgorithm(),
|
||||
keySignatureVerificationAlgorithms
|
||||
));
|
||||
} else {
|
||||
synchronized (this) {
|
||||
if (!requestSignature.isValid(sessionKey)) {
|
||||
requestValidationError = new ConnectRestException(
|
||||
Response.Status.FORBIDDEN,
|
||||
"Internal request contained invalid signature."
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (requestValidationError != null) {
|
||||
callback.onCompletion(requestValidationError, null);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
addRequest(
|
||||
new Callable<Void>() {
|
||||
|
|
@ -1082,7 +1186,7 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
// task configs if they are actually different from the existing ones to avoid unnecessary updates when this is
|
||||
// just restoring an existing connector.
|
||||
if (started && initialState == TargetState.STARTED)
|
||||
reconfigureConnectorTasksWithRetry(connectorName);
|
||||
reconfigureConnectorTasksWithRetry(time.milliseconds(), connectorName);
|
||||
|
||||
return started;
|
||||
}
|
||||
|
|
@ -1117,7 +1221,7 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
};
|
||||
}
|
||||
|
||||
private void reconfigureConnectorTasksWithRetry(final String connName) {
|
||||
private void reconfigureConnectorTasksWithRetry(long initialRequestTime, final String connName) {
|
||||
reconfigureConnector(connName, new Callback<Void>() {
|
||||
@Override
|
||||
public void onCompletion(Throwable error, Void result) {
|
||||
|
|
@ -1126,12 +1230,16 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
// never makes progress. The retry has to run through a DistributedHerderRequest since this callback could be happening
|
||||
// from the HTTP request forwarding thread.
|
||||
if (error != null) {
|
||||
log.error("Failed to reconfigure connector's tasks, retrying after backoff:", error);
|
||||
if (isPossibleExpiredKeyException(initialRequestTime, error)) {
|
||||
log.debug("Failed to reconfigure connector's tasks, possibly due to expired session key. Retrying after backoff");
|
||||
} else {
|
||||
log.error("Failed to reconfigure connector's tasks, retrying after backoff:", error);
|
||||
}
|
||||
addRequest(RECONFIGURE_CONNECTOR_TASKS_BACKOFF_MS,
|
||||
new Callable<Void>() {
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
reconfigureConnectorTasksWithRetry(connName);
|
||||
reconfigureConnectorTasksWithRetry(initialRequestTime, connName);
|
||||
return null;
|
||||
}
|
||||
}, new Callback<Void>() {
|
||||
|
|
@ -1147,6 +1255,15 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
});
|
||||
}
|
||||
|
||||
boolean isPossibleExpiredKeyException(long initialRequestTime, Throwable error) {
|
||||
if (error instanceof ConnectRestException) {
|
||||
ConnectRestException connectError = (ConnectRestException) error;
|
||||
return connectError.statusCode() == Response.Status.FORBIDDEN.getStatusCode()
|
||||
&& initialRequestTime + TimeUnit.MINUTES.toMillis(1) >= time.milliseconds();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Updates configurations for a connector by requesting them from the connector, filling in parameters provided
|
||||
// by the system, then checks whether any configs have actually changed before submitting the new configs to storage
|
||||
private void reconfigureConnector(final String connName, final Callback<Void> cb) {
|
||||
|
|
@ -1202,7 +1319,8 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
return;
|
||||
}
|
||||
String reconfigUrl = RestServer.urlJoin(leaderUrl, "/connectors/" + connName + "/tasks");
|
||||
RestClient.httpRequest(reconfigUrl, "POST", null, rawTaskProps, null, config);
|
||||
log.trace("Forwarding task configurations for connector {} to leader", connName);
|
||||
RestClient.httpRequest(reconfigUrl, "POST", null, rawTaskProps, null, config, sessionKey, requestSignatureAlgorithm);
|
||||
cb.onCompletion(null, null);
|
||||
} catch (ConnectException e) {
|
||||
log.error("Request to leader to reconfigure connector tasks failed", e);
|
||||
|
|
@ -1239,6 +1357,14 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
return req;
|
||||
}
|
||||
|
||||
private boolean internalRequestValidationEnabled() {
|
||||
return internalRequestValidationEnabled(member.currentProtocolVersion());
|
||||
}
|
||||
|
||||
private static boolean internalRequestValidationEnabled(short protocolVersion) {
|
||||
return protocolVersion >= CONNECT_PROTOCOL_V2;
|
||||
}
|
||||
|
||||
private DistributedHerderRequest peekWithoutException() {
|
||||
try {
|
||||
return requests.isEmpty() ? null : requests.first();
|
||||
|
|
@ -1306,6 +1432,21 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
}
|
||||
member.wakeup();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onSessionKeyUpdate(SessionKey sessionKey) {
|
||||
log.info("Session key updated");
|
||||
|
||||
synchronized (DistributedHerder.this) {
|
||||
DistributedHerder.this.sessionKey = sessionKey.key();
|
||||
// Track the expiration of the key if and only if this worker is the leader
|
||||
// Followers will receive rotated keys from the follower and won't be responsible for
|
||||
// tracking expiration and distributing new keys themselves
|
||||
if (isLeader() && keyRotationIntervalMs > 0) {
|
||||
DistributedHerder.this.keyExpiration = sessionKey.creationTimestamp() + keyRotationIntervalMs;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class DistributedHerderRequest implements HerderRequest, Comparable<DistributedHerderRequest> {
|
||||
|
|
@ -1394,14 +1535,45 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
// catch up (or backoff if we fail) not executed in a callback, and so we'll be able to invoke other
|
||||
// group membership actions (e.g., we may need to explicitly leave the group if we cannot handle the
|
||||
// assigned tasks).
|
||||
log.info("Joined group at generation {} and got assignment: {}", generation, assignment);
|
||||
short priorProtocolVersion = currentProtocolVersion;
|
||||
DistributedHerder.this.currentProtocolVersion = member.currentProtocolVersion();
|
||||
log.info(
|
||||
"Joined group at generation {} with protocol version {} and got assignment: {}",
|
||||
generation,
|
||||
DistributedHerder.this.currentProtocolVersion,
|
||||
assignment
|
||||
);
|
||||
synchronized (DistributedHerder.this) {
|
||||
DistributedHerder.this.assignment = assignment;
|
||||
DistributedHerder.this.generation = generation;
|
||||
int delay = assignment.delay();
|
||||
DistributedHerder.this.scheduledRebalance = delay > 0
|
||||
? time.milliseconds() + delay
|
||||
: Long.MAX_VALUE;
|
||||
? time.milliseconds() + delay
|
||||
: Long.MAX_VALUE;
|
||||
|
||||
boolean requestValidationWasEnabled = internalRequestValidationEnabled(priorProtocolVersion);
|
||||
boolean requestValidationNowEnabled = internalRequestValidationEnabled(currentProtocolVersion);
|
||||
if (requestValidationNowEnabled != requestValidationWasEnabled) {
|
||||
// Internal request verification has been switched on or off; let the user know
|
||||
if (requestValidationNowEnabled) {
|
||||
log.info("Internal request validation has been re-enabled");
|
||||
} else {
|
||||
log.warn(
|
||||
"The protocol used by this Connect cluster has been downgraded from '{}' to '{}' and internal request "
|
||||
+ "validation is now disabled. This is most likely caused by a new worker joining the cluster with an "
|
||||
+ "older protocol specified for the {} configuration; if this is not intentional, either remove the {} "
|
||||
+ "configuration from that worker's config file, or change its value to '{}'. If this configuration is "
|
||||
+ "left as-is, the cluster will be insecure; for more information, see KIP-507: "
|
||||
+ "https://cwiki.apache.org/confluence/display/KAFKA/KIP-507%3A+Securing+Internal+Connect+REST+Endpoints",
|
||||
ConnectProtocolCompatibility.fromProtocolVersion(priorProtocolVersion),
|
||||
ConnectProtocolCompatibility.fromProtocolVersion(DistributedHerder.this.currentProtocolVersion),
|
||||
DistributedConfig.CONNECT_PROTOCOL_CONFIG,
|
||||
DistributedConfig.CONNECT_PROTOCOL_CONFIG,
|
||||
ConnectProtocolCompatibility.SESSIONED.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
rebalanceResolved = false;
|
||||
herderMetrics.rebalanceStarted(time.milliseconds());
|
||||
}
|
||||
|
|
@ -1463,6 +1635,12 @@ public class DistributedHerder extends AbstractHerder implements Runnable {
|
|||
ConnectMetricsRegistry registry = connectMetrics.registry();
|
||||
metricGroup = connectMetrics.group(registry.workerRebalanceGroupName());
|
||||
|
||||
metricGroup.addValueMetric(registry.connectProtocol, new LiteralSupplier<String>() {
|
||||
@Override
|
||||
public String metricValue(long now) {
|
||||
return ConnectProtocolCompatibility.fromProtocolVersion(member.currentProtocolVersion()).name();
|
||||
}
|
||||
});
|
||||
metricGroup.addValueMetric(registry.leaderName, new LiteralSupplier<String>() {
|
||||
@Override
|
||||
public String metricValue(long now) {
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import java.util.stream.IntStream;
|
|||
import static org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.Assignment;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
import static org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.LeaderState;
|
||||
|
||||
/**
|
||||
|
|
@ -99,15 +100,20 @@ public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
|||
log.debug("Max config offset root: {}, local snapshot config offsets root: {}",
|
||||
maxOffset, coordinator.configSnapshot().offset());
|
||||
|
||||
short protocolVersion = memberConfigs.values().stream()
|
||||
.allMatch(state -> state.assignment().version() == CONNECT_PROTOCOL_V2)
|
||||
? CONNECT_PROTOCOL_V2
|
||||
: CONNECT_PROTOCOL_V1;
|
||||
|
||||
Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator);
|
||||
if (leaderOffset == null) {
|
||||
Map<String, ExtendedAssignment> assignments = fillAssignments(
|
||||
memberConfigs.keySet(), Assignment.CONFIG_MISMATCH,
|
||||
leaderId, memberConfigs.get(leaderId).url(), maxOffset, Collections.emptyMap(),
|
||||
Collections.emptyMap(), Collections.emptyMap(), 0);
|
||||
Collections.emptyMap(), Collections.emptyMap(), 0, protocolVersion);
|
||||
return serializeAssignments(assignments);
|
||||
}
|
||||
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator);
|
||||
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator, protocolVersion);
|
||||
}
|
||||
|
||||
private Long ensureLeaderConfig(long maxOffset, WorkerCoordinator coordinator) {
|
||||
|
|
@ -140,12 +146,13 @@ public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
|||
* round of rebalancing
|
||||
* @param coordinator the worker coordinator instance that provide the configuration snapshot
|
||||
* and get assigned the leader state during this assignment
|
||||
* @param protocolVersion the Connect subprotocol version
|
||||
* @return the serialized assignment of tasks to the whole group, including assigned or
|
||||
* revoked tasks
|
||||
*/
|
||||
protected Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset,
|
||||
Map<String, ExtendedWorkerState> memberConfigs,
|
||||
WorkerCoordinator coordinator) {
|
||||
WorkerCoordinator coordinator, short protocolVersion) {
|
||||
// Base set: The previous assignment of connectors-and-tasks is a standalone snapshot that
|
||||
// can be used to calculate derived sets
|
||||
log.debug("Previous assignments: {}", previousAssignment);
|
||||
|
|
@ -281,7 +288,7 @@ public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
|||
Map<String, ExtendedAssignment> assignments =
|
||||
fillAssignments(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId,
|
||||
memberConfigs.get(leaderId).url(), maxOffset, incrementalConnectorAssignments,
|
||||
incrementalTaskAssignments, toRevoke, delay);
|
||||
incrementalTaskAssignments, toRevoke, delay, protocolVersion);
|
||||
|
||||
previousAssignment = computePreviousAssignment(toRevoke, connectorAssignments, taskAssignments, lostAssignments);
|
||||
|
||||
|
|
@ -491,7 +498,7 @@ public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
|||
Map<String, Collection<String>> connectorAssignments,
|
||||
Map<String, Collection<ConnectorTaskId>> taskAssignments,
|
||||
Map<String, ConnectorsAndTasks> revoked,
|
||||
int delay) {
|
||||
int delay, short protocolVersion) {
|
||||
Map<String, ExtendedAssignment> groupAssignment = new HashMap<>();
|
||||
for (String member : members) {
|
||||
Collection<String> connectorsToStart = connectorAssignments.getOrDefault(member, Collections.emptyList());
|
||||
|
|
@ -499,7 +506,7 @@ public class IncrementalCooperativeAssignor implements ConnectAssignor {
|
|||
Collection<String> connectorsToStop = revoked.getOrDefault(member, ConnectorsAndTasks.EMPTY).connectors();
|
||||
Collection<ConnectorTaskId> tasksToStop = revoked.getOrDefault(member, ConnectorsAndTasks.EMPTY).tasks();
|
||||
ExtendedAssignment assignment =
|
||||
new ExtendedAssignment(CONNECT_PROTOCOL_V1, error, leaderId, leaderUrl, maxOffset,
|
||||
new ExtendedAssignment(protocolVersion, error, leaderId, leaderUrl, maxOffset,
|
||||
connectorsToStart, tasksToStart, connectorsToStop, tasksToStop, delay);
|
||||
log.debug("Filling assignment: {} -> {}", member, assignment);
|
||||
groupAssignment.put(member, assignment);
|
||||
|
|
|
|||
|
|
@ -24,7 +24,8 @@ import org.apache.kafka.common.protocol.types.Struct;
|
|||
import org.apache.kafka.common.protocol.types.Type;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol;
|
||||
import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection;
|
||||
|
|
@ -42,6 +43,7 @@ import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.URL_K
|
|||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.VERSION_KEY_NAME;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.COMPATIBLE;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.SESSIONED;
|
||||
|
||||
|
||||
/**
|
||||
|
|
@ -55,6 +57,7 @@ public class IncrementalCooperativeConnectProtocol {
|
|||
public static final String REVOKED_KEY_NAME = "revoked";
|
||||
public static final String SCHEDULED_DELAY_KEY_NAME = "delay";
|
||||
public static final short CONNECT_PROTOCOL_V1 = 1;
|
||||
public static final short CONNECT_PROTOCOL_V2 = 2;
|
||||
public static final boolean TOLERATE_MISSING_FIELDS_WITH_DEFAULTS = true;
|
||||
|
||||
/**
|
||||
|
|
@ -66,6 +69,19 @@ public class IncrementalCooperativeConnectProtocol {
|
|||
private static final Struct CONNECT_PROTOCOL_HEADER_V1 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
|
||||
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V1);
|
||||
|
||||
/**
|
||||
* Connect Protocol Header V2:
|
||||
* <pre>
|
||||
* Version => Int16
|
||||
* </pre>
|
||||
* The V2 protocol is schematically identical to V1, but is used to signify that internal request
|
||||
* verification and distribution of session keys is enabled (for more information, see KIP-507:
|
||||
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-507%3A+Securing+Internal+Connect+REST+Endpoints)
|
||||
*/
|
||||
private static final Struct CONNECT_PROTOCOL_HEADER_V2 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
|
||||
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V2);
|
||||
|
||||
|
||||
/**
|
||||
* Config State V1:
|
||||
* <pre>
|
||||
|
|
@ -132,17 +148,18 @@ public class IncrementalCooperativeConnectProtocol {
|
|||
* Current Assignment => [Byte]
|
||||
* </pre>
|
||||
*/
|
||||
public static ByteBuffer serializeMetadata(ExtendedWorkerState workerState) {
|
||||
public static ByteBuffer serializeMetadata(ExtendedWorkerState workerState, boolean sessioned) {
|
||||
Struct configState = new Struct(CONFIG_STATE_V1)
|
||||
.set(URL_KEY_NAME, workerState.url())
|
||||
.set(CONFIG_OFFSET_KEY_NAME, workerState.offset());
|
||||
// Not a big issue if we embed the protocol version with the assignment in the metadata
|
||||
Struct allocation = new Struct(ALLOCATION_V1)
|
||||
.set(ALLOCATION_KEY_NAME, serializeAssignment(workerState.assignment()));
|
||||
ByteBuffer buffer = ByteBuffer.allocate(CONNECT_PROTOCOL_HEADER_V1.sizeOf()
|
||||
Struct connectProtocolHeader = sessioned ? CONNECT_PROTOCOL_HEADER_V2 : CONNECT_PROTOCOL_HEADER_V1;
|
||||
ByteBuffer buffer = ByteBuffer.allocate(connectProtocolHeader.sizeOf()
|
||||
+ CONFIG_STATE_V1.sizeOf(configState)
|
||||
+ ALLOCATION_V1.sizeOf(allocation));
|
||||
CONNECT_PROTOCOL_HEADER_V1.writeTo(buffer);
|
||||
connectProtocolHeader.writeTo(buffer);
|
||||
CONFIG_STATE_V1.write(buffer, configState);
|
||||
ALLOCATION_V1.write(buffer, allocation);
|
||||
buffer.flip();
|
||||
|
|
@ -154,18 +171,28 @@ public class IncrementalCooperativeConnectProtocol {
|
|||
* with their serialized metadata. The protocols are ordered by preference.
|
||||
*
|
||||
* @param workerState the current state of the worker metadata
|
||||
* @param sessioned whether the {@link ConnectProtocolCompatibility#SESSIONED} protocol should
|
||||
* be included in the collection of supported protocols
|
||||
* @return the collection of Connect protocol metadata
|
||||
*/
|
||||
public static JoinGroupRequestProtocolCollection metadataRequest(ExtendedWorkerState workerState) {
|
||||
public static JoinGroupRequestProtocolCollection metadataRequest(ExtendedWorkerState workerState, boolean sessioned) {
|
||||
// Order matters in terms of protocol preference
|
||||
return new JoinGroupRequestProtocolCollection(Arrays.asList(
|
||||
new JoinGroupRequestProtocol()
|
||||
List<JoinGroupRequestProtocol> joinGroupRequestProtocols = new ArrayList<>();
|
||||
if (sessioned) {
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(SESSIONED.protocol())
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, true).array())
|
||||
);
|
||||
}
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(COMPATIBLE.protocol())
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState).array()),
|
||||
new JoinGroupRequestProtocol()
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, false).array())
|
||||
);
|
||||
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
|
||||
.setName(EAGER.protocol())
|
||||
.setMetadata(ConnectProtocol.serializeMetadata(workerState).array()))
|
||||
.iterator());
|
||||
.setMetadata(ConnectProtocol.serializeMetadata(workerState).array())
|
||||
);
|
||||
return new JoinGroupRequestProtocolCollection(joinGroupRequestProtocols.iterator());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -157,7 +157,9 @@ public class WorkerCoordinator extends AbstractCoordinator implements Closeable
|
|||
case EAGER:
|
||||
return ConnectProtocol.metadataRequest(workerState);
|
||||
case COMPATIBLE:
|
||||
return IncrementalCooperativeConnectProtocol.metadataRequest(workerState);
|
||||
return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, false);
|
||||
case SESSIONED:
|
||||
return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, true);
|
||||
default:
|
||||
throw new IllegalStateException("Unknown Connect protocol compatibility mode " + protocolCompatibility);
|
||||
}
|
||||
|
|
@ -292,7 +294,7 @@ public class WorkerCoordinator extends AbstractCoordinator implements Closeable
|
|||
* @return the current connect protocol version
|
||||
*/
|
||||
public short currentProtocolVersion() {
|
||||
return currentConnectProtocol == EAGER ? (short) 0 : (short) 1;
|
||||
return currentConnectProtocol.protocolVersion();
|
||||
}
|
||||
|
||||
private class WorkerCoordinatorMetrics {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,148 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.runtime.rest;
|
||||
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
|
||||
import org.eclipse.jetty.client.api.Request;
|
||||
|
||||
import javax.crypto.Mac;
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
import java.security.InvalidKeyException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.Objects;
|
||||
|
||||
public class InternalRequestSignature {
|
||||
|
||||
public static final String SIGNATURE_HEADER = "X-Connect-Authorization";
|
||||
public static final String SIGNATURE_ALGORITHM_HEADER = "X-Connect-Request-Signature-Algorithm";
|
||||
|
||||
private final byte[] requestBody;
|
||||
private final Mac mac;
|
||||
private final byte[] requestSignature;
|
||||
|
||||
/**
|
||||
* Add a signature to a request.
|
||||
* @param key the key to sign the request with; may not be null
|
||||
* @param requestBody the body of the request; may not be null
|
||||
* @param signatureAlgorithm the algorithm to use to sign the request; may not be null
|
||||
* @param request the request to add the signature to; may not be null
|
||||
*/
|
||||
public static void addToRequest(SecretKey key, byte[] requestBody, String signatureAlgorithm, Request request) {
|
||||
Mac mac;
|
||||
try {
|
||||
mac = mac(signatureAlgorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new ConnectException(e);
|
||||
}
|
||||
byte[] requestSignature = sign(mac, key, requestBody);
|
||||
request.header(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature))
|
||||
.header(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a signature from a request.
|
||||
* @param requestBody the body of the request; may not be null
|
||||
* @param headers the headers for the request; may be null
|
||||
* @return the signature extracted from the request, or null if one or more request signature
|
||||
* headers was not present
|
||||
*/
|
||||
public static InternalRequestSignature fromHeaders(byte[] requestBody, HttpHeaders headers) {
|
||||
if (headers == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER);
|
||||
String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER);
|
||||
if (signatureAlgorithm == null || encodedSignature == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Mac mac;
|
||||
try {
|
||||
mac = mac(signatureAlgorithm);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new BadRequestException(e.getMessage());
|
||||
}
|
||||
|
||||
byte[] decodedSignature;
|
||||
try {
|
||||
decodedSignature = Base64.getDecoder().decode(encodedSignature);
|
||||
} catch (IllegalArgumentException e) {
|
||||
throw new BadRequestException(e.getMessage());
|
||||
}
|
||||
|
||||
return new InternalRequestSignature(
|
||||
requestBody,
|
||||
mac,
|
||||
decodedSignature
|
||||
);
|
||||
}
|
||||
|
||||
// Public for testing
|
||||
public InternalRequestSignature(byte[] requestBody, Mac mac, byte[] requestSignature) {
|
||||
this.requestBody = requestBody;
|
||||
this.mac = mac;
|
||||
this.requestSignature = requestSignature;
|
||||
}
|
||||
|
||||
public String keyAlgorithm() {
|
||||
return mac.getAlgorithm();
|
||||
}
|
||||
|
||||
public boolean isValid(SecretKey key) {
|
||||
return Arrays.equals(sign(mac, key, requestBody), requestSignature);
|
||||
}
|
||||
|
||||
private static Mac mac(String signatureAlgorithm) throws NoSuchAlgorithmException {
|
||||
return Mac.getInstance(signatureAlgorithm);
|
||||
}
|
||||
|
||||
private static byte[] sign(Mac mac, SecretKey key, byte[] requestBody) {
|
||||
try {
|
||||
mac.init(key);
|
||||
} catch (InvalidKeyException e) {
|
||||
throw new ConnectException(e);
|
||||
}
|
||||
return mac.doFinal(requestBody);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o)
|
||||
return true;
|
||||
if (o == null || getClass() != o.getClass())
|
||||
return false;
|
||||
InternalRequestSignature that = (InternalRequestSignature) o;
|
||||
return Arrays.equals(requestBody, that.requestBody)
|
||||
&& mac.getAlgorithm().equals(that.mac.getAlgorithm())
|
||||
&& mac.getMacLength() == that.mac.getMacLength()
|
||||
&& mac.getProvider().equals(that.mac.getProvider())
|
||||
&& Arrays.equals(requestSignature, that.requestSignature);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = Objects.hash(mac);
|
||||
result = 31 * result + Arrays.hashCode(requestBody);
|
||||
result = 31 * result + Arrays.hashCode(requestSignature);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
|
@ -19,7 +19,10 @@ package org.apache.kafka.connect.runtime.rest;
|
|||
|
||||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
|
||||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ErrorMessage;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException;
|
||||
|
|
@ -59,6 +62,27 @@ public class RestClient {
|
|||
*/
|
||||
public static <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
|
||||
TypeReference<T> responseFormat, WorkerConfig config) {
|
||||
return httpRequest(url, method, headers, requestBodyData, responseFormat, config, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends HTTP request to remote REST server
|
||||
*
|
||||
* @param url HTTP connection will be established with this url.
|
||||
* @param method HTTP method ("GET", "POST", "PUT", etc.)
|
||||
* @param headers HTTP headers from REST endpoint
|
||||
* @param requestBodyData Object to serialize as JSON and send in the request body.
|
||||
* @param responseFormat Expected format of the response to the HTTP request.
|
||||
* @param <T> The type of the deserialized response to the HTTP request.
|
||||
* @param sessionKey The key to sign the request with (intended for internal requests only);
|
||||
* may be null if the request doesn't need to be signed
|
||||
* @param requestSignatureAlgorithm The algorithm to sign the request with (intended for internal requests only);
|
||||
* may be null if the request doesn't need to be signed
|
||||
* @return The deserialized response to the HTTP request, or null if no data is expected.
|
||||
*/
|
||||
public static <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
|
||||
TypeReference<T> responseFormat, WorkerConfig config,
|
||||
SecretKey sessionKey, String requestSignatureAlgorithm) {
|
||||
HttpClient client;
|
||||
|
||||
if (url.startsWith("https://")) {
|
||||
|
|
@ -88,6 +112,14 @@ public class RestClient {
|
|||
|
||||
if (serializedBody != null) {
|
||||
req.content(new StringContentProvider(serializedBody, StandardCharsets.UTF_8), "application/json");
|
||||
if (sessionKey != null && requestSignatureAlgorithm != null) {
|
||||
InternalRequestSignature.addToRequest(
|
||||
sessionKey,
|
||||
serializedBody.getBytes(StandardCharsets.UTF_8),
|
||||
requestSignatureAlgorithm,
|
||||
req
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
ContentResponse res = req.send();
|
||||
|
|
@ -111,12 +143,11 @@ public class RestClient {
|
|||
log.error("IO error forwarding REST request: ", e);
|
||||
throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR, "IO Error trying to forward REST request: " + e.getMessage(), e);
|
||||
} finally {
|
||||
if (client != null)
|
||||
try {
|
||||
client.stop();
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to stop HTTP client", e);
|
||||
}
|
||||
try {
|
||||
client.stop();
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to stop HTTP client", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,12 +19,15 @@ package org.apache.kafka.connect.runtime.rest.resources;
|
|||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.kafka.connect.errors.NotFoundException;
|
||||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.Herder;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
import org.apache.kafka.connect.runtime.distributed.RebalanceNeededException;
|
||||
import org.apache.kafka.connect.runtime.distributed.RequestTargetException;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.RestClient;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
|
|
@ -66,6 +69,8 @@ import java.util.concurrent.TimeoutException;
|
|||
@Consumes(MediaType.APPLICATION_JSON)
|
||||
public class ConnectorsResource {
|
||||
private static final Logger log = LoggerFactory.getLogger(ConnectorsResource.class);
|
||||
private static final TypeReference<List<Map<String, String>>> TASK_CONFIGS_TYPE =
|
||||
new TypeReference<List<Map<String, String>>>() { };
|
||||
|
||||
// TODO: This should not be so long. However, due to potentially long rebalances that may have to wait a full
|
||||
// session timeout to complete, during which we cannot serve some requests. Ideally we could reduce this, but
|
||||
|
|
@ -230,9 +235,10 @@ public class ConnectorsResource {
|
|||
public void putTaskConfigs(final @PathParam("connector") String connector,
|
||||
final @Context HttpHeaders headers,
|
||||
final @QueryParam("forward") Boolean forward,
|
||||
final List<Map<String, String>> taskConfigs) throws Throwable {
|
||||
final byte[] requestBody) throws Throwable {
|
||||
List<Map<String, String>> taskConfigs = new ObjectMapper().readValue(requestBody, TASK_CONFIGS_TYPE);
|
||||
FutureCallback<Void> cb = new FutureCallback<>();
|
||||
herder.putTaskConfigs(connector, taskConfigs, cb);
|
||||
herder.putTaskConfigs(connector, taskConfigs, cb, InternalRequestSignature.fromHeaders(requestBody, headers));
|
||||
completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "POST", headers, taskConfigs, forward);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -24,11 +24,13 @@ import org.apache.kafka.connect.runtime.AbstractHerder;
|
|||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.HerderConnectorContext;
|
||||
import org.apache.kafka.connect.runtime.HerderRequest;
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.SinkConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.SourceConnectorConfig;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.runtime.Worker;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
|
||||
import org.apache.kafka.connect.storage.ConfigBackingStore;
|
||||
|
|
@ -241,7 +243,7 @@ public class StandaloneHerder extends AbstractHerder {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback) {
|
||||
public void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback, InternalRequestSignature requestSignature) {
|
||||
throw new UnsupportedOperationException("Kafka Connect in standalone mode does not support externally setting task configurations.");
|
||||
}
|
||||
|
||||
|
|
@ -379,6 +381,11 @@ public class StandaloneHerder extends AbstractHerder {
|
|||
updateConnectorTasks(connector);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onSessionKeyUpdate(SessionKey sessionKey) {
|
||||
// no-op
|
||||
}
|
||||
}
|
||||
|
||||
static class StandaloneHerderRequest implements HerderRequest {
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
import org.apache.kafka.connect.util.ConnectorTaskId;
|
||||
|
|
@ -88,6 +89,8 @@ public interface ConfigBackingStore {
|
|||
*/
|
||||
void putTargetState(String connector, TargetState state);
|
||||
|
||||
void putSessionKey(SessionKey sessionKey);
|
||||
|
||||
/**
|
||||
* Set an update listener to get notifications when there are config/target state
|
||||
* changes.
|
||||
|
|
@ -119,6 +122,12 @@ public interface ConfigBackingStore {
|
|||
* @param connector name of the connector
|
||||
*/
|
||||
void onConnectorTargetStateChange(String connector);
|
||||
|
||||
/**
|
||||
* Invoked when the leader has distributed a new session key
|
||||
* @param sessionKey the {@link SessionKey session key}
|
||||
*/
|
||||
void onSessionKeyUpdate(SessionKey sessionKey);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ import org.apache.kafka.connect.data.SchemaBuilder;
|
|||
import org.apache.kafka.connect.data.Struct;
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.errors.DataException;
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
|
||||
|
|
@ -45,8 +46,10 @@ import org.apache.kafka.connect.util.TopicAdmin;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.crypto.spec.SecretKeySpec;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
|
@ -176,6 +179,8 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
return COMMIT_TASKS_PREFIX + connectorName;
|
||||
}
|
||||
|
||||
public static final String SESSION_KEY_KEY = "session-key";
|
||||
|
||||
// Note that while using real serialization for values as we have here, but ad hoc string serialization for keys,
|
||||
// isn't ideal, we use this approach because it avoids any potential problems with schema evolution or
|
||||
// converter/serializer changes causing keys to change. We need to absolutely ensure that the keys remain precisely
|
||||
|
|
@ -190,6 +195,13 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
public static final Schema TARGET_STATE_V0 = SchemaBuilder.struct()
|
||||
.field("state", Schema.STRING_SCHEMA)
|
||||
.build();
|
||||
// The key is logically a byte array, but we can't use the JSON converter to (de-)serialize that without a schema.
|
||||
// So instead, we base 64-encode it before serializing and decode it after deserializing.
|
||||
public static final Schema SESSION_KEY_V0 = SchemaBuilder.struct()
|
||||
.field("key", Schema.STRING_SCHEMA)
|
||||
.field("algorithm", Schema.STRING_SCHEMA)
|
||||
.field("creation-timestamp", Schema.INT64_SCHEMA)
|
||||
.build();
|
||||
|
||||
private static final long READ_TO_END_TIMEOUT_MS = 30000;
|
||||
|
||||
|
|
@ -216,6 +228,8 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
// The most recently read offset. This does not take into account deferred task updates/commits, so we may have
|
||||
// outstanding data to be applied.
|
||||
private volatile long offset;
|
||||
// The most recently read session key, to use for validating internal REST requests.
|
||||
private volatile SessionKey sessionKey;
|
||||
|
||||
// Connector -> Map[ConnectorTaskId -> Configs]
|
||||
private final Map<String, Map<ConnectorTaskId, Map<String, String>>> deferredTaskUpdates = new HashMap<>();
|
||||
|
|
@ -270,6 +284,7 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
// immutable configs
|
||||
return new ClusterConfigState(
|
||||
offset,
|
||||
sessionKey,
|
||||
new HashMap<>(connectorTaskCounts),
|
||||
new HashMap<>(connectorConfigs),
|
||||
new HashMap<>(connectorTargetStates),
|
||||
|
|
@ -409,6 +424,23 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
configLog.send(TARGET_STATE_KEY(connector), serializedTargetState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void putSessionKey(SessionKey sessionKey) {
|
||||
log.debug("Distributing new session key");
|
||||
Struct sessionKeyStruct = new Struct(SESSION_KEY_V0);
|
||||
sessionKeyStruct.put("key", Base64.getEncoder().encodeToString(sessionKey.key().getEncoded()));
|
||||
sessionKeyStruct.put("algorithm", sessionKey.key().getAlgorithm());
|
||||
sessionKeyStruct.put("creation-timestamp", sessionKey.creationTimestamp());
|
||||
byte[] serializedSessionKey = converter.fromConnectData(topic, SESSION_KEY_V0, sessionKeyStruct);
|
||||
try {
|
||||
configLog.send(SESSION_KEY_KEY, serializedSessionKey);
|
||||
configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException | ExecutionException | TimeoutException e) {
|
||||
log.error("Failed to write session key to Kafka: ", e);
|
||||
throw new ConnectException("Error writing session key to Kafka", e);
|
||||
}
|
||||
}
|
||||
|
||||
// package private for testing
|
||||
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, final WorkerConfig config) {
|
||||
Map<String, Object> originals = config.originals();
|
||||
|
|
@ -635,6 +667,45 @@ public class KafkaConfigBackingStore implements ConfigBackingStore {
|
|||
|
||||
if (started)
|
||||
updateListener.onTaskConfigUpdate(updatedTasks);
|
||||
} else if (record.key().equals(SESSION_KEY_KEY)) {
|
||||
synchronized (lock) {
|
||||
if (value.value() == null) {
|
||||
log.error("Ignoring session key because it is unexpectedly null");
|
||||
return;
|
||||
}
|
||||
if (!(value.value() instanceof Map)) {
|
||||
log.error("Ignoring session key because the value is not a Map but is {}", value.value().getClass());
|
||||
return;
|
||||
}
|
||||
|
||||
Map<String, Object> valueAsMap = (Map<String, Object>) value.value();
|
||||
|
||||
Object sessionKey = valueAsMap.get("key");
|
||||
if (!(sessionKey instanceof String)) {
|
||||
log.error("Invalid data for session key 'key' field should be a String but is {}", sessionKey.getClass());
|
||||
return;
|
||||
}
|
||||
byte[] key = Base64.getDecoder().decode((String) sessionKey);
|
||||
|
||||
Object keyAlgorithm = valueAsMap.get("algorithm");
|
||||
if (!(keyAlgorithm instanceof String)) {
|
||||
log.error("Invalid data for session key 'algorithm' field should be a String but it is {}", keyAlgorithm.getClass());
|
||||
return;
|
||||
}
|
||||
|
||||
Object creationTimestamp = valueAsMap.get("creation-timestamp");
|
||||
if (!(creationTimestamp instanceof Long)) {
|
||||
log.error("Invalid data for session key 'creation-timestamp' field should be a long but it is {}", creationTimestamp.getClass());
|
||||
return;
|
||||
}
|
||||
KafkaConfigBackingStore.this.sessionKey = new SessionKey(
|
||||
new SecretKeySpec(key, (String) keyAlgorithm),
|
||||
(long) creationTimestamp
|
||||
);
|
||||
|
||||
if (started)
|
||||
updateListener.onSessionKeyUpdate(KafkaConfigBackingStore.this.sessionKey);
|
||||
}
|
||||
} else {
|
||||
log.error("Discarding config update record with invalid key: {}", record.key());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.kafka.connect.storage;
|
||||
|
||||
import org.apache.kafka.connect.runtime.SessionKey;
|
||||
import org.apache.kafka.connect.runtime.TargetState;
|
||||
import org.apache.kafka.connect.runtime.WorkerConfigTransformer;
|
||||
import org.apache.kafka.connect.runtime.distributed.ClusterConfigState;
|
||||
|
|
@ -68,6 +69,7 @@ public class MemoryConfigBackingStore implements ConfigBackingStore {
|
|||
|
||||
return new ClusterConfigState(
|
||||
ClusterConfigState.NO_OFFSET,
|
||||
null,
|
||||
connectorTaskCounts,
|
||||
connectorConfigs,
|
||||
connectorTargetStates,
|
||||
|
|
@ -143,6 +145,11 @@ public class MemoryConfigBackingStore implements ConfigBackingStore {
|
|||
updateListener.onConnectorTargetStateChange(connector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void putSessionKey(SessionKey sessionKey) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void setUpdateListener(UpdateListener listener) {
|
||||
this.updateListener = listener;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,168 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.connect.integration;
|
||||
|
||||
import org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility;
|
||||
import org.apache.kafka.connect.storage.StringConverter;
|
||||
import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster;
|
||||
import org.apache.kafka.test.IntegrationTest;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static javax.ws.rs.core.Response.Status.BAD_REQUEST;
|
||||
import static javax.ws.rs.core.Response.Status.FORBIDDEN;
|
||||
import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.SinkConnectorConfig.TOPICS_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.distributed.DistributedConfig.CONNECT_PROTOCOL_CONFIG;
|
||||
import static org.apache.kafka.connect.runtime.rest.InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER;
|
||||
import static org.apache.kafka.connect.runtime.rest.InternalRequestSignature.SIGNATURE_HEADER;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* A simple integration test to ensure that internal request validation becomes enabled with the
|
||||
* "sessioned" protocol.
|
||||
*/
|
||||
@Category(IntegrationTest.class)
|
||||
public class SessionedProtocolIntegrationTest {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(SessionedProtocolIntegrationTest.class);
|
||||
|
||||
private static final String CONNECTOR_NAME = "connector";
|
||||
private static final long CONNECTOR_SETUP_DURATION_MS = 60000;
|
||||
|
||||
private EmbeddedConnectCluster connect;
|
||||
private ConnectorHandle connectorHandle;
|
||||
|
||||
@Before
|
||||
public void setup() throws IOException {
|
||||
// setup Connect worker properties
|
||||
Map<String, String> workerProps = new HashMap<>();
|
||||
workerProps.put(CONNECT_PROTOCOL_CONFIG, ConnectProtocolCompatibility.SESSIONED.protocol());
|
||||
|
||||
// build a Connect cluster backed by Kafka and Zk
|
||||
connect = new EmbeddedConnectCluster.Builder()
|
||||
.name("connect-cluster")
|
||||
.numWorkers(2)
|
||||
.numBrokers(1)
|
||||
.workerProps(workerProps)
|
||||
.build();
|
||||
|
||||
// start the clusters
|
||||
connect.start();
|
||||
|
||||
// get a handle to the connector
|
||||
connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
|
||||
}
|
||||
|
||||
@After
|
||||
public void close() {
|
||||
// stop all Connect, Kafka and Zk threads.
|
||||
connect.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
@Ignore
|
||||
// TODO: This test runs fine locally but fails on Jenkins. Ignoring for now, should revisit when
|
||||
// possible.
|
||||
public void ensureInternalEndpointIsSecured() throws Throwable {
|
||||
final String connectorTasksEndpoint = connect.endpointForResource(String.format(
|
||||
"connectors/%s/tasks",
|
||||
CONNECTOR_NAME
|
||||
));
|
||||
final Map<String, String> emptyHeaders = new HashMap<>();
|
||||
final Map<String, String> invalidSignatureHeaders = new HashMap<>();
|
||||
invalidSignatureHeaders.put(SIGNATURE_HEADER, "S2Fma2Flc3F1ZQ==");
|
||||
invalidSignatureHeaders.put(SIGNATURE_ALGORITHM_HEADER, "HmacSHA256");
|
||||
|
||||
// We haven't created the connector yet, but this should still return a 400 instead of a 404
|
||||
// if the endpoint is secured
|
||||
log.info(
|
||||
"Making a POST request to the {} endpoint with no connector started and no signature header; "
|
||||
+ "expecting 400 error response",
|
||||
connectorTasksEndpoint
|
||||
);
|
||||
assertEquals(
|
||||
BAD_REQUEST.getStatusCode(),
|
||||
connect.executePost(connectorTasksEndpoint, "[]", emptyHeaders)
|
||||
);
|
||||
|
||||
// Try again, but with an invalid signature
|
||||
log.info(
|
||||
"Making a POST request to the {} endpoint with no connector started and an invalid signature header; "
|
||||
+ "expecting 403 error response",
|
||||
connectorTasksEndpoint
|
||||
);
|
||||
assertEquals(
|
||||
FORBIDDEN.getStatusCode(),
|
||||
connect.executePost(connectorTasksEndpoint, "[]", invalidSignatureHeaders)
|
||||
);
|
||||
|
||||
// Create the connector now
|
||||
// setup up props for the sink connector
|
||||
Map<String, String> connectorProps = new HashMap<>();
|
||||
connectorProps.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName());
|
||||
connectorProps.put(TASKS_MAX_CONFIG, String.valueOf(1));
|
||||
connectorProps.put(TOPICS_CONFIG, "test-topic");
|
||||
connectorProps.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
|
||||
connectorProps.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
|
||||
|
||||
// start a sink connector
|
||||
log.info("Starting the {} connector", CONNECTOR_NAME);
|
||||
StartAndStopLatch startLatch = connectorHandle.expectedStarts(1);
|
||||
connect.configureConnector(CONNECTOR_NAME, connectorProps);
|
||||
startLatch.await(CONNECTOR_SETUP_DURATION_MS, TimeUnit.MILLISECONDS);
|
||||
|
||||
|
||||
// Verify the exact same behavior, after starting the connector
|
||||
|
||||
// We haven't created the connector yet, but this should still return a 400 instead of a 404
|
||||
// if the endpoint is secured
|
||||
log.info(
|
||||
"Making a POST request to the {} endpoint with the connector started and no signature header; "
|
||||
+ "expecting 400 error response",
|
||||
connectorTasksEndpoint
|
||||
);
|
||||
assertEquals(
|
||||
BAD_REQUEST.getStatusCode(),
|
||||
connect.executePost(connectorTasksEndpoint, "[]", emptyHeaders)
|
||||
);
|
||||
|
||||
// Try again, but with an invalid signature
|
||||
log.info(
|
||||
"Making a POST request to the {} endpoint with the connector started and an invalid signature header; "
|
||||
+ "expecting 403 error response",
|
||||
connectorTasksEndpoint
|
||||
);
|
||||
assertEquals(
|
||||
FORBIDDEN.getStatusCode(),
|
||||
connect.executePost(connectorTasksEndpoint, "[]", invalidSignatureHeaders)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -117,10 +117,10 @@ public class AbstractHerderTest {
|
|||
TASK_CONFIGS_MAP.put(TASK1, TASK_CONFIG);
|
||||
TASK_CONFIGS_MAP.put(TASK2, TASK_CONFIG);
|
||||
}
|
||||
private static final ClusterConfigState SNAPSHOT = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
private static final ClusterConfigState SNAPSHOT = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG), Collections.singletonMap(CONN1, TargetState.STARTED),
|
||||
TASK_CONFIGS_MAP, Collections.<String>emptySet());
|
||||
private static final ClusterConfigState SNAPSHOT_NO_TASKS = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
private static final ClusterConfigState SNAPSHOT_NO_TASKS = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG), Collections.singletonMap(CONN1, TargetState.STARTED),
|
||||
Collections.emptyMap(), Collections.<String>emptySet());
|
||||
|
||||
|
|
|
|||
|
|
@ -65,6 +65,7 @@ public class WorkerTestUtils {
|
|||
int taskNum) {
|
||||
return new ClusterConfigState(
|
||||
offset,
|
||||
null,
|
||||
connectorTaskCounts(1, connectorNum, taskNum),
|
||||
connectorConfigs(1, connectorNum),
|
||||
connectorTargetStates(1, connectorNum, TargetState.STARTED),
|
||||
|
|
|
|||
|
|
@ -62,6 +62,7 @@ public class ConnectProtocolCompatibilityTest {
|
|||
configStorage = mock(KafkaConfigBackingStore.class);
|
||||
configState = new ClusterConfigState(
|
||||
1L,
|
||||
null,
|
||||
Collections.singletonMap(connectorId1, 1),
|
||||
Collections.singletonMap(connectorId1, new HashMap<>()),
|
||||
Collections.singletonMap(connectorId1, TargetState.STARTED),
|
||||
|
|
@ -89,18 +90,40 @@ public class ConnectProtocolCompatibilityTest {
|
|||
public void testCoopToCoopMetadata() {
|
||||
when(configStorage.snapshot()).thenReturn(configState);
|
||||
ExtendedWorkerState workerState = new ExtendedWorkerState(LEADER_URL, configStorage.snapshot().offset(), null);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, false);
|
||||
ExtendedWorkerState state = IncrementalCooperativeConnectProtocol.deserializeMetadata(metadata);
|
||||
assertEquals(LEADER_URL, state.url());
|
||||
assertEquals(1, state.offset());
|
||||
verify(configStorage).snapshot();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSessionedToCoopMetadata() {
|
||||
when(configStorage.snapshot()).thenReturn(configState);
|
||||
ExtendedWorkerState workerState = new ExtendedWorkerState(LEADER_URL, configStorage.snapshot().offset(), null);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, true);
|
||||
ExtendedWorkerState state = IncrementalCooperativeConnectProtocol.deserializeMetadata(metadata);
|
||||
assertEquals(LEADER_URL, state.url());
|
||||
assertEquals(1, state.offset());
|
||||
verify(configStorage).snapshot();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSessionedToEagerMetadata() {
|
||||
when(configStorage.snapshot()).thenReturn(configState);
|
||||
ExtendedWorkerState workerState = new ExtendedWorkerState(LEADER_URL, configStorage.snapshot().offset(), null);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, true);
|
||||
ConnectProtocol.WorkerState state = ConnectProtocol.deserializeMetadata(metadata);
|
||||
assertEquals(LEADER_URL, state.url());
|
||||
assertEquals(1, state.offset());
|
||||
verify(configStorage).snapshot();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCoopToEagerMetadata() {
|
||||
when(configStorage.snapshot()).thenReturn(configState);
|
||||
ExtendedWorkerState workerState = new ExtendedWorkerState(LEADER_URL, configStorage.snapshot().offset(), null);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState);
|
||||
ByteBuffer metadata = IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, false);
|
||||
ConnectProtocol.WorkerState state = ConnectProtocol.deserializeMetadata(metadata);
|
||||
assertEquals(LEADER_URL, state.url());
|
||||
assertEquals(1, state.offset());
|
||||
|
|
|
|||
|
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.runtime.distributed;
|
||||
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.junit.Test;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertThrows;
|
||||
|
||||
public class DistributedConfigTest {
|
||||
|
||||
public Map<String, String> configs() {
|
||||
Map<String, String> result = new HashMap<>();
|
||||
result.put(DistributedConfig.GROUP_ID_CONFIG, "connect-cluster");
|
||||
result.put(DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
|
||||
result.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs");
|
||||
result.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets");
|
||||
result.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status");
|
||||
result.put(DistributedConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
|
||||
result.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
|
||||
return result;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldCreateKeyGeneratorWithDefaultSettings() {
|
||||
DistributedConfig config = new DistributedConfig(configs());
|
||||
assertNotNull(config.getInternalRequestKeyGenerator());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldCreateKeyGeneratorWithSpecificSettings() {
|
||||
final String algorithm = "HmacSHA1";
|
||||
Map<String, String> configs = configs();
|
||||
configs.put(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, algorithm);
|
||||
configs.put(DistributedConfig.INTER_WORKER_KEY_SIZE_CONFIG, "512");
|
||||
configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, algorithm);
|
||||
DistributedConfig config = new DistributedConfig(configs);
|
||||
KeyGenerator keyGenerator = config.getInternalRequestKeyGenerator();
|
||||
assertNotNull(keyGenerator);
|
||||
assertEquals(algorithm, keyGenerator.getAlgorithm());
|
||||
assertEquals(512 / 8, keyGenerator.generateKey().getEncoded().length);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void shouldFailWithEmptyListOfVerificationAlgorithms() {
|
||||
Map<String, String> configs = configs();
|
||||
configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, "");
|
||||
new DistributedConfig(configs);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void shouldFailIfKeyAlgorithmNotInVerificationAlgorithmsList() {
|
||||
Map<String, String> configs = configs();
|
||||
configs.put(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, "HmacSHA1");
|
||||
configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, "HmacSHA256");
|
||||
new DistributedConfig(configs);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void shouldFailWithInvalidKeyAlgorithm() {
|
||||
Map<String, String> configs = configs();
|
||||
configs.put(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, "not-actually-a-key-algorithm");
|
||||
new DistributedConfig(configs);
|
||||
}
|
||||
|
||||
@Test(expected = ConfigException.class)
|
||||
public void shouldFailWithInvalidKeySize() {
|
||||
Map<String, String> configs = configs();
|
||||
configs.put(DistributedConfig.INTER_WORKER_KEY_SIZE_CONFIG, "0");
|
||||
new DistributedConfig(configs);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldValidateAllVerificationAlgorithms() {
|
||||
List<String> algorithms =
|
||||
new ArrayList<>(Arrays.asList("HmacSHA1", "HmacSHA256", "HmacMD5", "bad-algorithm"));
|
||||
Map<String, String> configs = configs();
|
||||
for (int i = 0; i < algorithms.size(); i++) {
|
||||
configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, String.join(",", algorithms));
|
||||
assertThrows(ConfigException.class, () -> new DistributedConfig(configs));
|
||||
algorithms.add(algorithms.remove(0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -41,10 +41,12 @@ import org.apache.kafka.connect.runtime.distributed.DistributedHerder.HerderMetr
|
|||
import org.apache.kafka.connect.runtime.isolation.DelegatingClassLoader;
|
||||
import org.apache.kafka.connect.runtime.isolation.PluginClassLoader;
|
||||
import org.apache.kafka.connect.runtime.isolation.Plugins;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorType;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException;
|
||||
import org.apache.kafka.connect.sink.SinkConnector;
|
||||
import org.apache.kafka.connect.source.SourceConnector;
|
||||
import org.apache.kafka.connect.source.SourceTask;
|
||||
|
|
@ -78,16 +80,19 @@ import java.util.concurrent.TimeUnit;
|
|||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static javax.ws.rs.core.Response.Status.FORBIDDEN;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.CONNECT_PROTOCOL_V0;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PrepareForTest({DistributedHerder.class, Plugins.class})
|
||||
@PowerMockIgnore("javax.management.*")
|
||||
@PowerMockIgnore({"javax.management.*", "javax.crypto.*"})
|
||||
public class DistributedHerderTest {
|
||||
private static final Map<String, String> HERDER_CONFIG = new HashMap<>();
|
||||
static {
|
||||
|
|
@ -144,13 +149,13 @@ public class DistributedHerderTest {
|
|||
TASK_CONFIGS_MAP.put(TASK1, TASK_CONFIG);
|
||||
TASK_CONFIGS_MAP.put(TASK2, TASK_CONFIG);
|
||||
}
|
||||
private static final ClusterConfigState SNAPSHOT = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
private static final ClusterConfigState SNAPSHOT = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG), Collections.singletonMap(CONN1, TargetState.STARTED),
|
||||
TASK_CONFIGS_MAP, Collections.<String>emptySet());
|
||||
private static final ClusterConfigState SNAPSHOT_PAUSED_CONN1 = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
private static final ClusterConfigState SNAPSHOT_PAUSED_CONN1 = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG), Collections.singletonMap(CONN1, TargetState.PAUSED),
|
||||
TASK_CONFIGS_MAP, Collections.<String>emptySet());
|
||||
private static final ClusterConfigState SNAPSHOT_UPDATED_CONN1_CONFIG = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
private static final ClusterConfigState SNAPSHOT_UPDATED_CONN1_CONFIG = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG_UPDATED), Collections.singletonMap(CONN1, TargetState.STARTED),
|
||||
TASK_CONFIGS_MAP, Collections.<String>emptySet());
|
||||
|
||||
|
|
@ -1490,7 +1495,7 @@ public class DistributedHerderTest {
|
|||
EasyMock.expect(configTransformer.transform(EasyMock.eq(CONN1), EasyMock.anyObject()))
|
||||
.andThrow(new AssertionError("Config transformation should not occur when requesting connector or task info"));
|
||||
EasyMock.replay(configTransformer);
|
||||
ClusterConfigState snapshotWithTransform = new ClusterConfigState(1, Collections.singletonMap(CONN1, 3),
|
||||
ClusterConfigState snapshotWithTransform = new ClusterConfigState(1, null, Collections.singletonMap(CONN1, 3),
|
||||
Collections.singletonMap(CONN1, CONN1_CONFIG), Collections.singletonMap(CONN1, TargetState.STARTED),
|
||||
TASK_CONFIGS_MAP, Collections.<String>emptySet(), configTransformer);
|
||||
|
||||
|
|
@ -1627,6 +1632,131 @@ public class DistributedHerderTest {
|
|||
PowerMock.verifyAll();
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testPutTaskConfigsSignatureNotRequiredV0() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
|
||||
member.wakeup();
|
||||
EasyMock.expectLastCall().once();
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V0).anyTimes();
|
||||
PowerMock.replayAll(taskConfigCb);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, null);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
@Test
|
||||
public void testPutTaskConfigsSignatureNotRequiredV1() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
|
||||
member.wakeup();
|
||||
EasyMock.expectLastCall().once();
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V1).anyTimes();
|
||||
PowerMock.replayAll(taskConfigCb);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, null);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutTaskConfigsMissingRequiredSignature() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
Capture<Throwable> errorCapture = Capture.newInstance();
|
||||
taskConfigCb.onCompletion(EasyMock.capture(errorCapture), EasyMock.eq(null));
|
||||
EasyMock.expectLastCall().once();
|
||||
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V2).anyTimes();
|
||||
PowerMock.replayAll(taskConfigCb);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, null);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
assertTrue(errorCapture.getValue() instanceof BadRequestException);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutTaskConfigsDisallowedSignatureAlgorithm() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
Capture<Throwable> errorCapture = Capture.newInstance();
|
||||
taskConfigCb.onCompletion(EasyMock.capture(errorCapture), EasyMock.eq(null));
|
||||
EasyMock.expectLastCall().once();
|
||||
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V2).anyTimes();
|
||||
|
||||
InternalRequestSignature signature = EasyMock.mock(InternalRequestSignature.class);
|
||||
EasyMock.expect(signature.keyAlgorithm()).andReturn("HmacSHA489").anyTimes();
|
||||
|
||||
PowerMock.replayAll(taskConfigCb, signature);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
assertTrue(errorCapture.getValue() instanceof BadRequestException);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutTaskConfigsInvalidSignature() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
Capture<Throwable> errorCapture = Capture.newInstance();
|
||||
taskConfigCb.onCompletion(EasyMock.capture(errorCapture), EasyMock.eq(null));
|
||||
EasyMock.expectLastCall().once();
|
||||
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V2).anyTimes();
|
||||
|
||||
InternalRequestSignature signature = EasyMock.mock(InternalRequestSignature.class);
|
||||
EasyMock.expect(signature.keyAlgorithm()).andReturn("HmacSHA256").anyTimes();
|
||||
EasyMock.expect(signature.isValid(EasyMock.anyObject())).andReturn(false).anyTimes();
|
||||
|
||||
PowerMock.replayAll(taskConfigCb, signature);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
assertTrue(errorCapture.getValue() instanceof ConnectRestException);
|
||||
assertEquals(FORBIDDEN.getStatusCode(), ((ConnectRestException) errorCapture.getValue()).statusCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutTaskConfigsValidRequiredSignature() {
|
||||
Callback<Void> taskConfigCb = EasyMock.mock(Callback.class);
|
||||
|
||||
member.wakeup();
|
||||
EasyMock.expectLastCall().once();
|
||||
EasyMock.expect(member.currentProtocolVersion()).andReturn(CONNECT_PROTOCOL_V2).anyTimes();
|
||||
|
||||
InternalRequestSignature signature = EasyMock.mock(InternalRequestSignature.class);
|
||||
EasyMock.expect(signature.keyAlgorithm()).andReturn("HmacSHA256").anyTimes();
|
||||
EasyMock.expect(signature.isValid(EasyMock.anyObject())).andReturn(true).anyTimes();
|
||||
|
||||
PowerMock.replayAll(taskConfigCb, signature);
|
||||
|
||||
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKeyExceptionDetection() {
|
||||
assertFalse(herder.isPossibleExpiredKeyException(
|
||||
time.milliseconds(),
|
||||
new RuntimeException()
|
||||
));
|
||||
assertFalse(herder.isPossibleExpiredKeyException(
|
||||
time.milliseconds(),
|
||||
new BadRequestException("")
|
||||
));
|
||||
assertFalse(herder.isPossibleExpiredKeyException(
|
||||
time.milliseconds() - TimeUnit.MINUTES.toMillis(2),
|
||||
new ConnectRestException(FORBIDDEN.getStatusCode(), "")
|
||||
));
|
||||
assertTrue(herder.isPossibleExpiredKeyException(
|
||||
time.milliseconds(),
|
||||
new ConnectRestException(FORBIDDEN.getStatusCode(), "")
|
||||
));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInconsistentConfigs() {
|
||||
// FIXME: if we have inconsistent configs, we need to request forced reconfig + write of the connector's task configs
|
||||
|
|
|
|||
|
|
@ -47,12 +47,15 @@ import java.util.stream.Collectors;
|
|||
import java.util.stream.IntStream;
|
||||
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2;
|
||||
import static org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad;
|
||||
import static org.hamcrest.CoreMatchers.hasItems;
|
||||
import static org.hamcrest.CoreMatchers.is;
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.runners.Parameterized.Parameter;
|
||||
import static org.junit.runners.Parameterized.Parameters;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
|
|
@ -71,6 +74,14 @@ public class IncrementalCooperativeAssignorTest {
|
|||
@Captor
|
||||
ArgumentCaptor<Map<String, ExtendedAssignment>> assignmentsCapture;
|
||||
|
||||
@Parameters
|
||||
public static Iterable<?> mode() {
|
||||
return Arrays.asList(new Object[][] {{CONNECT_PROTOCOL_V1, CONNECT_PROTOCOL_V2}});
|
||||
}
|
||||
|
||||
@Parameter
|
||||
public short protocolVersion;
|
||||
|
||||
private ClusterConfigState configState;
|
||||
private Map<String, ExtendedWorkerState> memberConfigs;
|
||||
private Map<String, ExtendedWorkerState> expectedMemberConfigs;
|
||||
|
|
@ -115,7 +126,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
doReturn(Collections.EMPTY_MAP).when(assignor).serializeAssignments(assignmentsCapture.capture());
|
||||
|
||||
// First assignment with 1 worker and 2 connectors configured but not yet assigned
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -127,7 +138,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -138,7 +149,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Third assignment after revocations
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -149,7 +160,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// A fourth rebalance should not change assignments
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -172,7 +183,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// First assignment with 2 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -186,7 +197,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
applyAssignments(returnedAssignments);
|
||||
assignments.remove("worker2");
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay, returnedAssignments);
|
||||
|
|
@ -200,7 +211,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// been reached yet
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay / 2, returnedAssignments);
|
||||
|
|
@ -213,7 +224,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Fourth assignment after delay expired
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -236,7 +247,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// First assignment with 2 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -250,7 +261,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
applyAssignments(returnedAssignments);
|
||||
assignments.remove("worker2");
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay, returnedAssignments);
|
||||
|
|
@ -264,7 +275,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// been reached yet
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay / 2, returnedAssignments);
|
||||
|
|
@ -279,7 +290,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay / 4, returnedAssignments);
|
||||
|
|
@ -293,7 +304,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// assignments ought to be assigned to the worker that has appeared as returned.
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -317,7 +328,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// First assignment with 3 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
memberConfigs.put("worker3", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -338,7 +349,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// Capture needs to be reset to point to the new assignor
|
||||
doReturn(Collections.EMPTY_MAP).when(assignor).serializeAssignments(assignmentsCapture.capture());
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -349,7 +360,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Third (incidental) assignment with still only one worker in the group.
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
expectedMemberConfigs = memberConfigs(leader, offset, returnedAssignments);
|
||||
|
|
@ -372,7 +383,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// First assignment with 3 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
memberConfigs.put("worker3", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -393,7 +404,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// Capture needs to be reset to point to the new assignor
|
||||
doReturn(Collections.EMPTY_MAP).when(assignor).serializeAssignments(assignmentsCapture.capture());
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -407,7 +418,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
memberConfigs.put("worker1", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
expectedMemberConfigs = memberConfigs(leader, offset, returnedAssignments);
|
||||
|
|
@ -417,7 +428,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Fourth assignment after revocations
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -442,7 +453,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// First assignment with 2 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
try {
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
} catch (RuntimeException e) {
|
||||
RequestFuture.failure(e);
|
||||
}
|
||||
|
|
@ -459,7 +470,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// or the workers that were bounced. Therefore it goes into assignment freeze for
|
||||
// the new assignments for a rebalance delay period
|
||||
doReturn(Collections.EMPTY_MAP).when(assignor).serializeAssignments(assignmentsCapture.capture());
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
expectedMemberConfigs = memberConfigs(leader, offset, returnedAssignments);
|
||||
|
|
@ -473,7 +484,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// been reached yet
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(rebalanceDelay / 2, returnedAssignments);
|
||||
|
|
@ -486,7 +497,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Fourth assignment after delay expired. Finally all the new assignments are assigned
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -509,7 +520,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// First assignment with 2 workers and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -527,7 +538,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
memberConfigs.put("worker3", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
try {
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
} catch (RuntimeException e) {
|
||||
RequestFuture.failure(e);
|
||||
}
|
||||
|
|
@ -542,7 +553,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
// Third assignment happens with members returning the same assignments (memberConfigs)
|
||||
// as the first time.
|
||||
doReturn(Collections.EMPTY_MAP).when(assignor).serializeAssignments(assignmentsCapture.capture());
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
expectedMemberConfigs = memberConfigs(leader, offset, returnedAssignments);
|
||||
|
|
@ -562,7 +573,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
// First assignment with 1 worker and 2 connectors configured but not yet assigned
|
||||
memberConfigs.put("worker2", new ExtendedWorkerState(leaderUrl, offset, null));
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -575,7 +586,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
when(coordinator.configSnapshot()).thenReturn(configState);
|
||||
applyAssignments(returnedAssignments);
|
||||
memberConfigs = memberConfigs(leader, offset, assignments);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator);
|
||||
assignor.performTaskAssignment(leader, offset, memberConfigs, coordinator, protocolVersion);
|
||||
++rebalanceNum;
|
||||
returnedAssignments = assignmentsCapture.getValue();
|
||||
assertDelay(0, returnedAssignments);
|
||||
|
|
@ -976,6 +987,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
int taskNum) {
|
||||
return new ClusterConfigState(
|
||||
offset,
|
||||
null,
|
||||
connectorTaskCounts(1, connectorNum, taskNum),
|
||||
connectorConfigs(1, connectorNum),
|
||||
connectorTargetStates(1, connectorNum, TargetState.STARTED),
|
||||
|
|
@ -1052,7 +1064,7 @@ public class IncrementalCooperativeAssignorTest {
|
|||
|
||||
private ExtendedAssignment newExpandableAssignment() {
|
||||
return new ExtendedAssignment(
|
||||
CONNECT_PROTOCOL_V1,
|
||||
protocolVersion,
|
||||
ConnectProtocol.Assignment.NO_ERROR,
|
||||
leader,
|
||||
leaderUrl,
|
||||
|
|
|
|||
|
|
@ -56,6 +56,7 @@ import static org.apache.kafka.connect.runtime.WorkerTestUtils.clusterConfigStat
|
|||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocol.WorkerState;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.COMPATIBLE;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.EAGER;
|
||||
import static org.apache.kafka.connect.runtime.distributed.ConnectProtocolCompatibility.SESSIONED;
|
||||
import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
|
|
@ -119,7 +120,7 @@ public class WorkerCoordinatorIncrementalTest {
|
|||
// - Expected metadata size
|
||||
@Parameters
|
||||
public static Iterable<?> mode() {
|
||||
return Arrays.asList(new Object[][]{{COMPATIBLE, 2}});
|
||||
return Arrays.asList(new Object[][]{{COMPATIBLE, 2}, {SESSIONED, 3}});
|
||||
}
|
||||
|
||||
@Parameter
|
||||
|
|
@ -565,14 +566,18 @@ public class WorkerCoordinatorIncrementalTest {
|
|||
return IncrementalCooperativeConnectProtocol.deserializeAssignment(assignment.get(member));
|
||||
}
|
||||
|
||||
private static void addJoinGroupResponseMember(List<JoinGroupResponseMember> responseMembers,
|
||||
private void addJoinGroupResponseMember(List<JoinGroupResponseMember> responseMembers,
|
||||
String member,
|
||||
long offset,
|
||||
ExtendedAssignment assignment) {
|
||||
responseMembers.add(new JoinGroupResponseMember()
|
||||
.setMemberId(member)
|
||||
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(
|
||||
new ExtendedWorkerState(expectedUrl(member), offset, assignment)).array())
|
||||
.setMetadata(
|
||||
IncrementalCooperativeConnectProtocol.serializeMetadata(
|
||||
new ExtendedWorkerState(expectedUrl(member), offset, assignment),
|
||||
compatibility != COMPATIBLE
|
||||
).array()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -149,6 +149,7 @@ public class WorkerCoordinatorTest {
|
|||
|
||||
configState1 = new ClusterConfigState(
|
||||
1L,
|
||||
null,
|
||||
Collections.singletonMap(connectorId1, 1),
|
||||
Collections.singletonMap(connectorId1, (Map<String, String>) new HashMap<String, String>()),
|
||||
Collections.singletonMap(connectorId1, TargetState.STARTED),
|
||||
|
|
@ -171,6 +172,7 @@ public class WorkerCoordinatorTest {
|
|||
configState2TaskConfigs.put(taskId2x0, new HashMap<String, String>());
|
||||
configState2 = new ClusterConfigState(
|
||||
2L,
|
||||
null,
|
||||
configState2ConnectorTaskCounts,
|
||||
configState2ConnectorConfigs,
|
||||
configState2TargetStates,
|
||||
|
|
@ -196,6 +198,7 @@ public class WorkerCoordinatorTest {
|
|||
configStateSingleTaskConnectorsTaskConfigs.put(taskId3x0, new HashMap<String, String>());
|
||||
configStateSingleTaskConnectors = new ClusterConfigState(
|
||||
2L,
|
||||
null,
|
||||
configStateSingleTaskConnectorsConnectorTaskCounts,
|
||||
configStateSingleTaskConnectorsConnectorConfigs,
|
||||
configStateSingleTaskConnectorsTargetStates,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,151 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.connect.runtime.rest;
|
||||
|
||||
import org.apache.kafka.connect.errors.ConnectException;
|
||||
import org.apache.kafka.connect.runtime.rest.errors.BadRequestException;
|
||||
import org.eclipse.jetty.client.api.Request;
|
||||
import org.junit.Test;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
|
||||
import javax.crypto.Mac;
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.crypto.spec.SecretKeySpec;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
|
||||
import java.util.Base64;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class InternalRequestSignatureTest {
|
||||
|
||||
private static final byte[] REQUEST_BODY =
|
||||
"[{\"config\":\"value\"},{\"config\":\"other_value\"}]".getBytes();
|
||||
private static final String SIGNATURE_ALGORITHM = "HmacSHA256";
|
||||
private static final SecretKey KEY = new SecretKeySpec(
|
||||
new byte[] {
|
||||
109, 116, -111, 49, -94, 25, -103, 44, -99, -118, 53, -69, 87, -124, 5, 48,
|
||||
89, -105, -2, 58, -92, 87, 67, 49, -125, -79, -39, -126, -51, -53, -85, 57
|
||||
}, "HmacSHA256"
|
||||
);
|
||||
private static final byte[] SIGNATURE = new byte[] {
|
||||
42, -3, 127, 57, 43, 49, -51, -43, 72, -62, -10, 120, 123, 125, 26, -65,
|
||||
36, 72, 86, -71, -32, 13, -8, 115, 85, 73, -65, -112, 6, 68, 41, -50
|
||||
};
|
||||
private static final String ENCODED_SIGNATURE = Base64.getEncoder().encodeToString(SIGNATURE);
|
||||
|
||||
@Test
|
||||
public void fromHeadersShouldReturnNullOnNullHeaders() {
|
||||
assertNull(InternalRequestSignature.fromHeaders(REQUEST_BODY, null));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void fromHeadersShouldReturnNullIfSignatureHeaderMissing() {
|
||||
assertNull(InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders(null, SIGNATURE_ALGORITHM)));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void fromHeadersShouldReturnNullIfSignatureAlgorithmHeaderMissing() {
|
||||
assertNull(InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, null)));
|
||||
}
|
||||
|
||||
@Test(expected = BadRequestException.class)
|
||||
public void fromHeadersShouldThrowExceptionOnInvalidSignatureAlgorithm() {
|
||||
InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, "doesn'texist"));
|
||||
}
|
||||
|
||||
@Test(expected = BadRequestException.class)
|
||||
public void fromHeadersShouldThrowExceptionOnInvalidBase64Signature() {
|
||||
InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders("not valid base 64", SIGNATURE_ALGORITHM));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void fromHeadersShouldReturnNonNullResultOnValidSignatureAndSignatureAlgorithm() {
|
||||
InternalRequestSignature signature =
|
||||
InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, SIGNATURE_ALGORITHM));
|
||||
assertNotNull(signature);
|
||||
assertNotNull(signature.keyAlgorithm());
|
||||
}
|
||||
|
||||
@Test(expected = ConnectException.class)
|
||||
public void addToRequestShouldThrowExceptionOnInvalidSignatureAlgorithm() {
|
||||
Request request = mock(Request.class);
|
||||
InternalRequestSignature.addToRequest(KEY, REQUEST_BODY, "doesn'texist", request);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addToRequestShouldAddHeadersOnValidSignatureAlgorithm() {
|
||||
Request request = mock(Request.class);
|
||||
ArgumentCaptor<String> signatureCapture = ArgumentCaptor.forClass(String.class);
|
||||
ArgumentCaptor<String> signatureAlgorithmCapture = ArgumentCaptor.forClass(String.class);
|
||||
when(request.header(
|
||||
eq(InternalRequestSignature.SIGNATURE_HEADER),
|
||||
signatureCapture.capture()
|
||||
)).thenReturn(request);
|
||||
when(request.header(
|
||||
eq(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER),
|
||||
signatureAlgorithmCapture.capture()
|
||||
)).thenReturn(request);
|
||||
|
||||
InternalRequestSignature.addToRequest(KEY, REQUEST_BODY, SIGNATURE_ALGORITHM, request);
|
||||
|
||||
assertEquals(
|
||||
"Request should have valid base 64-encoded signature added as header",
|
||||
ENCODED_SIGNATURE,
|
||||
signatureCapture.getValue()
|
||||
);
|
||||
assertEquals(
|
||||
"Request should have provided signature algorithm added as header",
|
||||
SIGNATURE_ALGORITHM,
|
||||
signatureAlgorithmCapture.getValue()
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSignatureValidation() throws Exception {
|
||||
Mac mac = Mac.getInstance(SIGNATURE_ALGORITHM);
|
||||
|
||||
InternalRequestSignature signature = new InternalRequestSignature(REQUEST_BODY, mac, SIGNATURE);
|
||||
assertTrue(signature.isValid(KEY));
|
||||
|
||||
signature = InternalRequestSignature.fromHeaders(REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, SIGNATURE_ALGORITHM));
|
||||
assertTrue(signature.isValid(KEY));
|
||||
|
||||
signature = new InternalRequestSignature("[{\"different_config\":\"different_value\"}]".getBytes(), mac, SIGNATURE);
|
||||
assertFalse(signature.isValid(KEY));
|
||||
|
||||
signature = new InternalRequestSignature(REQUEST_BODY, mac, "bad signature".getBytes());
|
||||
assertFalse(signature.isValid(KEY));
|
||||
}
|
||||
|
||||
private static HttpHeaders internalRequestHeaders(String signature, String signatureAlgorithm) {
|
||||
HttpHeaders result = mock(HttpHeaders.class);
|
||||
when(result.getHeaderString(eq(InternalRequestSignature.SIGNATURE_HEADER)))
|
||||
.thenReturn(signature);
|
||||
when(result.getHeaderString(eq(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER)))
|
||||
.thenReturn(signatureAlgorithm);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
|
@ -63,7 +63,7 @@ import static org.junit.Assert.assertNull;
|
|||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PowerMockIgnore({"javax.net.ssl.*", "javax.security.*"})
|
||||
@PowerMockIgnore({"javax.net.ssl.*", "javax.security.*", "javax.crypto.*"})
|
||||
public class RestServerTest {
|
||||
|
||||
@MockStrict
|
||||
|
|
|
|||
|
|
@ -18,7 +18,10 @@ package org.apache.kafka.connect.runtime.rest.resources;
|
|||
|
||||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
|
||||
import javax.crypto.Mac;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.kafka.connect.errors.AlreadyExistsException;
|
||||
import org.apache.kafka.connect.errors.NotFoundException;
|
||||
import org.apache.kafka.connect.runtime.ConnectorConfig;
|
||||
|
|
@ -26,6 +29,7 @@ import org.apache.kafka.connect.runtime.Herder;
|
|||
import org.apache.kafka.connect.runtime.WorkerConfig;
|
||||
import org.apache.kafka.connect.runtime.distributed.NotAssignedException;
|
||||
import org.apache.kafka.connect.runtime.distributed.NotLeaderException;
|
||||
import org.apache.kafka.connect.runtime.rest.InternalRequestSignature;
|
||||
import org.apache.kafka.connect.runtime.rest.RestClient;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
|
||||
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
|
||||
|
|
@ -52,9 +56,11 @@ import javax.ws.rs.core.MultivaluedHashMap;
|
|||
import javax.ws.rs.core.MultivaluedMap;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
|
@ -66,7 +72,7 @@ import static org.junit.Assert.assertEquals;
|
|||
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PrepareForTest(RestClient.class)
|
||||
@PowerMockIgnore("javax.management.*")
|
||||
@PowerMockIgnore({"javax.management.*", "javax.crypto.*"})
|
||||
@SuppressWarnings("unchecked")
|
||||
public class ConnectorsResourceTest {
|
||||
// Note trailing / and that we do *not* use LEADER_URL to construct our reference values. This checks that we handle
|
||||
|
|
@ -622,27 +628,76 @@ public class ConnectorsResourceTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testPutConnectorTaskConfigs() throws Throwable {
|
||||
public void testPutConnectorTaskConfigsNoInternalRequestSignature() throws Throwable {
|
||||
final Capture<Callback<Void>> cb = Capture.newInstance();
|
||||
herder.putTaskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(TASK_CONFIGS), EasyMock.capture(cb));
|
||||
herder.putTaskConfigs(
|
||||
EasyMock.eq(CONNECTOR_NAME),
|
||||
EasyMock.eq(TASK_CONFIGS),
|
||||
EasyMock.capture(cb),
|
||||
EasyMock.anyObject(InternalRequestSignature.class)
|
||||
);
|
||||
expectAndCallbackResult(cb, null);
|
||||
|
||||
PowerMock.replayAll();
|
||||
|
||||
connectorsResource.putTaskConfigs(CONNECTOR_NAME, NULL_HEADERS, FORWARD, TASK_CONFIGS);
|
||||
connectorsResource.putTaskConfigs(CONNECTOR_NAME, NULL_HEADERS, FORWARD, serializeAsBytes(TASK_CONFIGS));
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutConnectorTaskConfigsWithInternalRequestSignature() throws Throwable {
|
||||
final String signatureAlgorithm = "HmacSHA256";
|
||||
final String encodedSignature = "Kv1/OSsxzdVIwvZ4e30avyRIVrngDfhzVUm/kAZEKc4=";
|
||||
|
||||
final Capture<Callback<Void>> cb = Capture.newInstance();
|
||||
final Capture<InternalRequestSignature> signatureCapture = Capture.newInstance();
|
||||
herder.putTaskConfigs(
|
||||
EasyMock.eq(CONNECTOR_NAME),
|
||||
EasyMock.eq(TASK_CONFIGS),
|
||||
EasyMock.capture(cb),
|
||||
EasyMock.capture(signatureCapture)
|
||||
);
|
||||
expectAndCallbackResult(cb, null);
|
||||
|
||||
HttpHeaders headers = EasyMock.mock(HttpHeaders.class);
|
||||
EasyMock.expect(headers.getHeaderString(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER))
|
||||
.andReturn(signatureAlgorithm)
|
||||
.once();
|
||||
EasyMock.expect(headers.getHeaderString(InternalRequestSignature.SIGNATURE_HEADER))
|
||||
.andReturn(encodedSignature)
|
||||
.once();
|
||||
|
||||
PowerMock.replayAll(headers);
|
||||
|
||||
connectorsResource.putTaskConfigs(CONNECTOR_NAME, headers, FORWARD, serializeAsBytes(TASK_CONFIGS));
|
||||
|
||||
PowerMock.verifyAll();
|
||||
InternalRequestSignature expectedSignature = new InternalRequestSignature(
|
||||
serializeAsBytes(TASK_CONFIGS),
|
||||
Mac.getInstance(signatureAlgorithm),
|
||||
Base64.getDecoder().decode(encodedSignature)
|
||||
);
|
||||
assertEquals(
|
||||
expectedSignature,
|
||||
signatureCapture.getValue()
|
||||
);
|
||||
}
|
||||
|
||||
@Test(expected = NotFoundException.class)
|
||||
public void testPutConnectorTaskConfigsConnectorNotFound() throws Throwable {
|
||||
final Capture<Callback<Void>> cb = Capture.newInstance();
|
||||
herder.putTaskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(TASK_CONFIGS), EasyMock.capture(cb));
|
||||
herder.putTaskConfigs(
|
||||
EasyMock.eq(CONNECTOR_NAME),
|
||||
EasyMock.eq(TASK_CONFIGS),
|
||||
EasyMock.capture(cb),
|
||||
EasyMock.anyObject(InternalRequestSignature.class)
|
||||
);
|
||||
expectAndCallbackException(cb, new NotFoundException("not found"));
|
||||
|
||||
PowerMock.replayAll();
|
||||
|
||||
connectorsResource.putTaskConfigs(CONNECTOR_NAME, NULL_HEADERS, FORWARD, TASK_CONFIGS);
|
||||
connectorsResource.putTaskConfigs(CONNECTOR_NAME, NULL_HEADERS, FORWARD, serializeAsBytes(TASK_CONFIGS));
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
|
|
@ -748,6 +803,10 @@ public class ConnectorsResourceTest {
|
|||
PowerMock.verifyAll();
|
||||
}
|
||||
|
||||
private <T> byte[] serializeAsBytes(final T value) throws IOException {
|
||||
return new ObjectMapper().writeValueAsBytes(value);
|
||||
}
|
||||
|
||||
private <T> void expectAndCallbackResult(final Capture<Callback<T>> cb, final T value) {
|
||||
PowerMock.expectLastCall().andAnswer(new IAnswer<Void>() {
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -361,6 +361,7 @@ public class StandaloneHerderTest {
|
|||
|
||||
ClusterConfigState configState = new ClusterConfigState(
|
||||
-1,
|
||||
null,
|
||||
Collections.singletonMap(CONNECTOR_NAME, 1),
|
||||
Collections.singletonMap(CONNECTOR_NAME, connectorConfig),
|
||||
Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED),
|
||||
|
|
@ -395,6 +396,7 @@ public class StandaloneHerderTest {
|
|||
|
||||
ClusterConfigState configState = new ClusterConfigState(
|
||||
-1,
|
||||
null,
|
||||
Collections.singletonMap(CONNECTOR_NAME, 1),
|
||||
Collections.singletonMap(CONNECTOR_NAME, connectorConfig),
|
||||
Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED),
|
||||
|
|
@ -570,7 +572,8 @@ public class StandaloneHerderTest {
|
|||
|
||||
herder.putTaskConfigs(CONNECTOR_NAME,
|
||||
Arrays.asList(singletonMap("config", "value")),
|
||||
cb);
|
||||
cb,
|
||||
null);
|
||||
|
||||
PowerMock.verifyAll();
|
||||
}
|
||||
|
|
@ -645,6 +648,7 @@ public class StandaloneHerderTest {
|
|||
|
||||
ClusterConfigState configState = new ClusterConfigState(
|
||||
-1,
|
||||
null,
|
||||
Collections.singletonMap(CONNECTOR_NAME, 1),
|
||||
Collections.singletonMap(CONNECTOR_NAME, connectorConfig(sourceSink)),
|
||||
Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED),
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ import static org.junit.Assert.assertTrue;
|
|||
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PrepareForTest(KafkaConfigBackingStore.class)
|
||||
@PowerMockIgnore("javax.management.*")
|
||||
@PowerMockIgnore({"javax.management.*", "javax.crypto.*"})
|
||||
@SuppressWarnings({"unchecked", "deprecation"})
|
||||
public class KafkaConfigBackingStoreTest {
|
||||
private static final String TOPIC = "connect-configs";
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ import static org.junit.Assert.fail;
|
|||
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PrepareForTest(KafkaOffsetBackingStore.class)
|
||||
@PowerMockIgnore("javax.management.*")
|
||||
@PowerMockIgnore({"javax.management.*", "javax.crypto.*"})
|
||||
@SuppressWarnings({"unchecked", "deprecation"})
|
||||
public class KafkaOffsetBackingStoreTest {
|
||||
private static final String TOPIC = "connect-offsets";
|
||||
|
|
|
|||
|
|
@ -384,6 +384,28 @@ public class EmbeddedConnectCluster {
|
|||
return httpCon.getResponseCode();
|
||||
}
|
||||
|
||||
public int executePost(String url, String body, Map<String, String> headers) throws IOException {
|
||||
log.debug("Executing POST request to URL={}. Payload={}", url, body);
|
||||
HttpURLConnection httpCon = (HttpURLConnection) new URL(url).openConnection();
|
||||
httpCon.setDoOutput(true);
|
||||
httpCon.setRequestProperty("Content-Type", "application/json");
|
||||
headers.forEach(httpCon::setRequestProperty);
|
||||
httpCon.setRequestMethod("POST");
|
||||
try (OutputStreamWriter out = new OutputStreamWriter(httpCon.getOutputStream())) {
|
||||
out.write(body);
|
||||
}
|
||||
if (httpCon.getResponseCode() < HttpURLConnection.HTTP_BAD_REQUEST) {
|
||||
try (InputStream is = httpCon.getInputStream()) {
|
||||
log.info("POST response for URL={} is {}", url, responseToString(is));
|
||||
}
|
||||
} else {
|
||||
try (InputStream is = httpCon.getErrorStream()) {
|
||||
log.info("POST error response for URL={} is {}", url, responseToString(is));
|
||||
}
|
||||
}
|
||||
return httpCon.getResponseCode();
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a GET request on the given URL.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ class ConnectDistributedTest(Test):
|
|||
STATUS_REPLICATION_FACTOR = "1"
|
||||
STATUS_PARTITIONS = "1"
|
||||
SCHEDULED_REBALANCE_MAX_DELAY_MS = "60000"
|
||||
CONNECT_PROTOCOL="compatible"
|
||||
CONNECT_PROTOCOL="sessioned"
|
||||
|
||||
# Since tasks can be assigned to any node and we're testing with files, we need to make sure the content is the same
|
||||
# across all nodes.
|
||||
|
|
@ -157,7 +157,7 @@ class ConnectDistributedTest(Test):
|
|||
return self._task_has_state(task_id, status, 'RUNNING')
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@matrix(connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_restart_failed_connector(self, connect_protocol):
|
||||
self.CONNECT_PROTOCOL = connect_protocol
|
||||
self.setup_services()
|
||||
|
|
@ -176,7 +176,7 @@ class ConnectDistributedTest(Test):
|
|||
err_msg="Failed to see connector transition to the RUNNING state")
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@matrix(connector_type=['source', 'sink'], connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connector_type=['source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_restart_failed_task(self, connector_type, connect_protocol):
|
||||
self.CONNECT_PROTOCOL = connect_protocol
|
||||
self.setup_services()
|
||||
|
|
@ -201,7 +201,7 @@ class ConnectDistributedTest(Test):
|
|||
err_msg="Failed to see task transition to the RUNNING state")
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@matrix(connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_pause_and_resume_source(self, connect_protocol):
|
||||
"""
|
||||
Verify that source connectors stop producing records when paused and begin again after
|
||||
|
|
@ -242,7 +242,7 @@ class ConnectDistributedTest(Test):
|
|||
err_msg="Failed to produce messages after resuming source connector")
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@matrix(connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_pause_and_resume_sink(self, connect_protocol):
|
||||
"""
|
||||
Verify that sink connectors stop consuming records when paused and begin again after
|
||||
|
|
@ -290,7 +290,7 @@ class ConnectDistributedTest(Test):
|
|||
err_msg="Failed to consume messages after resuming sink connector")
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@matrix(connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_pause_state_persistent(self, connect_protocol):
|
||||
"""
|
||||
Verify that paused state is preserved after a cluster restart.
|
||||
|
|
@ -322,7 +322,7 @@ class ConnectDistributedTest(Test):
|
|||
err_msg="Failed to see connector startup in PAUSED state")
|
||||
|
||||
@cluster(num_nodes=6)
|
||||
@matrix(security_protocol=[SecurityConfig.PLAINTEXT, SecurityConfig.SASL_SSL], connect_protocol=['compatible', 'eager'])
|
||||
@matrix(security_protocol=[SecurityConfig.PLAINTEXT, SecurityConfig.SASL_SSL], connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_file_source_and_sink(self, security_protocol, connect_protocol):
|
||||
"""
|
||||
Tests that a basic file connector works across clean rolling bounces. This validates that the connector is
|
||||
|
|
@ -360,7 +360,7 @@ class ConnectDistributedTest(Test):
|
|||
wait_until(lambda: self._validate_file_output(self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST), timeout_sec=timeout_sec, err_msg="Sink output file never converged to the same state as the input file")
|
||||
|
||||
@cluster(num_nodes=6)
|
||||
@matrix(clean=[True, False], connect_protocol=['compatible', 'eager'])
|
||||
@matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_bounce(self, clean, connect_protocol):
|
||||
"""
|
||||
Validates that source and sink tasks that run continuously and produce a predictable sequence of messages
|
||||
|
|
@ -470,7 +470,7 @@ class ConnectDistributedTest(Test):
|
|||
assert success, "Found validation errors:\n" + "\n ".join(errors)
|
||||
|
||||
@cluster(num_nodes=6)
|
||||
@matrix(connect_protocol=['compatible', 'eager'])
|
||||
@matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
|
||||
def test_transformations(self, connect_protocol):
|
||||
self.CONNECT_PROTOCOL = connect_protocol
|
||||
self.setup_services(timestamp_type='CreateTime')
|
||||
|
|
@ -527,6 +527,11 @@ class ConnectDistributedTest(Test):
|
|||
assert obj['payload'][ts_fieldname] == ts
|
||||
|
||||
@cluster(num_nodes=5)
|
||||
@parametrize(broker_version=str(DEV_BRANCH), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
|
||||
@parametrize(broker_version=str(LATEST_0_11_0), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
|
||||
@parametrize(broker_version=str(LATEST_0_10_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
|
||||
@parametrize(broker_version=str(LATEST_0_10_1), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
|
||||
@parametrize(broker_version=str(LATEST_0_10_0), auto_create_topics=True, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='sessioned')
|
||||
@parametrize(broker_version=str(DEV_BRANCH), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
|
||||
@parametrize(broker_version=str(LATEST_2_3), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
|
||||
@parametrize(broker_version=str(LATEST_2_2), auto_create_topics=False, security_protocol=SecurityConfig.PLAINTEXT, connect_protocol='compatible')
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ bootstrap.servers={{ kafka.bootstrap_servers(kafka.security_config.security_prot
|
|||
|
||||
group.id={{ group|default("connect-cluster") }}
|
||||
|
||||
connect.protocol={{ CONNECT_PROTOCOL|default("compatible") }}
|
||||
connect.protocol={{ CONNECT_PROTOCOL|default("sessioned") }}
|
||||
scheduled.rebalance.max.delay.ms={{ SCHEDULED_REBALANCE_MAX_DELAY_MS|default(60000) }}
|
||||
|
||||
key.converter={{ key_converter|default("org.apache.kafka.connect.json.JsonConverter") }}
|
||||
|
|
|
|||
Loading…
Reference in New Issue