MINOR: Migrate connect system tests to KRaft (#12621)

Adds the `metadata_quorum` parameter to the `@matrix(...)` (and `@parametrize(...)`) annotations of many existing Connect system tests, so that they run against both ZooKeeper (`zk`) and KRaft (`remote_kraft`) metadata quorums.

Reviewers: Randall Hauch <rhauch@gmail.com>, Greg Harris <gharris1727@gmail.com>
Author: srishti-saraswat
Date: 2022-10-27 21:49:14 +05:30 (committed via GitHub)
Parent: 47adb86636
Commit: 57aefa9c82
3 changed files with 36 additions and 31 deletions
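The pattern applied throughout the diffs below can be condensed into the following sketch. It is illustrative only: `ExampleConnectTest`, `test_example`, and the node counts are hypothetical, while `quorum.all_non_upgrade`, `quorum.for_test`, and `quorum.zk` are the helpers from `kafkatest.services.kafka.quorum` that the changed tests actually use.

```python
# Illustrative sketch, not part of the commit: how a Connect system test picks up
# the metadata_quorum parameter and only provisions ZooKeeper when it is needed.
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test

from kafkatest.services.kafka import KafkaService, quorum
from kafkatest.services.zookeeper import ZookeeperService


class ExampleConnectTest(Test):

    def __init__(self, test_context):
        super(ExampleConnectTest, self).__init__(test_context)
        # Create a ZooKeeper service only when the injected metadata_quorum asks for it;
        # with remote_kraft the brokers get their metadata from a KRaft controller quorum.
        self.zk = ZookeeperService(test_context, 1) if quorum.for_test(test_context) == quorum.zk else None
        self.kafka = KafkaService(test_context, 1, self.zk)

    @cluster(num_nodes=5)
    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
    def test_example(self, connect_protocol, metadata_quorum):
        # Ducktape expands metadata_quorum into separate runs; the test body only has to
        # guard ZooKeeper-specific steps behind `if self.zk`.
        if self.zk:
            self.zk.start()
        self.kafka.start()
```

Ducktape expands each value in `metadata_quorum` into its own test run, so every annotated test now executes once against ZooKeeper and once against a remote KRaft controller quorum without further changes to its body.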

tests/kafkatest/tests/connect/connect_distributed_test.py

@@ -20,7 +20,7 @@ from ducktape.mark import matrix, parametrize
 from ducktape.cluster.remoteaccount import RemoteCommandError
 
 from kafkatest.services.zookeeper import ZookeeperService
-from kafkatest.services.kafka import KafkaService, config_property
+from kafkatest.services.kafka import KafkaService, config_property, quorum
 from kafkatest.services.connect import ConnectDistributedService, VerifiableSource, VerifiableSink, ConnectRestError, MockSink, MockSource
 from kafkatest.services.console_consumer import ConsoleConsumer
 from kafkatest.services.security.security_config import SecurityConfig
@@ -75,7 +75,7 @@ class ConnectDistributedTest(Test):
             self.TOPIC: {'partitions': 1, 'replication-factor': 1}
         }
 
-        self.zk = ZookeeperService(test_context, self.num_zk)
+        self.zk = ZookeeperService(test_context, self.num_zk) if quorum.for_test(test_context) == quorum.zk else None
 
         self.key_converter = "org.apache.kafka.connect.json.JsonConverter"
         self.value_converter = "org.apache.kafka.connect.json.JsonConverter"
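For readers unfamiliar with the `quorum` helpers used here, their shape is roughly the following. This is a simplified sketch, not the real `kafkatest/services/kafka/quorum.py`; the exact constants and logic live in that module.

```python
# Simplified sketch of the quorum helpers assumed by the hunk above (not the real module).
zk = 'ZK'                      # brokers store cluster metadata in ZooKeeper
remote_kraft = 'REMOTE_KRAFT'  # brokers use a separate KRaft controller quorum

# Quorums that non-upgrade system tests are expected to cover;
# @matrix(metadata_quorum=quorum.all_non_upgrade) expands a test over every entry.
all_non_upgrade = [zk, remote_kraft]


def for_test(test_context):
    # Tests that do not inject metadata_quorum keep their historical ZooKeeper behaviour.
    if not test_context.injected_args:
        return zk
    return test_context.injected_args.get('metadata_quorum', zk)
```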
@@ -98,6 +98,7 @@ class ConnectDistributedTest(Test):
                                             include_filestream_connectors=include_filestream_connectors)
         self.cc.log_level = "DEBUG"
 
+        if self.zk:
             self.zk.start()
         self.kafka.start()
@@ -164,8 +165,8 @@ class ConnectDistributedTest(Test):
         return self._task_has_state(task_id, status, 'RUNNING')
 
     @cluster(num_nodes=5)
-    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_restart_failed_connector(self, exactly_once_source, connect_protocol):
+    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_restart_failed_connector(self, exactly_once_source, connect_protocol, metadata_quorum):
         self.EXACTLY_ONCE_SOURCE_SUPPORT = 'enabled' if exactly_once_source else 'disabled'
         self.CONNECT_PROTOCOL = connect_protocol
         self.setup_services()
@@ -187,8 +188,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to see connector transition to the RUNNING state")
 
     @cluster(num_nodes=5)
-    @matrix(connector_type=['source', 'exactly-once source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_restart_failed_task(self, connector_type, connect_protocol):
+    @matrix(connector_type=['source', 'exactly-once source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_restart_failed_task(self, connector_type, connect_protocol, metadata_quorum):
         self.EXACTLY_ONCE_SOURCE_SUPPORT = 'enabled' if connector_type == 'exactly-once source' else 'disabled'
         self.CONNECT_PROTOCOL = connect_protocol
         self.setup_services()
@@ -213,8 +214,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to see task transition to the RUNNING state")
 
     @cluster(num_nodes=5)
-    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_restart_connector_and_tasks_failed_connector(self, connect_protocol):
+    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_restart_connector_and_tasks_failed_connector(self, connect_protocol, metadata_quorum):
         self.CONNECT_PROTOCOL = connect_protocol
         self.setup_services()
         self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
@@ -232,8 +233,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to see connector transition to the RUNNING state")
 
     @cluster(num_nodes=5)
-    @matrix(connector_type=['source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_restart_connector_and_tasks_failed_task(self, connector_type, connect_protocol):
+    @matrix(connector_type=['source', 'sink'], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_restart_connector_and_tasks_failed_task(self, connector_type, connect_protocol, metadata_quorum):
         self.CONNECT_PROTOCOL = connect_protocol
         self.setup_services()
         self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))
@@ -257,8 +258,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to see task transition to the RUNNING state")
 
     @cluster(num_nodes=5)
-    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_pause_and_resume_source(self, exactly_once_source, connect_protocol):
+    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_pause_and_resume_source(self, exactly_once_source, connect_protocol, metadata_quorum):
         """
         Verify that source connectors stop producing records when paused and begin again after
         being resumed.
@@ -299,8 +300,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to produce messages after resuming source connector")
 
     @cluster(num_nodes=5)
-    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_pause_and_resume_sink(self, connect_protocol):
+    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_pause_and_resume_sink(self, connect_protocol, metadata_quorum):
         """
         Verify that sink connectors stop consuming records when paused and begin again after
         being resumed.
@@ -347,8 +348,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to consume messages after resuming sink connector")
 
     @cluster(num_nodes=5)
-    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_pause_state_persistent(self, exactly_once_source, connect_protocol):
+    @matrix(exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_pause_state_persistent(self, exactly_once_source, connect_protocol, metadata_quorum):
         """
         Verify that paused state is preserved after a cluster restart.
         """
@@ -375,8 +376,8 @@ class ConnectDistributedTest(Test):
                    err_msg="Failed to see connector startup in PAUSED state")
 
     @cluster(num_nodes=6)
-    @matrix(security_protocol=[SecurityConfig.PLAINTEXT, SecurityConfig.SASL_SSL], exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_file_source_and_sink(self, security_protocol, exactly_once_source, connect_protocol):
+    @matrix(security_protocol=[SecurityConfig.PLAINTEXT, SecurityConfig.SASL_SSL], exactly_once_source=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_file_source_and_sink(self, security_protocol, exactly_once_source, connect_protocol, metadata_quorum):
         """
         Tests that a basic file connector works across clean rolling bounces. This validates that the connector is
         correctly created, tasks instantiated, and as nodes restart the work is rebalanced across nodes.
@@ -409,8 +410,8 @@ class ConnectDistributedTest(Test):
         wait_until(lambda: self._validate_file_output(self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST), timeout_sec=150, err_msg="Sink output file never converged to the same state as the input file")
 
     @cluster(num_nodes=6)
-    @matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_bounce(self, clean, connect_protocol):
+    @matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_bounce(self, clean, connect_protocol, metadata_quorum):
         """
         Validates that source and sink tasks that run continuously and produce a predictable sequence of messages
         run correctly and deliver messages exactly once when Kafka Connect workers undergo clean rolling bounces,
@@ -537,8 +538,8 @@ class ConnectDistributedTest(Test):
         assert success, "Found validation errors:\n" + "\n ".join(errors)
 
     @cluster(num_nodes=6)
-    @matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_exactly_once_source(self, clean, connect_protocol):
+    @matrix(clean=[True, False], connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_exactly_once_source(self, clean, connect_protocol, metadata_quorum):
         """
         Validates that source tasks run correctly and deliver messages exactly once
         when Kafka Connect workers undergo bounces, both clean and unclean.
@@ -641,8 +642,8 @@ class ConnectDistributedTest(Test):
         assert success, "Found validation errors:\n" + "\n ".join(errors)
 
     @cluster(num_nodes=6)
-    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'])
-    def test_transformations(self, connect_protocol):
+    @matrix(connect_protocol=['sessioned', 'compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_transformations(self, connect_protocol, metadata_quorum):
         self.CONNECT_PROTOCOL = connect_protocol
         self.setup_services(timestamp_type='CreateTime', include_filestream_connectors=True)
         self.cc.set_configs(lambda node: self.render("connect-distributed.properties", node=node))

tests/kafkatest/tests/connect/connect_rest_test.py

@@ -15,6 +15,7 @@
 
 from kafkatest.tests.kafka_test import KafkaTest
 from kafkatest.services.connect import ConnectDistributedService, ConnectRestError, ConnectServiceBase
+from kafkatest.services.kafka import quorum
 from ducktape.utils.util import wait_until
 from ducktape.mark import matrix
 from ducktape.mark.resource import cluster
@@ -78,8 +79,8 @@ class ConnectRestApiTest(KafkaTest):
                                             include_filestream_connectors=True)
 
     @cluster(num_nodes=4)
-    @matrix(connect_protocol=['compatible', 'eager'])
-    def test_rest_api(self, connect_protocol):
+    @matrix(connect_protocol=['compatible', 'eager'], metadata_quorum=quorum.all_non_upgrade)
+    def test_rest_api(self, connect_protocol, metadata_quorum):
         # Template parameters
         self.key_converter = "org.apache.kafka.connect.json.JsonConverter"
         self.value_converter = "org.apache.kafka.connect.json.JsonConverter"

tests/kafkatest/tests/connect/connect_test.py

@@ -138,9 +138,11 @@ class ConnectStandaloneFileTest(Test):
             return False
 
     @cluster(num_nodes=5)
-    @parametrize(error_tolerance=ErrorTolerance.ALL)
-    @parametrize(error_tolerance=ErrorTolerance.NONE)
-    def test_skip_and_log_to_dlq(self, error_tolerance):
+    @parametrize(error_tolerance=ErrorTolerance.ALL, metadata_quorum=quorum.zk)
+    @parametrize(error_tolerance=ErrorTolerance.NONE, metadata_quorum=quorum.remote_kraft)
+    @parametrize(error_tolerance=ErrorTolerance.ALL, metadata_quorum=quorum.remote_kraft)
+    @parametrize(error_tolerance=ErrorTolerance.NONE, metadata_quorum=quorum.zk)
+    def test_skip_and_log_to_dlq(self, error_tolerance, metadata_quorum):
         self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk, topics=self.topics)
 
         # set config props
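Unlike the distributed tests above, this test keeps stacked `@parametrize(...)` annotations: each decorator pins one exact argument combination, whereas `@matrix(...)` expands to the full cross product of its keyword lists. A minimal, hypothetical illustration of the difference (class and method names are made up; the decorators are ducktape's):

```python
from ducktape.mark import matrix, parametrize
from ducktape.tests.test import Test


class ExampleAnnotationTest(Test):

    # Cross product: 2 error tolerances x 2 quorums = 4 runs.
    @matrix(error_tolerance=['all', 'none'], metadata_quorum=['ZK', 'REMOTE_KRAFT'])
    def test_with_matrix(self, error_tolerance, metadata_quorum):
        pass

    # One run per decorator, so the combinations are listed explicitly;
    # this is the style test_skip_and_log_to_dlq uses above.
    @parametrize(error_tolerance='all', metadata_quorum='ZK')
    @parametrize(error_tolerance='none', metadata_quorum='REMOTE_KRAFT')
    def test_with_parametrize(self, error_tolerance, metadata_quorum):
        pass
```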
@@ -171,6 +173,7 @@ class ConnectStandaloneFileTest(Test):
         self.sink = ConnectStandaloneService(self.test_context, self.kafka, [self.OUTPUT_FILE, self.OFFSETS_FILE],
                                              include_filestream_connectors=True)
 
+        if self.zk:
             self.zk.start()
         self.kafka.start()