Add test cases to test log size retention and more; patched by John Fung; reviewed by Jun Rao; KAFKA-591

git-svn-id: https://svn.apache.org/repos/asf/incubator/kafka/branches/0.8@1407680 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jun Rao 2012-11-09 22:58:23 +00:00
parent 6800b0ed3b
commit 1cc8c3beab
73 changed files with 6916 additions and 206 deletions

View File

@@ -188,50 +188,38 @@ class MigrationToolTest(ReplicationUtils, SetupUtils):
# =============================================
i = 1
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
bouncedEntityDownTimeSec = 1
try:
bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
except:
pass
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
self.log_message("looking up leader")
leaderDict = kafka_system_test_utils.get_leader_elected_log_line(
self.systemTestEnv, self.testcaseEnv, self.leaderAttributesDict)
# ==========================
# leaderDict looks like this:
# ==========================
#{'entity_id': u'3',
# 'partition': '0',
# 'timestamp': 1345050255.8280001,
# 'hostname': u'localhost',
# 'topic': 'test_1',
# 'brokerid': '3'}
# =============================================
# Bounce Migration Tool
# =============================================
bounceMigrationTool = self.testcaseEnv.testcaseArgumentsDict["bounce_migration_tool"]
self.log_message("bounce_migration_tool flag : " + bounceMigrationTool)
if (bounceMigrationTool.lower() == "true"):
# =============================================
# validate to see if leader election is successful
# =============================================
self.log_message("validating leader election")
result = kafka_system_test_utils.validate_leader_election_successful(
self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict)
# =============================================
# trigger leader re-election by stopping leader
# to get re-election latency
# =============================================
bounceLeaderFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_leader"]
self.log_message("bounce_leader flag : " + bounceLeaderFlag)
if (bounceLeaderFlag.lower() == "true"):
reelectionLatency = kafka_system_test_utils.get_reelection_latency(
self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict)
latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"]
self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms"
# =============================================
# starting previously terminated broker
# =============================================
if bounceLeaderFlag.lower() == "true":
self.log_message("starting the previously terminated broker")
stoppedLeaderEntityId = leaderDict["entity_id"]
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedLeaderEntityId)
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
migrationToolEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterConfigList, "role", "migration_tool", "entity_id")
stoppedMigrationToolEntityId = migrationToolEntityIdList[0]
migrationToolPPid = self.testcaseEnv.entityMigrationToolParentPidDict[stoppedMigrationToolEntityId]
self.log_message("stopping migration tool : " + migrationToolPPid)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMigrationToolEntityId, migrationToolPPid)
self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
time.sleep(bouncedEntityDownTimeSec)
# starting previously terminated broker
self.log_message("starting the previously terminated migration tool")
kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv, stoppedMigrationToolEntityId)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)

View File

@@ -9,7 +9,7 @@
"08":"Log segment size : 51200"
},
"testcase_args": {
"bounce_leader": "false",
"bounce_migration_tool": "false",
"replica_factor": "3",
"num_partition": "1",
"num_iteration": "1",

View File

@@ -0,0 +1,112 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9994"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9995"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9996"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9997"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9890"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9891"
}
]
}

View File

@@ -0,0 +1,136 @@
{
"description": {"01":"To Test : 'Replication with Migration Tool'",
"02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
"03":"Produce and consume messages to a single topic - single partition.",
"04":"This test sends messages to 3 replicas",
"05":"At the end it verifies the log size and contents",
"06":"Use a consumer to verify no message loss in TARGET cluster.",
"07":"Producer dimensions : mode:async, acks:-1, comp:1",
"08":"Log segment size : 51200"
},
"testcase_args": {
"bounce_migration_tool": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "1",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "30",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_1.log",
"config_filename": "kafka_server_1.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_2.log",
"config_filename": "kafka_server_2.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_3.log",
"config_filename": "kafka_server_3.properties"
},
{
"entity_id": "4",
"port": "9094",
"brokerid": "4",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9095",
"brokerid": "5",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9096",
"brokerid": "6",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"async": "true",
"log_filename": "producer_performance_7.log",
"config_filename": "producer_performance_7.properties"
},
{
"entity_id": "8",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_8.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_8.properties"
},
{
"entity_id": "9",
"clientPort": "2191",
"dataDir": "/tmp/zookeeper_9",
"log_filename": "zookeeper_9.log",
"config_filename": "zookeeper_9.properties"
},
{
"entity_id": "10",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_10.log",
"config_filename": "migration_tool_10.properties"
},
{
"entity_id": "11",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_11.log",
"config_filename": "migration_tool_11.properties"
}
]
}

View File

@@ -0,0 +1,112 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9994"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9995"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9996"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9997"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9890"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9891"
}
]
}

View File

@@ -0,0 +1,136 @@
{
"description": {"01":"To Test : 'Replication with Migration Tool'",
"02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
"03":"Produce and consume messages to a single topic - single partition.",
"04":"This test sends messages to 3 replicas",
"05":"At the end it verifies the log size and contents",
"06":"Use a consumer to verify no message loss in TARGET cluster.",
"07":"Producer dimensions : mode:async, acks:1, comp:1",
"08":"Log segment size : 51200"
},
"testcase_args": {
"bounce_migration_tool": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "1",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "30",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_1.log",
"config_filename": "kafka_server_1.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_2.log",
"config_filename": "kafka_server_2.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_3.log",
"config_filename": "kafka_server_3.properties"
},
{
"entity_id": "4",
"port": "9094",
"brokerid": "4",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9095",
"brokerid": "5",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9096",
"brokerid": "6",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"async": "true",
"log_filename": "producer_performance_7.log",
"config_filename": "producer_performance_7.properties"
},
{
"entity_id": "8",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_8.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_8.properties"
},
{
"entity_id": "9",
"clientPort": "2191",
"dataDir": "/tmp/zookeeper_9",
"log_filename": "zookeeper_9.log",
"config_filename": "zookeeper_9.properties"
},
{
"entity_id": "10",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_10.log",
"config_filename": "migration_tool_10.properties"
},
{
"entity_id": "11",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_11.log",
"config_filename": "migration_tool_11.properties"
}
]
}

View File

@@ -0,0 +1,141 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9900"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9901"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9902"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9903"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9904"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9905"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9906"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9907"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9908"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9909"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9910"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9911"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9912"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9913"
}
]
}

View File

@@ -0,0 +1,167 @@
{
"description": {"01":"To Test : 'Replication with Migration Tool'",
"02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
"03":"Produce and consume messages to 2 topics - 2 partitions.",
"04":"This test sends messages to 3 replicas",
"05":"At the end it verifies the log size and contents",
"06":"Use a consumer to verify no message loss in TARGET cluster.",
"07":"Producer dimensions : mode:async, acks:-1, comp:1",
"08":"Log segment size : 51200"
},
"testcase_args": {
"bounce_migration_tool": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "30",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2191",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"port": "9091",
"brokerid": "1",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_2.log",
"config_filename": "kafka_server_2.properties"
},
{
"entity_id": "3",
"port": "9092",
"brokerid": "2",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_3.log",
"config_filename": "kafka_server_3.properties"
},
{
"entity_id": "4",
"port": "9093",
"brokerid": "3",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9094",
"brokerid": "4",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9095",
"brokerid": "5",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9096",
"brokerid": "6",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"async": "true",
"log_filename": "producer_performance_8.log",
"config_filename": "producer_performance_8.properties"
},
{
"entity_id": "9",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"async": "true",
"log_filename": "producer_performance_9.log",
"config_filename": "producer_performance_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_10.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_10.properties"
},
{
"entity_id": "11",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_11.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_11.properties"
},
{
"entity_id": "12",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_12.log",
"config_filename": "migration_tool_12.properties"
},
{
"entity_id": "13",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_13.log",
"config_filename": "migration_tool_13.properties"
}
]
}

View File

@@ -0,0 +1,141 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9900"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9901"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9902"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9903"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "default",
"jmx_port": "9904"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9905"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9906"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9907"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9908"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "system_test/migration_tool_testsuite/0.7",
"java_home": "/export/apps/jdk/JDK-1_6_0_27",
"jmx_port": "9909"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9910"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9911"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9912"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "migration_tool",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9913"
}
]
}

View File

@@ -0,0 +1,167 @@
{
"description": {"01":"To Test : 'Replication with Migration Tool'",
"02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
"03":"Produce and consume messages to 2 topics - 2 partitions.",
"04":"This test sends messages to 3 replicas",
"05":"At the end it verifies the log size and contents",
"06":"Use a consumer to verify no message loss in TARGET cluster.",
"07":"Producer dimensions : mode:async, acks:1, comp:1",
"08":"Log segment size : 51200"
},
"testcase_args": {
"bounce_migration_tool": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "30",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2191",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"port": "9091",
"brokerid": "1",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_2.log",
"config_filename": "kafka_server_2.properties"
},
{
"entity_id": "3",
"port": "9092",
"brokerid": "2",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_3.log",
"config_filename": "kafka_server_3.properties"
},
{
"entity_id": "4",
"port": "9093",
"brokerid": "3",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9094",
"brokerid": "4",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9095",
"brokerid": "5",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9096",
"brokerid": "6",
"log.file.size": "51200",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"async": "true",
"log_filename": "producer_performance_8.log",
"config_filename": "producer_performance_8.properties"
},
{
"entity_id": "9",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"async": "true",
"log_filename": "producer_performance_9.log",
"config_filename": "producer_performance_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_10.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_10.properties"
},
{
"entity_id": "11",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_11.log",
"formatter": "kafka.consumer.ChecksumMessageFormatter",
"config_filename": "console_consumer_11.properties"
},
{
"entity_id": "12",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_12.log",
"config_filename": "migration_tool_12.properties"
},
{
"entity_id": "13",
"whitelist": ".*",
"num.producers": "2",
"num.streams": "2",
"producer.config": "migration_tool_testsuite/config/migration_producer.properties",
"consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
"zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
"kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
"log_filename": "migration_tool_13.log",
"config_filename": "migration_tool_13.properties"
}
]
}

View File

@@ -191,6 +191,12 @@ class MirrorMakerTest(ReplicationUtils, SetupUtils):
# =============================================
i = 1
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
bouncedEntityDownTimeSec = 1
try:
bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
except:
pass
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
@@ -202,18 +208,20 @@ class MirrorMakerTest(ReplicationUtils, SetupUtils):
self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker)
if (bounceMirrorMaker.lower() == "true"):
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterConfigList, "role", "mirror_maker", "entity_id")
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterConfigList, "role", "mirror_maker", "entity_id")
stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0]
mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[mirrorMakerEntityIdList[0]]
mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId]
self.log_message("stopping mirror maker : " + mirrorMakerPPid)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, mirrorMakerEntityIdList[0], mirrorMakerPPid)
time.sleep(1)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid)
self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
time.sleep(bouncedEntityDownTimeSec)
# starting previously terminated broker
self.log_message("starting the previously terminated mirror maker")
kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)

View File

@ -0,0 +1,135 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9100"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9101"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9102"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9103"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9104"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9105"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9106"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9107"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9108"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9109"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9110"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9111"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9112"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9113"
}
]
}

View File

@ -0,0 +1,143 @@
{
"description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
"02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
"03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
"04":"Produce and consume messages to a single topic - single partition.",
"05":"This test sends messages to 3 replicas",
"06":"At the end it verifies the log size and contents",
"07":"Use a consumer to verify no message loss in TARGET cluster.",
"08":"Producer dimensions : mode:async, acks:-1, comp:1",
"09":"Log segment size : 10240"
},
"testcase_args": {
"bounce_leader": "false",
"bounce_mirror_maker": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "1",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2108",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2118",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"clientPort": "2128",
"dataDir": "/tmp/zookeeper_2",
"log_filename": "zookeeper_2.log",
"config_filename": "zookeeper_2.properties"
},
{
"entity_id": "3",
"clientPort": "2138",
"dataDir": "/tmp/zookeeper_3",
"log_filename": "zookeeper_3.log",
"config_filename": "zookeeper_3.properties"
},
{
"entity_id": "4",
"port": "9091",
"brokerid": "1",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9092",
"brokerid": "2",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9093",
"brokerid": "3",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9094",
"brokerid": "4",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"port": "9095",
"brokerid": "5",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_8_logs",
"log_filename": "kafka_server_8.log",
"config_filename": "kafka_server_8.properties"
},
{
"entity_id": "9",
"port": "9096",
"brokerid": "6",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_9_logs",
"log_filename": "kafka_server_9.log",
"config_filename": "kafka_server_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_10.log",
"config_filename": "producer_performance_10.properties"
},
{
"entity_id": "11",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_11.log",
"config_filename": "console_consumer_11.properties"
},
{
"entity_id": "12",
"log_filename": "mirror_maker_12.log",
"mirror_consumer_config_filename": "mirror_consumer_12.properties",
"mirror_producer_config_filename": "mirror_producer_12.properties"
},
{
"entity_id": "13",
"log_filename": "mirror_maker_13.log",
"mirror_consumer_config_filename": "mirror_consumer_13.properties",
"mirror_producer_config_filename": "mirror_producer_13.properties"
}
]
}

View File

@ -0,0 +1,135 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9100"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9101"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9102"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9103"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9104"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9105"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9106"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9107"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9108"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9109"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9110"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9111"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9112"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9113"
}
]
}

View File

@ -0,0 +1,143 @@
{
"description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
"02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
"03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
"04":"Produce and consume messages to a single topic - single partition.",
"05":"This test sends messages to 3 replicas",
"06":"At the end it verifies the log size and contents",
"07":"Use a consumer to verify no message loss in TARGET cluster.",
"08":"Producer dimensions : mode:async, acks:1, comp:1",
"09":"Log segment size : 10240"
},
"testcase_args": {
"bounce_leader": "false",
"bounce_mirror_maker": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "1",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2108",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2118",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"clientPort": "2128",
"dataDir": "/tmp/zookeeper_2",
"log_filename": "zookeeper_2.log",
"config_filename": "zookeeper_2.properties"
},
{
"entity_id": "3",
"clientPort": "2138",
"dataDir": "/tmp/zookeeper_3",
"log_filename": "zookeeper_3.log",
"config_filename": "zookeeper_3.properties"
},
{
"entity_id": "4",
"port": "9091",
"brokerid": "1",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9092",
"brokerid": "2",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9093",
"brokerid": "3",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9094",
"brokerid": "4",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"port": "9095",
"brokerid": "5",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_8_logs",
"log_filename": "kafka_server_8.log",
"config_filename": "kafka_server_8.properties"
},
{
"entity_id": "9",
"port": "9096",
"brokerid": "6",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_9_logs",
"log_filename": "kafka_server_9.log",
"config_filename": "kafka_server_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_10.log",
"config_filename": "producer_performance_10.properties"
},
{
"entity_id": "11",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_11.log",
"config_filename": "console_consumer_11.properties"
},
{
"entity_id": "12",
"log_filename": "mirror_maker_12.log",
"mirror_consumer_config_filename": "mirror_consumer_12.properties",
"mirror_producer_config_filename": "mirror_producer_12.properties"
},
{
"entity_id": "13",
"log_filename": "mirror_maker_13.log",
"mirror_consumer_config_filename": "mirror_consumer_13.properties",
"mirror_producer_config_filename": "mirror_producer_13.properties"
}
]
}

View File

@ -0,0 +1,153 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9100"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9101"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9102"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9103"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9104"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9105"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9106"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9107"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9108"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9109"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9110"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9111"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9112"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9113"
},
{
"entity_id": "14",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9114"
},
{
"entity_id": "15",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9115"
}
]
}

View File

@ -0,0 +1,164 @@
{
"description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
"02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
"03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
"04":"Produce and consume messages to 2 topics - 2 partitions.",
"05":"This test sends messages to 3 replicas",
"06":"At the end it verifies the log size and contents",
"07":"Use a consumer to verify no message loss in TARGET cluster.",
"08":"Producer dimensions : mode:async, acks:-1, comp:1",
"09":"Log segment size : 10240"
},
"testcase_args": {
"bounce_leader": "false",
"bounce_mirror_maker": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2108",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2118",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"clientPort": "2128",
"dataDir": "/tmp/zookeeper_2",
"log_filename": "zookeeper_2.log",
"config_filename": "zookeeper_2.properties"
},
{
"entity_id": "3",
"clientPort": "2138",
"dataDir": "/tmp/zookeeper_3",
"log_filename": "zookeeper_3.log",
"config_filename": "zookeeper_3.properties"
},
{
"entity_id": "4",
"port": "9091",
"brokerid": "1",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9092",
"brokerid": "2",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9093",
"brokerid": "3",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9094",
"brokerid": "4",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"port": "9095",
"brokerid": "5",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_8_logs",
"log_filename": "kafka_server_8.log",
"config_filename": "kafka_server_8.properties"
},
{
"entity_id": "9",
"port": "9096",
"brokerid": "6",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_9_logs",
"log_filename": "kafka_server_9.log",
"config_filename": "kafka_server_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_10.log",
"config_filename": "producer_performance_10.properties"
},
{
"entity_id": "11",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_11.log",
"config_filename": "producer_performance_11.properties"
},
{
"entity_id": "12",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_12.log",
"config_filename": "console_consumer_12.properties"
},
{
"entity_id": "13",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_13.log",
"config_filename": "console_consumer_13.properties"
},
{
"entity_id": "14",
"log_filename": "mirror_maker_14.log",
"mirror_consumer_config_filename": "mirror_consumer_14.properties",
"mirror_producer_config_filename": "mirror_producer_14.properties"
},
{
"entity_id": "15",
"log_filename": "mirror_maker_15.log",
"mirror_consumer_config_filename": "mirror_consumer_15.properties",
"mirror_producer_config_filename": "mirror_producer_15.properties"
}
]
}

View File

@ -0,0 +1,153 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9100"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9101"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9102"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9103"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9104"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9105"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "broker",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9106"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9107"
},
{
"entity_id": "8",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9108"
},
{
"entity_id": "9",
"hostname": "localhost",
"role": "broker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9109"
},
{
"entity_id": "10",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9110"
},
{
"entity_id": "11",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name":"source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9111"
},
{
"entity_id": "12",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9112"
},
{
"entity_id": "13",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9113"
},
{
"entity_id": "14",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9114"
},
{
"entity_id": "15",
"hostname": "localhost",
"role": "mirror_maker",
"cluster_name":"target",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9115"
}
]
}

View File

@ -0,0 +1,164 @@
{
"description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
"02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
"03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
"04":"Produce and consume messages to 2 topics - 2 partitions.",
"05":"This test sends messages to 3 replicas",
"06":"At the end it verifies the log size and contents",
"07":"Use a consumer to verify no message loss in TARGET cluster.",
"08":"Producer dimensions : mode:async, acks:1, comp:1",
"09":"Log segment size : 10240"
},
"testcase_args": {
"bounce_leader": "false",
"bounce_mirror_maker": "true",
"bounced_entity_downtime_sec": "30",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15",
"num_messages_to_produce_per_producer_call": "50"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2108",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_0.log",
"config_filename": "zookeeper_0.properties"
},
{
"entity_id": "1",
"clientPort": "2118",
"dataDir": "/tmp/zookeeper_1",
"log_filename": "zookeeper_1.log",
"config_filename": "zookeeper_1.properties"
},
{
"entity_id": "2",
"clientPort": "2128",
"dataDir": "/tmp/zookeeper_2",
"log_filename": "zookeeper_2.log",
"config_filename": "zookeeper_2.properties"
},
{
"entity_id": "3",
"clientPort": "2138",
"dataDir": "/tmp/zookeeper_3",
"log_filename": "zookeeper_3.log",
"config_filename": "zookeeper_3.properties"
},
{
"entity_id": "4",
"port": "9091",
"brokerid": "1",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_4_logs",
"log_filename": "kafka_server_4.log",
"config_filename": "kafka_server_4.properties"
},
{
"entity_id": "5",
"port": "9092",
"brokerid": "2",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_5_logs",
"log_filename": "kafka_server_5.log",
"config_filename": "kafka_server_5.properties"
},
{
"entity_id": "6",
"port": "9093",
"brokerid": "3",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_6_logs",
"log_filename": "kafka_server_6.log",
"config_filename": "kafka_server_6.properties"
},
{
"entity_id": "7",
"port": "9094",
"brokerid": "4",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_7_logs",
"log_filename": "kafka_server_7.log",
"config_filename": "kafka_server_7.properties"
},
{
"entity_id": "8",
"port": "9095",
"brokerid": "5",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_8_logs",
"log_filename": "kafka_server_8.log",
"config_filename": "kafka_server_8.properties"
},
{
"entity_id": "9",
"port": "9096",
"brokerid": "6",
"log.file.size": "10240",
"log.dir": "/tmp/kafka_server_9_logs",
"log_filename": "kafka_server_9.log",
"config_filename": "kafka_server_9.properties"
},
{
"entity_id": "10",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_10.log",
"config_filename": "producer_performance_10.properties"
},
{
"entity_id": "11",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_11.log",
"config_filename": "producer_performance_11.properties"
},
{
"entity_id": "12",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_12.log",
"config_filename": "console_consumer_12.properties"
},
{
"entity_id": "13",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_13.log",
"config_filename": "console_consumer_13.properties"
},
{
"entity_id": "14",
"log_filename": "mirror_maker_14.log",
"mirror_consumer_config_filename": "mirror_consumer_14.properties",
"mirror_producer_config_filename": "mirror_producer_14.properties"
},
{
"entity_id": "15",
"log_filename": "mirror_maker_15.log",
"mirror_consumer_config_filename": "mirror_consumer_15.properties",
"mirror_producer_config_filename": "mirror_producer_15.properties"
}
]
}

View File

@ -23,6 +23,7 @@
import inspect
import logging
import os
import pprint
import signal
import subprocess
import sys
@ -115,7 +116,14 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
# Product Specific Testing Code Starts Here: #
# ============================================================================== #
# ============================================================================== #
# get optional testcase arguments
logRetentionTest = "false"
try:
logRetentionTest = self.testcaseEnv.testcaseArgumentsDict["log_retention_test"]
except:
pass
# initialize self.testcaseEnv with user-defined environment variables (product specific)
self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
@ -170,7 +178,15 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
# =============================================
# start ConsoleConsumer if this is a Log Retention test
# =============================================
if logRetentionTest.lower() == "true":
self.log_message("starting consumer in the background")
kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
time.sleep(1)
# =============================================
# starting producer
# =============================================
@ -188,14 +204,22 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
brokerType = self.testcaseEnv.testcaseArgumentsDict["broker_type"]
bounceBrokerFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_broker"]
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
self.log_message("bounce_broker flag : " + bounceBrokerFlag)
leaderDict = None
controllerDict = None
stoppedBrokerEntityId = ""
# ==============================================
# Find out the entity id for the stopping broker
# ==============================================
if brokerType == "leader" or brokerType == "follower":
self.log_message("looking up leader")
leaderDict = kafka_system_test_utils.get_leader_elected_log_line(
self.systemTestEnv, self.testcaseEnv, self.leaderAttributesDict)
leaderDict = kafka_system_test_utils.get_leader_elected_log_line(self.systemTestEnv, self.testcaseEnv, self.leaderAttributesDict)
# ==========================
# leaderDict looks like this:
# ==========================
@ -205,69 +229,28 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
# 'hostname': u'localhost',
# 'topic': 'test_1',
# 'brokerid': '3'}
if brokerType == "leader":
# validate to see if leader election is successful
self.log_message("validating leader election")
result = kafka_system_test_utils.validate_leader_election_successful( \
self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict)
# trigger leader re-election by stopping leader to get re-election latency
self.log_message("bounce_broker flag : " + bounceBrokerFlag)
if bounceBrokerFlag.lower() == "true":
reelectionLatency = kafka_system_test_utils.get_reelection_latency(
self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict)
latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"]
self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms"
self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"].append("{0:.2f}".format(reelectionLatency * 1000))
time.sleep(1)
# starting previously terminated broker
self.log_message("starting the previously terminated broker")
stoppedLeaderEntityId = leaderDict["entity_id"]
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedLeaderEntityId)
else:
try:
# GC Pause simulation
hostname = leaderDict["hostname"]
pauseTime = self.testcaseEnv.testcaseArgumentsDict["pause_time_in_seconds"]
parentPid = self.testcaseEnv.entityBrokerParentPidDict[leaderDict["entity_id"]]
pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid)
system_test_utils.simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTime)
except:
pass
else: # follower
stoppedBrokerEntityId = leaderDict["entity_id"]
self.log_message("Found leader with entity id: " + stoppedBrokerEntityId)
else: # Follower
self.log_message("looking up follower")
# a list of all brokers
brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
self.systemTestEnv.clusterEntityConfigDictList, "role", "broker", "entity_id")
# we pick the follower from the first broker which is not the leader
brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(self.systemTestEnv.clusterEntityConfigDictList, "role", "broker", "entity_id")
# we pick the first non-leader broker as the follower
firstFollowerEntityId = None
for brokerEntityId in brokerEntityIdList:
if brokerEntityId != leaderDict["entity_id"]:
firstFollowerEntityId = brokerEntityId
break
# stopping Follower
self.log_message("bounce_broker flag : " + bounceBrokerFlag)
if bounceBrokerFlag.lower() == "true":
self.log_message("stopping follower with entity id: " + firstFollowerEntityId)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, firstFollowerEntityId,
self.testcaseEnv.entityBrokerParentPidDict[firstFollowerEntityId])
stoppedBrokerEntityId = firstFollowerEntityId
self.log_message("Found follower with entity id: " + stoppedBrokerEntityId)
time.sleep(1)
# starting previously terminated broker
self.log_message("starting the previously terminated broker")
stoppedBrokerEntityId = firstFollowerEntityId
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId)
elif brokerType == "controller":
self.log_message("looking up controller")
controllerDict = kafka_system_test_utils.get_controller_attributes(self.systemTestEnv, self.testcaseEnv)
# ==========================
# controllerDict looks like this:
# ==========================
@ -275,26 +258,65 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
# 'timestamp': 1345050255.8280001,
# 'hostname': u'localhost',
# 'brokerid': '3'}
# stopping Controller
self.log_message("bounce_broker flag : " + bounceBrokerFlag)
if bounceBrokerFlag.lower() == "true":
stoppedBrokerEntityId = controllerDict["entity_id"]
self.log_message("Found controller with entity id: " + stoppedBrokerEntityId)
# =============================================
# Bounce the broker
# =============================================
if bounceBrokerFlag.lower() == "true":
if brokerType == "leader":
# validate to see if leader election is successful
self.log_message("validating leader election")
kafka_system_test_utils.validate_leader_election_successful(self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict)
# trigger leader re-election by stopping leader to get re-election latency
reelectionLatency = kafka_system_test_utils.get_reelection_latency(self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict)
latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"]
self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms"
self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"].append("{0:.2f}".format(reelectionLatency * 1000))
elif brokerType == "follower":
# stopping Follower
self.log_message("stopping follower with entity id: " + firstFollowerEntityId)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, firstFollowerEntityId, self.testcaseEnv.entityBrokerParentPidDict[firstFollowerEntityId])
elif brokerType == "controller":
# stopping Controller
self.log_message("stopping controller : " + controllerDict["brokerid"])
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, controllerDict["entity_id"],
self.testcaseEnv.entityBrokerParentPidDict[controllerDict["entity_id"]])
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, controllerDict["entity_id"], self.testcaseEnv.entityBrokerParentPidDict[controllerDict["entity_id"]])
time.sleep(1)
brokerDownTimeInSec = 5
try:
brokerDownTimeInSec = int(self.testcaseEnv.testcaseArgumentsDict["broker_down_time_in_sec"])
except:
pass # take default
time.sleep(brokerDownTimeInSec)
# starting previously terminated broker
self.log_message("starting the previously terminated broker")
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId)
else:
# GC Pause simulation
pauseTime = None
try:
hostname = leaderDict["hostname"]
pauseTime = self.testcaseEnv.testcaseArgumentsDict["pause_time_in_seconds"]
parentPid = self.testcaseEnv.entityBrokerParentPidDict[leaderDict["entity_id"]]
pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid)
system_test_utils.simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTime)
except:
pass
# starting previously terminated broker
self.log_message("starting the previously terminated broker")
stoppedBrokerEntityId = controllerDict["entity_id"]
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)
i += 1
# while loop
# update Leader Election Latency MIN/MAX to testcaseEnv.validationStatusDict
self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = None
try:
self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = \
@ -335,11 +357,25 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
time.sleep(2)
# =============================================
# starting consumer
# collect logs from remote hosts to find the
# minimum common offset of a certain log
# segment file among all replicas
# =============================================
minStartingOffsetDict = None
if logRetentionTest.lower() == "true":
self.anonLogger.info("sleeping for 10s before collecting logs")
time.sleep(10)
kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
minStartingOffsetDict = kafka_system_test_utils.getMinCommonStartingOffset(self.systemTestEnv, self.testcaseEnv)
print
pprint.pprint(minStartingOffsetDict)
# =============================================
# starting debug consumer
# =============================================
self.log_message("starting debug consumers in the background")
#kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.start_simple_consumer(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.start_simple_consumer(self.systemTestEnv, self.testcaseEnv, minStartingOffsetDict)
self.anonLogger.info("sleeping for 10s")
time.sleep(10)
@ -365,9 +401,14 @@ class ReplicaBasicTest(ReplicationUtils, SetupUtils):
# validate the data matched and checksum
# =============================================
self.log_message("validating data matched")
#kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.validate_simple_consumer_data_matched(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
if logRetentionTest.lower() == "true":
kafka_system_test_utils.validate_simple_consumer_data_matched_across_replicas(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv)
else:
#kafka_system_test_utils.validate_simple_consumer_data_matched(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.validate_simple_consumer_data_matched_across_replicas(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
# =============================================
# draw graphs

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,102 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. log.index.interval.bytes => 490",
"02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message",
"03":"Produce and consume messages to 2 topics - 3 partitions",
"04":"This test sends messages to 3 replicas",
"05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"06":"Restart the terminated broker",
"07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"08":"At the end it verifies the log size and contents",
"09":"Use a consumer to verify no message loss.",
"10":"Producer dimensions : mode:sync, acks:-1, comp:0",
"11":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_1_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_2_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_3_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,102 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1",
"02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message",
"03":"Produce and consume messages to 2 topics - 3 partitions",
"04":"This test sends messages to 3 replicas",
"05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"06":"Restart the terminated broker",
"07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"08":"At the end it verifies the log size and contents",
"09":"Use a consumer to verify no message loss.",
"10":"Producer dimensions : mode:sync, acks:1, comp:0",
"11":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_1_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_2_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_3_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,102 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => -1, 2. comp => 1",
"02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message",
"03":"Produce and consume messages to 2 topics - 3 partitions",
"04":"This test sends messages to 3 replicas",
"05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"06":"Restart the terminated broker",
"07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"08":"At the end it verifies the log size and contents",
"09":"Use a consumer to verify no message loss.",
"10":"Producer dimensions : mode:sync, acks:-1, comp:1",
"11":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_1_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_2_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_3_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,102 @@
{
    "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1, 2. comp => 1",
"02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message",
"03":"Produce and consume messages to 2 topics - 3 partitions",
"04":"This test sends messages to 3 replicas",
"05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"06":"Restart the terminated broker",
"07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"08":"At the end it verifies the log size and contents",
"09":"Use a consumer to verify no message loss.",
"10":"Producer dimensions : mode:sync, acks:1, comp:1",
"11":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_1_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_2_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_3_logs",
"log.index.interval.bytes": "490",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,101 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test",
"02":"Produce and consume messages to 2 topics - 3 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:-1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_1_logs",
"log.index.interval.bytes": "10",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_2_logs",
"log.index.interval.bytes": "10",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.dir": "/tmp/kafka_server_3_logs",
"log.index.interval.bytes": "10",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,98 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 3 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:1, comp:0",
"10":"Log segment size : 512000"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9099"
}
]
}

View File

@ -0,0 +1,98 @@
{
"description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. comp => 0",
"02":"Produce and consume messages to 2 topics - 3 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:1, comp:1",
"10":"Log segment size : 512000"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "3",
"num_iteration": "3",
"sleep_seconds_between_producer_calls": "1",
"message_producing_free_time_sec": "15"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "512000",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "100",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "10000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -65,6 +65,7 @@
"message": "100",
"request-num-acks": "1",
"sync":"true",
"producer-retry-backoff-ms": "2500",
"log_filename": "producer_performance.log",
"config_filename": "producer_performance.properties"
},

View File

@ -65,6 +65,7 @@
"message": "100",
"request-num-acks": "1",
"sync":"false",
"producer-retry-backoff-ms": "2500",
"log_filename": "producer_performance.log",
"config_filename": "producer_performance.properties"
},

View File

@ -65,6 +65,7 @@
"message": "100",
"request-num-acks": "1",
"sync":"true",
"producer-retry-backoff-ms": "2500",
"log_filename": "producer_performance.log",
"config_filename": "producer_performance.properties"
},

View File

@ -65,6 +65,7 @@
"message": "100",
"request-num-acks": "1",
"sync":"false",
"producer-retry-backoff-ms": "2500",
"log_filename": "producer_performance.log",
"config_filename": "producer_performance.properties"
},

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : Base Test",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:-1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => -1, comp => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:-1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. sync => false, acks => -1, comp => 0",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:-1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => -1, 2. comp => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:-1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 3 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "3",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : Base Test",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:-1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => -1, comp => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:-1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:sync, acks:1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"true",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. sync => false, acks => -1, comp => 0",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:-1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:1, comp:0",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "0",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => -1, 2. comp => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:-1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "-1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -0,0 +1,76 @@
{
"cluster_config": [
{
"entity_id": "0",
"hostname": "localhost",
"role": "zookeeper",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9990"
},
{
"entity_id": "1",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9991"
},
{
"entity_id": "2",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9992"
},
{
"entity_id": "3",
"hostname": "localhost",
"role": "broker",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9993"
},
{
"entity_id": "4",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9997"
},
{
"entity_id": "5",
"hostname": "localhost",
"role": "producer_performance",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9998"
},
{
"entity_id": "6",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9999"
},
{
"entity_id": "7",
"hostname": "localhost",
"role": "console_consumer",
"cluster_name": "source",
"kafka_home": "default",
"java_home": "default",
"jmx_port": "9191"
}
]
}

View File

@ -0,0 +1,103 @@
{
"description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1",
"02":"Produce and consume messages to 2 topics - 2 partitions",
"03":"This test sends messages to 2 replicas",
"04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)",
"05":"Restart the terminated broker",
"06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully",
"07":"At the end it verifies the log size and contents",
"08":"Use a consumer to verify no message loss.",
"09":"Producer dimensions : mode:async, acks:1, comp:1",
"10":"Log segment size : 102400"
},
"testcase_args": {
"broker_type": "leader",
"bounce_broker": "true",
"replica_factor": "2",
"num_partition": "2",
"num_iteration": "1",
"sleep_seconds_between_producer_calls": "1",
"broker_down_time_in_sec": "5",
"message_producing_free_time_sec": "15",
"log_retention_test": "true"
},
"entities": [
{
"entity_id": "0",
"clientPort": "2188",
"dataDir": "/tmp/zookeeper_0",
"log_filename": "zookeeper_2188.log",
"config_filename": "zookeeper_2188.properties"
},
{
"entity_id": "1",
"port": "9091",
"brokerid": "1",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_1_logs",
"log_filename": "kafka_server_9091.log",
"config_filename": "kafka_server_9091.properties"
},
{
"entity_id": "2",
"port": "9092",
"brokerid": "2",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_2_logs",
"log_filename": "kafka_server_9092.log",
"config_filename": "kafka_server_9092.properties"
},
{
"entity_id": "3",
"port": "9093",
"brokerid": "3",
"log.file.size": "102400",
"log.retention.size": "1048576",
"log.dir": "/tmp/kafka_server_3_logs",
"log_filename": "kafka_server_9093.log",
"config_filename": "kafka_server_9093.properties"
},
{
"entity_id": "4",
"topic": "test_1",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_4.log",
"config_filename": "producer_performance_4.properties"
},
{
"entity_id": "5",
"topic": "test_2",
"threads": "5",
"compression-codec": "1",
"message-size": "500",
"message": "500",
"request-num-acks": "1",
"sync":"false",
"log_filename": "producer_performance_5.log",
"config_filename": "producer_performance_5.properties"
},
{
"entity_id": "6",
"topic": "test_1",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_6.log",
"config_filename": "console_consumer_6.properties"
},
{
"entity_id": "7",
"topic": "test_2",
"groupid": "mytestgroup",
"consumer-timeout-ms": "60000",
"log_filename": "console_consumer_7.log",
"config_filename": "console_consumer_7.properties"
}
]
}

View File

@ -38,6 +38,14 @@
"testcase_0121",
"testcase_0122",
"testcase_0123",
"testcase_0124",
"testcase_0125",
"testcase_0126",
"testcase_0127",
"testcase_0131",
"testcase_0132",
"testcase_0133",
"testcase_0151",
"testcase_0152",
@ -73,15 +81,41 @@
"testcase_0305",
"testcase_0306",
"testcase_0307",
"testcase_0308"
"testcase_0308",
"testcase_4001",
"testcase_4002",
"testcase_4003",
"testcase_4004",
"testcase_4005",
"testcase_4006",
"testcase_4007",
"testcase_4008",
"testcase_4011",
"testcase_4012",
"testcase_4013",
"testcase_4014",
"testcase_4015",
"testcase_4016",
"testcase_4017",
"testcase_4018"
],
"MigrationToolTest" : [
"testcase_9001"
"testcase_9001",
"testcase_9003",
"testcase_9004",
"testcase_9005",
"testcase_9006"
],
"MirrorMakerTest" : [
"testcase_5001",
"testcase_5002"
"testcase_5002",
"testcase_5003",
"testcase_5004",
"testcase_5005",
"testcase_5006"
]
}

View File

@ -174,6 +174,20 @@ def collect_logs_from_remote_hosts(systemTestEnv, testcaseEnv):
logger.debug("executing command [" + cmdStr + "]", extra=d)
system_test_utils.sys_call(cmdStr)
# ==============================
# collect ZK log
# ==============================
if role == "zookeeper":
dataLogPathName = system_test_utils.get_data_by_lookup_keyval(
testcaseEnv.testcaseConfigsList, "entity_id", entity_id, "dataDir")
cmdList = ["scp -r",
hostname + ":" + dataLogPathName,
logPathName]
cmdStr = " ".join(cmdList)
logger.debug("executing command [" + cmdStr + "]", extra=d)
system_test_utils.sys_call(cmdStr)
# ==============================
# collect dashboards file
# ==============================
@ -485,14 +499,17 @@ def start_brokers(systemTestEnv, testcaseEnv):
start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId)
def start_mirror_makers(systemTestEnv, testcaseEnv):
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
def start_mirror_makers(systemTestEnv, testcaseEnv, onlyThisEntityId=None):
brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterEntityConfigDictList, "role", "mirror_maker", "entity_id")
if onlyThisEntityId is not None:
start_entity_in_background(systemTestEnv, testcaseEnv, onlyThisEntityId)
else:
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterEntityConfigDictList, "role", "mirror_maker", "entity_id")
for brokerEntityId in brokerEntityIdList:
start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId)
for brokerEntityId in brokerEntityIdList:
start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId)
def get_broker_shutdown_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict):
@ -1040,9 +1057,10 @@ def get_message_checksum(logPathName):
if not "checksum:" in line:
continue
else:
matchObj = re.match('.*checksum:(\d*?).*', line)
matchObj = re.match('.*checksum:(\d*).*', line)
if matchObj is not None:
messageChecksumList.append( matchObj.group(1) )
checksum = matchObj.group(1)
messageChecksumList.append( checksum )
else:
logger.error("unexpected log line : " + line, extra=d)
@ -1311,52 +1329,70 @@ def stop_all_remote_running_processes(systemTestEnv, testcaseEnv):
stop_remote_entity(systemTestEnv, entityId, zkParentPid)
def start_migration_tool(systemTestEnv, testcaseEnv):
def start_migration_tool(systemTestEnv, testcaseEnv, onlyThisEntityId=None):
clusterConfigList = systemTestEnv.clusterEntityConfigDictList
migrationToolConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "migration_tool")
migrationToolConfig = migrationToolConfigList[0]
host = migrationToolConfig["hostname"]
entityId = migrationToolConfig["entity_id"]
jmxPort = migrationToolConfig["jmx_port"]
role = migrationToolConfig["role"]
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "java_home")
jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "jmx_port")
kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh"
for migrationToolConfig in migrationToolConfigList:
logger.info("starting kafka migration tool", extra=d)
migrationToolLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "migration_tool", entityId, "default")
migrationToolLogPathName = migrationToolLogPath + "/migration_tool.log"
testcaseEnv.userDefinedEnvVarDict["migrationToolLogPathName"] = migrationToolLogPathName
entityId = migrationToolConfig["entity_id"]
testcaseConfigsList = testcaseEnv.testcaseConfigsList
numProducers = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.producers")
numStreams = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.streams")
producerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer.config")
consumerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer.config")
zkClientJar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "zkclient.01.jar")
kafka07Jar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "kafka.07.jar")
whiteList = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "whitelist")
logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename")
if onlyThisEntityId is None or entityId == onlyThisEntityId:
cmdList = ["ssh " + host,
"'JAVA_HOME=" + javaHome,
"JMX_PORT=" + jmxPort,
kafkaRunClassBin + " kafka.tools.KafkaMigrationTool",
"--whitelist=" + whiteList,
"--num.producers=" + numProducers,
"--num.streams=" + numStreams,
"--producer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + producerConfig,
"--consumer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + consumerConfig,
"--zkclient.01.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + zkClientJar,
"--kafka.07.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + kafka07Jar,
" &> " + migrationToolLogPath + "/migrationTool.log",
" & echo pid:$! > " + migrationToolLogPath + "/entity_" + entityId + "_pid'"]
host = migrationToolConfig["hostname"]
jmxPort = migrationToolConfig["jmx_port"]
role = migrationToolConfig["role"]
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "java_home")
jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "jmx_port")
kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh"
logger.info("starting kafka migration tool", extra=d)
migrationToolLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "migration_tool", entityId, "default")
migrationToolLogPathName = migrationToolLogPath + "/migration_tool.log"
testcaseEnv.userDefinedEnvVarDict["migrationToolLogPathName"] = migrationToolLogPathName
testcaseConfigsList = testcaseEnv.testcaseConfigsList
numProducers = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.producers")
numStreams = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.streams")
producerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer.config")
consumerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer.config")
zkClientJar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "zkclient.01.jar")
kafka07Jar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "kafka.07.jar")
whiteList = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "whitelist")
logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename")
cmdList = ["ssh " + host,
"'JAVA_HOME=" + javaHome,
"JMX_PORT=" + jmxPort,
kafkaRunClassBin + " kafka.tools.KafkaMigrationTool",
"--whitelist=" + whiteList,
"--num.producers=" + numProducers,
"--num.streams=" + numStreams,
"--producer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + producerConfig,
"--consumer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + consumerConfig,
"--zkclient.01.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + zkClientJar,
"--kafka.07.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + kafka07Jar,
" &> " + migrationToolLogPath + "/migrationTool.log",
" & echo pid:$! > " + migrationToolLogPath + "/entity_" + entityId + "_pid'"]
cmdStr = " ".join(cmdList)
logger.debug("executing command: [" + cmdStr + "]", extra=d)
system_test_utils.async_sys_call(cmdStr)
time.sleep(5)
pidCmdStr = "ssh " + host + " 'cat " + migrationToolLogPath + "/entity_" + entityId + "_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of the remote entity pid in a dictionary
for line in subproc.stdout.readlines():
if line.startswith("pid"):
line = line.rstrip('\n')
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
testcaseEnv.entityMigrationToolParentPidDict[entityId] = tokens[1]
cmdStr = " ".join(cmdList)
logger.debug("executing command: [" + cmdStr + "]", extra=d)
system_test_utils.async_sys_call(cmdStr)
def validate_07_08_migrated_data_matched(systemTestEnv, testcaseEnv):
validationStatusDict = testcaseEnv.validationStatusDict
@ -1392,30 +1428,40 @@ def validate_07_08_migrated_data_matched(systemTestEnv, testcaseEnv):
consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", matchingConsumerEntityId, "default")
consumerLogPathName = consumerLogPath + "/console_consumer.log"
producerMsgChecksumList = get_message_checksum(producerLogPathName)
consumerMsgChecksumList = get_message_checksum(consumerLogPathName)
producerMsgChecksumSet = set(producerMsgChecksumList)
consumerMsgChecksumSet = set(consumerMsgChecksumList)
producerMsgChecksumList = get_message_checksum(producerLogPathName)
consumerMsgChecksumList = get_message_checksum(consumerLogPathName)
producerMsgChecksumSet = set(producerMsgChecksumList)
consumerMsgChecksumSet = set(consumerMsgChecksumList)
producerMsgChecksumUniqList = list(producerMsgChecksumSet)
consumerMsgChecksumUniqList = list(consumerMsgChecksumSet)
missingMsgChecksumInConsumer = producerMsgChecksumSet - consumerMsgChecksumSet
logger.debug("size of producerMsgChecksumList : " + str(len(producerMsgChecksumList)), extra=d)
logger.debug("size of consumerMsgChecksumList : " + str(len(consumerMsgChecksumList)), extra=d)
logger.debug("size of producerMsgChecksumSet : " + str(len(producerMsgChecksumSet)), extra=d)
logger.debug("size of consumerMsgChecksumSet : " + str(len(consumerMsgChecksumSet)), extra=d)
logger.debug("size of producerMsgChecksumUniqList : " + str(len(producerMsgChecksumUniqList)), extra=d)
logger.debug("size of consumerMsgChecksumUniqList : " + str(len(consumerMsgChecksumUniqList)), extra=d)
logger.debug("size of missingMsgChecksumInConsumer : " + str(len(missingMsgChecksumInConsumer)), extra=d)
outfile = open(msgChecksumMissingInConsumerLogPathName, "w")
for id in missingMsgChecksumInConsumer:
outfile.write(id + "\n")
outfile.close()
logger.info("no. of unique messages on topic [" + topic + "] sent from publisher : " + str(len(producerMsgChecksumList)), extra=d)
logger.info("no. of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumList)), extra=d)
validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(producerMsgChecksumList))
validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(consumerMsgChecksumList))
logger.info("no. of messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumList)), extra=d)
logger.info("no. of messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumList)), extra=d)
logger.info("no. of unique messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumUniqList)), extra=d)
logger.info("no. of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumUniqList)), extra=d)
validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(list(producerMsgChecksumSet)))
validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(list(consumerMsgChecksumSet)))
if ( len(missingMsgChecksumInConsumer) == 0 and len(producerMsgChecksumList) > 0 ):
if ( len(producerMsgChecksumList) > 0 and len(list(producerMsgChecksumSet)) == len(list(consumerMsgChecksumSet))):
validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED"
#return True
else:
validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED"
logger.info("See " + msgChecksumMissingInConsumerLogPathName + " for missing MessageID", extra=d)
#return False
def validate_broker_log_segment_checksum(systemTestEnv, testcaseEnv, clusterName="source"):
@ -1542,7 +1588,7 @@ def validate_broker_log_segment_checksum(systemTestEnv, testcaseEnv, clusterName
else:
validationStatusDict["Validate for merged log segment checksum in cluster [" + clusterName + "]"] = "FAILED"
def start_simple_consumer(systemTestEnv, testcaseEnv):
def start_simple_consumer(systemTestEnv, testcaseEnv, minStartingOffsetDict=None):
clusterList = systemTestEnv.clusterEntityConfigDictList
consumerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterList, "role", "console_consumer")
@ -1586,45 +1632,42 @@ def start_simple_consumer(systemTestEnv, testcaseEnv):
numPartitions = int(numPartitions)
replicaIndex = 1
startingOffset = -2
brokerPortList = brokerListStr.split(',')
for brokerPort in brokerPortList:
k = 0
while (k < numPartitions):
logger.info("starting debug consumer for replica on [" + brokerPort + "] partition [" + str(k) + "]", extra=d)
partitionId = 0
while (partitionId < numPartitions):
logger.info("starting debug consumer for replica on [" + brokerPort + "] partition [" + str(partitionId) + "]", extra=d)
if minStartingOffsetDict is not None:
topicPartition = topic + "-" + str(partitionId)
startingOffset = minStartingOffsetDict[topicPartition]
outputFilePathName = consumerLogPath + "/simple_consumer_" + topic + "-" + str(partitionId) + "_r" + str(replicaIndex) + ".log"
brokerPortLabel = brokerPort.replace(":", "_")
cmdList = ["ssh " + host,
"'JAVA_HOME=" + javaHome,
kafkaRunClassBin + " kafka.tools.SimpleConsumerShell",
"--broker-list " + brokerListStr,
"--topic " + topic,
"--partition " + str(k),
"--partition " + str(partitionId),
"--replica " + str(replicaIndex),
"--offset " + str(startingOffset),
"--no-wait-at-logend ",
" >> " + consumerLogPath + "/simple_consumer_" + str(replicaIndex) + ".log",
" > " + outputFilePathName,
" & echo pid:$! > " + consumerLogPath + "/entity_" + entityId + "_pid'"]
cmdStr = " ".join(cmdList)
logger.debug("executing command: [" + cmdStr + "]", extra=d)
system_test_utils.async_sys_call(cmdStr)
time.sleep(2)
pidCmdStr = "ssh " + host + " 'cat " + consumerLogPath + "/entity_" + entityId + "_pid'"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of the remote entity pid in a dictionary
for line in subproc.stdout.readlines():
if line.startswith("pid"):
line = line.rstrip('\n')
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
testcaseEnv.consumerHostParentPidDict[host] = tokens[1]
logger.info("sleeping for 5 sec",extra=d)
time.sleep(5)
k += 1
subproc_1 = system_test_utils.sys_call_return_subproc(cmdStr)
# dummy for-loop to wait until the process is completed
for line in subproc_1.stdout.readlines():
pass
time.sleep(1)
partitionId += 1
replicaIndex += 1
def validate_simple_consumer_data_matched(systemTestEnv, testcaseEnv):
@ -1733,4 +1776,187 @@ def get_controller_attributes(systemTestEnv, testcaseEnv):
tcConfigsList, "brokerid", brokerid, "entity_id")
return controllerDict
def getMinCommonStartingOffset(systemTestEnv, testcaseEnv, clusterName="source"):
    """Return the minimum common starting offset of each topic-partition
    across all brokers of the given cluster.

    The starting offset of a topic-partition on one broker is taken from the
    name of its first (lowest-numbered) ".log" segment file, e.g.
    00000000000000003800.log => "3800".  Across brokers, the LARGEST of those
    per-broker starting offsets is the lowest offset guaranteed to exist on
    every replica, which is where a cross-replica consumer comparison must
    start.

    Arguments:
      systemTestEnv : SystemTestEnv holding clusterEntityConfigDictList
      testcaseEnv   : TestcaseEnv holding testcaseConfigsList
      clusterName   : name of the cluster to inspect (default "source")

    Returns a dict mapping "topic-partition" (eg. "test_1-0") to the common
    starting offset as a string (eg. "400"), like:
      {u'test_1-0': u'400', u'test_1-1': u'400',
       u'test_2-0': u'200', u'test_2-1': u'200'}
    """
    brokerLogStartOffsetDict = {}
    minCommonStartOffsetDict = {}

    tcConfigsList     = testcaseEnv.testcaseConfigsList
    clusterConfigList = systemTestEnv.clusterEntityConfigDictList
    allBrokerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker")
    brokerEntityIdList  = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id")

    # loop through all brokers to record each replica's starting offset
    for brokerEntityId in sorted(brokerEntityIdList):
        # remoteLogSegmentPathName : /tmp/kafka_server_4_logs
        # => remoteLogSegmentDir   : kafka_server_4_logs
        remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir")
        remoteLogSegmentDir      = os.path.basename(remoteLogSegmentPathName)
        logPathName              = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default")
        localLogSegmentPath      = logPathName + "/" + remoteLogSegmentDir

        # loop through all topicPartition directories such as : test_1-0, test_1-1, ...
        for topicPartition in sorted(os.listdir(localLogSegmentPath)):
            if os.path.isdir(localLogSegmentPath + "/" + topicPartition):
                # startingOffsetKey : <brokerEntityId>:<topicPartition> (eg. 1:test_1-0)
                startingOffsetKey = brokerEntityId + ":" + topicPartition

                # log segment files are located in : localLogSegmentPath + "/" + topicPartition
                # sorted order guarantees the first ".log" file seen is the earliest segment
                for logFile in sorted(os.listdir(localLogSegmentPath + "/" + topicPartition)):
                    # logFile is located at (for example):
                    # system_test/xxxx_testsuite/testcase_xxxx/logs/broker-1/kafka_server_1_logs/test_1-0/00000000000000003800.log
                    if logFile.endswith(".log"):
                        matchObj = re.match("0*(.*)\.log", logFile)   # remove the leading zeros & the file extension
                        startingOffset = matchObj.group(1)            # this is the starting offset from the file name
                        if len(startingOffset) == 0:                  # when log filename is: 00000000000000000000.log
                            startingOffset = "0"

                        # starting offset of a topic-partition can be retrieved from the filename of the first log segment
                        # => break out of this innermost for-loop after processing the first log segment file
                        brokerLogStartOffsetDict[startingOffsetKey] = startingOffset
                        break

    # reduce { "<broker>:<topic-partition>" : offset } down to the per
    # topic-partition maximum.  Offsets are compared NUMERICALLY: a plain
    # string comparison would rank "1000" below "200" and pick the wrong
    # common starting offset once offsets differ in digit count.
    for brokerTopicPartition in sorted(brokerLogStartOffsetDict.keys()):
        topicPartition  = brokerTopicPartition.split(':')[1]
        candidateOffset = brokerLogStartOffsetDict[brokerTopicPartition]

        if topicPartition in minCommonStartOffsetDict:
            # key exists => if the new value is numerically greater, replace the existing value
            if int(candidateOffset) > int(minCommonStartOffsetDict[topicPartition]):
                minCommonStartOffsetDict[topicPartition] = candidateOffset
        else:
            # key doesn't exist => add it to the dictionary
            minCommonStartOffsetDict[topicPartition] = candidateOffset

    return minCommonStartOffsetDict
def validate_simple_consumer_data_matched_across_replicas(systemTestEnv, testcaseEnv):
"""Validate that every replica of each topic-partition served the same messages.

For each console_consumer entity's topic, reads the per-replica
simple-consumer logs (simple_consumer_<topic>-<partition>_r<replica>.log),
counts the unique message ids in each, and records the counts in
testcaseEnv.validationStatusDict.  The validation passes when, for every
partition, all replicas that have any messages report the SAME count.

NOTE(review): indentation was lost in this rendering; the nesting of the
trailing logger.info calls and the final PASSED/FAILED branch relative to
the loops is as recovered from line order — confirm against the repository.
"""
validationStatusDict = testcaseEnv.validationStatusDict
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterEntityConfigDictList, "role", "console_consumer", "entity_id")
replicaFactor = testcaseEnv.testcaseArgumentsDict["replica_factor"]
numPartition = testcaseEnv.testcaseArgumentsDict["num_partition"]
# Example of the expected per-replica distribution being validated:
# Unique messages from producer on [test_1] : 1500
# Unique messages from consumer on [test_1] : 1500
# Unique messages from consumer on [test_1] at simple_consumer_test_1-0_r1.log : 750
# Unique messages from consumer on [test_1] at simple_consumer_test_1-0_r2.log : 750
# Unique messages from consumer on [test_1] at simple_consumer_test_1-0_r3.log : 0
# Unique messages from consumer on [test_1] at simple_consumer_test_1-1_r1.log : 0
# Unique messages from consumer on [test_1] at simple_consumer_test_1-1_r2.log : 750
# Unique messages from consumer on [test_1] at simple_consumer_test_1-1_r3.log : 750
# ==================================================
# Unique messages from producer on [test_2] : 1000
# Unique messages from consumer on [test_2] : 1000
# Unique messages from consumer on [test_2] at simple_consumer_test_2-0_r1.log : 500
# Unique messages from consumer on [test_2] at simple_consumer_test_2-0_r2.log : 0
# Unique messages from consumer on [test_2] at simple_consumer_test_2-0_r3.log : 500
# Unique messages from consumer on [test_2] at simple_consumer_test_2-1_r1.log : 500
# Unique messages from consumer on [test_2] at simple_consumer_test_2-1_r2.log : 500
# Unique messages from consumer on [test_2] at simple_consumer_test_2-1_r3.log : 0
mismatchCounter = 0
for consumerEntityId in consumerEntityIdList:
topic = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", consumerEntityId, "topic")
consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", consumerEntityId, "default")
replicaIdxMsgCountDictList = []
# replicaIdxMsgCountDictList is being used as follows:
#
# the above replica message count will be organized as follows:
# index of the list would map to the partitionId
# each element in the list maps to the replicaIdx-MessageCount
# to validate that :
# 1. there should be "no. of broker" of non-zero message count and they are equal
# 2. there should be "no. of broker - replication factor" of zero count
# [{"1": "750", "2": "750", "3": "0" },
# {"1": "0" , "2": "750", "3": "750"}]
# pre-allocate one replica-count dict per partition
j = 0
while j < int(numPartition):
newDict = {}
replicaIdxMsgCountDictList.append(newDict)
j += 1
# parse each per-replica simple-consumer log and count unique message ids
for logFile in sorted(os.listdir(consumerLogPath)):
if logFile.startswith("simple_consumer_") and logFile.endswith(".log"):
matchObj = re.match("simple_consumer_"+topic+"-(\d*)_r(\d*)\.log" , logFile)
partitionId = int(matchObj.group(1))
replicaIdx = int(matchObj.group(2))
consumerLogPathName = consumerLogPath + "/" + logFile
consumerMsgIdList = get_message_id(consumerLogPathName)
consumerMsgIdSet = set(consumerMsgIdList)
replicaIdxMsgCountDictList[partitionId][replicaIdx] = len(consumerMsgIdSet)
logger.info("no. of unique messages on topic [" + topic + "] at " + logFile + " : " + str(len(consumerMsgIdSet)), extra=d)
validationStatusDict["Unique messages from consumer on [" + topic + "] at " + logFile] = str(len(consumerMsgIdSet))
pprint.pprint(replicaIdxMsgCountDictList)
# for each partition, every replica with a non-zero count must report the
# SAME count; any disagreement bumps mismatchCounter
partitionId = 0
while partitionId < int(numPartition):
zeroMsgCounter = 0
nonZeroMsgCounter = 0
nonZeroMsgValue = -1
for replicaIdx in sorted(replicaIdxMsgCountDictList[partitionId].iterkeys()):
if replicaIdxMsgCountDictList[partitionId][int(replicaIdx)] == 0:
zeroMsgCounter += 1
else:
if nonZeroMsgValue == -1:
nonZeroMsgValue = replicaIdxMsgCountDictList[partitionId][int(replicaIdx)]
else:
if nonZeroMsgValue != replicaIdxMsgCountDictList[partitionId][int(replicaIdx)]:
mismatchCounter += 1
nonZeroMsgCounter += 1
partitionId += 1
logger.info("topic " + topic + " : no. of brokers with zero msg count : " + str(zeroMsgCounter), extra=d)
logger.info("topic " + topic + " : no. of brokers with non-zero msg count : " + str(nonZeroMsgCounter), extra=d)
logger.info("topic " + topic + " : non-zero brokers msg count : " + str(nonZeroMsgValue), extra=d)
if mismatchCounter == 0:
validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "PASSED"
else:
validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "FAILED"

View File

@ -51,6 +51,12 @@ class TestcaseEnv():
# { 0: 12345, 1: 12389, ... }
entityMirrorMakerParentPidDict = {}
# dictionary of entity_id to ppid for migration tool entities
# key: entity_id
# val: ppid of broker associated to that entity_id
# { 0: 12345, 1: 12389, ... }
entityMigrationToolParentPidDict = {}
# dictionary of entity_id to list of JMX ppid
# key: entity_id
# val: list of JMX ppid associated to that entity_id