diff --git a/Vagrantfile b/Vagrantfile index 31b99b4b4f0..caece17097d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -148,7 +148,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end # Exclude some directories that can grow very large from syncing - override.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'system_test/', 'tests/results/', 'results/'] + override.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/'] end def name_node(node, name) diff --git a/build.gradle b/build.gradle index d177e2e5a41..279c51f7cda 100644 --- a/build.gradle +++ b/build.gradle @@ -75,8 +75,7 @@ rat { 'gradlew', 'gradlew.bat', '**/README.md', - '.reviewboardrc', - 'system_test/**' + '.reviewboardrc' ]) } diff --git a/system_test/README.txt b/system_test/README.txt deleted file mode 100644 index e96d15dd558..00000000000 --- a/system_test/README.txt +++ /dev/null @@ -1,83 +0,0 @@ -# ========================== -# Quick Start -# ========================== - -* Please note that the following commands should be executed after downloading the kafka source code to build all the required binaries: - 1. / $ ./gradlew jar - - Now you are ready to follow the steps below. - 1. Update system_test/cluster_config.json for "kafka_home" & "java_home" specific to your environment - 2. Edit system_test/replication_testsuite/testcase_1/testcase_1_properties.json and update "broker-list" to the proper settings of your environment. (If this test is to be run in a single localhost, no change is required for this.) - 3. Create testcase_to_run.json file with the tests you wish to run. You can start by just copying one of our preset test suites. For example: - cp testcase_to_run_sanity.json testcase_to_run.json - 4. To run the test, go to /system_test and run the following command: - $ python -u -B system_test_runner.py 2>&1 | tee system_test_output.log - 5. To turn on debugging, update system_test/logging.conf by changing the level in handlers session from INFO to DEBUG. - - We also have three built-in test suites you can use after you set your environment (steps 1 and 2 above): - * run_sanity.sh - will run a single basic replication test - * run_all_replica.sh - will run all replication tests - * run_all.sh - will run all replication and mirror_maker tests - -# ========================== -# Overview -# ========================== - -"system_test" is now transformed to a system regression test framework intended for the automation of system / integration testing of data platform software such as Kafka. The test framework is implemented in Python which is a popular scripting language with well supported features. - -The framework has the following levels: - -1. The first level is generic and does not depend on any product specific details. - location: system_test - a. system_test_runner.py - It implements the main class RegTest as an entry point. - b. system_test_env.py - It implements the class RegTestEnv which defines the testing environment of a test session such as the base directory and environment variables specific to the local machine. - -2. The second level defines a suite of testing such as Kafka's replication (including basic testing, failure testing, ... etc) - location: system_test/*. - - * Please note the test framework will look for a specific suffix of the directories under system_test to determine what test suites are available. 
The suffix of can be defined in SystemTestEnv class (system_test_env.py) - - a. replica_basic_test.py - This is a test module file. It implements the test logic for basic replication testing as follows: - - i. start zookeepers - ii. start brokers - iii. create kafka topics - iv. lookup the brokerid as a leader - v. terminate the leader (if defined in the testcase config json file) - vi. start producer to send n messages - vii. start consumer to receive messages - viii. validate if there is data loss - - b. config/ - This config directory provides templates for all properties files needed for zookeeper, brokers, producer and consumer (any changes in the files under this directory would be reflected or overwritten by the settings under testcase_/testcase__properties.json) - - d. testcase_** - The testcase directory contains the testcase argument definition file: testcase_1_properties.json. This file defines the specific configurations for the testcase such as the followings (eg. producer related): - i. no. of producer threads - ii. no. of messages to produce - iii. zkconnect string - - When this test case is being run, the test framework will copy and update the template properties files to testcase_/config. The logs of various components will be saved in testcase_/logs - - ** Please note the test framework will look for a specific prefix of the directories under system_test// to determine what test cases are available. The prefix of can be defined in SystemTestEnv class (system_test_env.py) - -# ========================== -# Adding Test Case -# ========================== - -To create a new test suite called "broker_testsuite", please do the followings: - - 1. Copy and paste system_test/replication_testsuite => system_test/broker_testsuite - 2. Rename system_test/broker_testsuite/replica_basic_test.py => system_test/broker_testsuite/broker_basic_test.py - 3. Edit system_test/broker_testsuite/broker_basic_test.py and update all ReplicaBasicTest related class name to BrokerBasicTest (as an example) - 4. Follow the flow of system_test/broker_testsuite/broker_basic_test.py and modify the necessary test logic accordingly. - - -To create a new test case under "replication_testsuite", please do the followings: - - 1. Copy and paste system_test/replication_testsuite/testcase_1 => system_test/replication_testsuite/testcase_2 - 2. Rename system_test/replication_testsuite/testcase_2/testcase_1_properties.json => system_test/replication_testsuite/testcase_2/testcase_2_properties.json - 3. Update system_test/replication_testsuite/testcase_2/testcase_2_properties.json with the corresponding settings for testcase 2. - -Note: -The following testcases are for the old producer and the old mirror maker. We can remove them once we phase out the old producer client. - replication_testsuite: testcase_{10101 - 10110} testcase_{10131 - 10134} - mirror_maker_testsuite: testcase_{15001 - 15006} diff --git a/system_test/__init__.py b/system_test/__init__.py deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/system_test/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/system_test/broker_failure/README b/system_test/broker_failure/README deleted file mode 100644 index e7ff738cbf2..00000000000 --- a/system_test/broker_failure/README +++ /dev/null @@ -1,72 +0,0 @@ -** Please note that the following commands should be executed - after downloading the kafka source code to build all the - required binaries: - 1. / $ ./sbt update - 2. / $ ./sbt package - - Now you are ready to follow the steps below. 
- -This script performs broker failure tests in an environment with -Mirrored Source & Target clusters in a single machine: - -1. Start a cluster of Kafka source brokers -2. Start a cluster of Kafka target brokers -3. Start one or more Mirror Maker to create mirroring between - source and target clusters -4. A producer produces batches of messages to the SOURCE brokers - in the background -5. The Kafka SOURCE, TARGET brokers and Mirror Maker will be - terminated in a round-robin fashion and wait for the consumer - to catch up. -6. Repeat step 5 as many times as specified in the script -7. An independent ConsoleConsumer in publish/subcribe mode to - consume messages from the SOURCE brokers cluster -8. An independent ConsoleConsumer in publish/subcribe mode to - consume messages from the TARGET brokers cluster - -Expected results: -================== -There should not be any discrepancies by comparing the unique -message checksums from the source ConsoleConsumer and the -target ConsoleConsumer. - -Notes: -================== -The number of Kafka SOURCE brokers can be increased as follows: -1. Update the value of $num_kafka_source_server in this script -2. Make sure that there are corresponding number of prop files: - $base_dir/config/server_source{1..4}.properties - -The number of Kafka TARGET brokers can be increased as follows: -1. Update the value of $num_kafka_target_server in this script -2. Make sure that there are corresponding number of prop files: - $base_dir/config/server_target{1..3}.properties - -Quick Start: -================== -In the directory /system_test/broker_failure, -execute this script as following: - $ bin/run-test.sh -n -s - -num of iterations - the number of iterations that the test runs - -servers to bounce - the servers to be bounced in a round-robin fashion. - - Values to be entered: - 1 - source broker - 2 - mirror maker - 3 - target broker - - Example: - * To bounce only mirror maker and target broker - in turns, enter the value 23. - * To bounce only mirror maker, enter the value 2. - * To run the test without bouncing, enter 0. - -At the end of the test, the received messages checksums in both -SOURCE & TARGET will be compared. If all checksums are matched, -the test is PASSED. Otherwise, the test is FAILED. - -In the event of failure, by default the brokers and zookeepers -remain running to make it easier to debug the issue - hit Ctrl-C -to shut them down. diff --git a/system_test/broker_failure/bin/kafka-run-class.sh b/system_test/broker_failure/bin/kafka-run-class.sh deleted file mode 100755 index 05f46b60e2d..00000000000 --- a/system_test/broker_failure/bin/kafka-run-class.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -if [ $# -lt 1 ]; -then - echo "USAGE: $0 classname [opts]" - exit 1 -fi - -base_dir=$(dirname $0)/.. -kafka_inst_dir=${base_dir}/../.. - -for file in $kafka_inst_dir/project/boot/scala-2.8.0/lib/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/target/scala_2.8.0/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/lib/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/perf/target/scala_2.8.0/kafka*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/lib_managed/scala_2.8.0/compile/*.jar; -do - if [ ${file##*/} != "sbt-launch.jar" ]; then - CLASSPATH=$CLASSPATH:$file - fi -done -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " -fi -if [ -z "$KAFKA_OPTS" ]; then - KAFKA_OPTS="-Xmx512M -server -Dlog4j.configuration=file:$base_dir/config/log4j.properties" -fi -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " -fi -if [ -z "$JAVA_HOME" ]; then - JAVA="java" -else - JAVA="$JAVA_HOME/bin/java" -fi - -$JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH $@ diff --git a/system_test/broker_failure/bin/run-test.sh b/system_test/broker_failure/bin/run-test.sh deleted file mode 100755 index 549cd1f4eaf..00000000000 --- a/system_test/broker_failure/bin/run-test.sh +++ /dev/null @@ -1,815 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# =========== -# run-test.sh -# =========== - -# ==================================== -# Do not change the followings -# (keep this section at the beginning -# of this script) -# ==================================== -readonly system_test_root=$(dirname $0)/../.. # path of /system_test -readonly common_dir=${system_test_root}/common # common util scripts for system_test -source ${common_dir}/util.sh # include the util script - -readonly base_dir=$(dirname $0)/.. # the base dir of this test suite -readonly test_start_time="$(date +%s)" # time starting this test -readonly bounce_source_id=1 -readonly bounce_mir_mkr_id=2 -readonly bounce_target_id=3 -readonly log4j_prop_file=$base_dir/config/log4j.properties - -iter=1 # init a counter to keep track of iterations -num_iterations=5 # total no. 
of iterations to run -svr_to_bounce=0 # servers to bounce: 1-source 2-mirror_maker 3-target - # 12 - source & mirror_maker - # 13 - source & target - -# ==================================== -# No need to change the following -# configurations in most cases -# ==================================== -readonly zk_source_port=2181 # source zk port -readonly zk_target_port=2182 # target zk port -readonly test_topic=test01 # topic used in this test -readonly consumer_grp=group1 # consumer group -readonly source_console_consumer_grp=source -readonly target_console_consumer_grp=target -readonly message_size=100 -readonly console_consumer_timeout_ms=15000 -readonly num_kafka_source_server=4 # requires same no. of property files such as: - # $base_dir/config/server_source{1..4}.properties -readonly num_kafka_target_server=3 # requires same no. of property files such as: - # $base_dir/config/server_target{1..3}.properties -readonly num_kafka_mirror_maker=3 # any values greater than 0 -readonly wait_time_after_killing_broker=0 # wait after broker is stopped but before starting again -readonly wait_time_after_restarting_broker=10 - -# ==================================== -# Change the followings as needed -# ==================================== -num_msg_per_batch=500 # no. of msg produced in each calling of ProducerPerformance -num_producer_threads=5 # no. of producer threads to send msg -producer_sleep_min=5 # min & max sleep time (in sec) between each -producer_sleep_max=5 # batch of messages sent from producer - -# ==================================== -# zookeeper -# ==================================== -pid_zk_source= -pid_zk_target= -zk_log4j_log= - -# ==================================== -# kafka source -# ==================================== -kafka_source_pids= -kafka_source_prop_files= -kafka_source_log_files= -kafka_topic_creation_log_file=$base_dir/kafka_topic_creation.log -kafka_log4j_log= - -# ==================================== -# kafka target -# ==================================== -kafka_target_pids= -kafka_target_prop_files= -kafka_target_log_files= - -# ==================================== -# mirror maker -# ==================================== -kafka_mirror_maker_pids= -kafka_mirror_maker_log_files= -consumer_prop_file=$base_dir/config/whitelisttest.consumer.properties -mirror_producer_prop_files= - -# ==================================== -# console consumer source -# ==================================== -console_consumer_source_pid= -console_consumer_source_log=$base_dir/console_consumer_source.log -console_consumer_source_mid_log=$base_dir/console_consumer_source_mid.log -console_consumer_source_mid_sorted_log=$base_dir/console_consumer_source_mid_sorted.log -console_consumer_source_mid_sorted_uniq_log=$base_dir/console_consumer_source_mid_sorted_uniq.log - -# ==================================== -# console consumer target -# ==================================== -console_consumer_target_pid= -console_consumer_target_log=$base_dir/console_consumer_target.log -console_consumer_target_mid_log=$base_dir/console_consumer_target_mid.log -console_consumer_target_mid_sorted_log=$base_dir/console_consumer_target_mid_sorted.log -console_consumer_target_mid_sorted_uniq_log=$base_dir/console_consumer_target_mid_sorted_uniq.log - -# ==================================== -# producer -# ==================================== -background_producer_pid= -producer_performance_log=$base_dir/producer_performance.log -producer_performance_mid_log=$base_dir/producer_performance_mid.log 
-producer_performance_mid_sorted_log=$base_dir/producer_performance_mid_sorted.log -producer_performance_mid_sorted_uniq_log=$base_dir/producer_performance_mid_sorted_uniq.log -tmp_file_to_stop_background_producer=/tmp/tmp_file_to_stop_background_producer - -# ==================================== -# test reports -# ==================================== -checksum_diff_log=$base_dir/checksum_diff.log - - -# ==================================== -# initialize prop and log files -# ==================================== -initialize() { - for ((i=1; i<=$num_kafka_target_server; i++)) - do - kafka_target_prop_files[${i}]=$base_dir/config/server_target${i}.properties - kafka_target_log_files[${i}]=$base_dir/kafka_target${i}.log - kafka_mirror_maker_log_files[${i}]=$base_dir/kafka_mirror_maker${i}.log - done - - for ((i=1; i<=$num_kafka_source_server; i++)) - do - kafka_source_prop_files[${i}]=$base_dir/config/server_source${i}.properties - kafka_source_log_files[${i}]=$base_dir/kafka_source${i}.log - done - - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - mirror_producer_prop_files[${i}]=$base_dir/config/mirror_producer${i}.properties - done - - zk_log4j_log=`grep "log4j.appender.zookeeperAppender.File=" $log4j_prop_file | awk -F '=' '{print $2}'` - kafka_log4j_log=`grep "log4j.appender.kafkaAppender.File=" $log4j_prop_file | awk -F '=' '{print $2}'` -} - -# ========================================= -# cleanup -# ========================================= -cleanup() { - info "cleaning up" - - rm -rf $tmp_file_to_stop_background_producer - rm -rf $kafka_topic_creation_log_file - - rm -rf /tmp/zookeeper_source - rm -rf /tmp/zookeeper_target - - rm -rf /tmp/kafka-source{1..4}-logs - rm -rf /tmp/kafka-target{1..3}-logs - - rm -rf $zk_log4j_log - rm -rf $kafka_log4j_log - - for ((i=1; i<=$num_kafka_target_server; i++)) - do - rm -rf ${kafka_target_log_files[${i}]} - rm -rf ${kafka_mirror_maker_log_files[${i}]} - done - - rm -f $base_dir/zookeeper_source.log - rm -f $base_dir/zookeeper_target.log - rm -f $base_dir/kafka_source{1..4}.log - - rm -f $producer_performance_log - rm -f $producer_performance_mid_log - rm -f $producer_performance_mid_sorted_log - rm -f $producer_performance_mid_sorted_uniq_log - - rm -f $console_consumer_target_log - rm -f $console_consumer_source_log - rm -f $console_consumer_target_mid_log - rm -f $console_consumer_source_mid_log - - rm -f $checksum_diff_log - - rm -f $console_consumer_target_mid_sorted_log - rm -f $console_consumer_source_mid_sorted_log - rm -f $console_consumer_target_mid_sorted_uniq_log - rm -f $console_consumer_source_mid_sorted_uniq_log -} - -# ========================================= -# wait_for_zero_consumer_lags -# ========================================= -wait_for_zero_consumer_lags() { - - this_group_name=$1 - this_zk_port=$2 - - # no of times to check for zero lagging - no_of_zero_to_verify=3 - - while [ 'x' == 'x' ] - do - TOTAL_LAG=0 - CONSUMER_LAGS=`$base_dir/bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker \ - --group $target_console_consumer_grp \ - --zkconnect localhost:$zk_target_port \ - --topic $test_topic \ - | grep "Consumer lag" | tr -d ' ' | cut -f2 -d '='` - - for lag in $CONSUMER_LAGS; - do - TOTAL_LAG=$(($TOTAL_LAG + $lag)) - done - - info "mirror console consumer TOTAL_LAG = $TOTAL_LAG" - if [ $TOTAL_LAG -eq 0 ]; then - if [ $no_of_zero_to_verify -eq 0 ]; then - echo - return 0 - fi - no_of_zero_to_verify=$(($no_of_zero_to_verify - 1)) - fi - sleep 1 - done -} - -# ========================================= -# 
create_topic -# ========================================= -create_topic() { - this_topic_to_create=$1 - this_zk_conn_str=$2 - this_replica_factor=$3 - - info "creating topic [$this_topic_to_create] on [$this_zk_conn_str]" - $base_dir/../../bin/kafka-create-topic.sh \ - --topic $this_topic_to_create \ - --zookeeper $this_zk_conn_str \ - --replica $this_replica_factor \ - 2> $kafka_topic_creation_log_file -} - -# ========================================= -# start_zk -# ========================================= -start_zk() { - info "starting zookeepers" - - $base_dir/../../bin/zookeeper-server-start.sh \ - $base_dir/config/zookeeper_source.properties \ - 2>&1 > $base_dir/zookeeper_source.log & - pid_zk_source=$! - - $base_dir/../../bin/zookeeper-server-start.sh \ - $base_dir/config/zookeeper_target.properties \ - 2>&1 > $base_dir/zookeeper_target.log & - pid_zk_target=$! -} - -# ========================================= -# start_source_servers_cluster -# ========================================= -start_source_servers_cluster() { - info "starting source cluster" - - for ((i=1; i<=$num_kafka_source_server; i++)) - do - start_source_server $i - done -} - -# ========================================= -# start_source_server -# ========================================= -start_source_server() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.Kafka \ - ${kafka_source_prop_files[$s_idx]} \ - 2>&1 >> ${kafka_source_log_files[$s_idx]} & - kafka_source_pids[${s_idx}]=$! - - info " -> kafka_source_pids[$s_idx]: ${kafka_source_pids[$s_idx]}" -} - -# ========================================= -# start_target_servers_cluster -# ========================================= -start_target_servers_cluster() { - info "starting mirror cluster" - - for ((i=1; i<=$num_kafka_target_server; i++)) - do - start_target_server $i - done -} - -# ========================================= -# start_target_server -# ========================================= -start_target_server() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.Kafka \ - ${kafka_target_prop_files[${s_idx}]} \ - 2>&1 >> ${kafka_target_log_files[${s_idx}]} & - kafka_target_pids[$s_idx]=$! - - info " -> kafka_target_pids[$s_idx]: ${kafka_target_pids[$s_idx]}" -} - -# ========================================= -# start_target_mirror_maker -# ========================================= -start_target_mirror_maker() { - info "starting mirror maker" - - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - start_mirror_maker $i - done -} - -# ========================================= -# start_mirror_maker -# ========================================= -start_mirror_maker() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.tools.MirrorMaker \ - --consumer.config $consumer_prop_file \ - --producer.config ${mirror_producer_prop_files[${s_idx}]} \ - --whitelist=\".*\" \ - 2>&1 >> ${kafka_mirror_maker_log_files[$s_idx]} & - kafka_mirror_maker_pids[${s_idx}]=$! 
- - info " -> kafka_mirror_maker_pids[$s_idx]: ${kafka_mirror_maker_pids[$s_idx]}" -} - -# ========================================= -# start_console_consumer -# ========================================= -start_console_consumer() { - - this_consumer_grp=$1 - this_consumer_zk_port=$2 - this_consumer_log=$3 - this_msg_formatter=$4 - - info "starting console consumers for $this_consumer_grp" - - $base_dir/bin/kafka-run-class.sh kafka.tools.ConsoleConsumer \ - --zookeeper localhost:$this_consumer_zk_port \ - --topic $test_topic \ - --group $this_consumer_grp \ - --from-beginning \ - --consumer-timeout-ms $console_consumer_timeout_ms \ - --formatter "kafka.tools.ConsoleConsumer\$${this_msg_formatter}" \ - 2>&1 > ${this_consumer_log} & - console_consumer_pid=$! - - info " -> console consumer pid: $console_consumer_pid" -} - -# ========================================= -# force_shutdown_background_producer -# - to be called when user press Ctrl-C -# ========================================= -force_shutdown_background_producer() { - info "force shutting down producer" - `ps auxw | grep "run\-test\|ProducerPerformance" | grep -v grep | awk '{print $2}' | xargs kill -9` -} - -# ========================================= -# force_shutdown_consumer -# - to be called when user press Ctrl-C -# ========================================= -force_shutdown_consumer() { - info "force shutting down consumer" - `ps auxw | grep ChecksumMessageFormatter | grep -v grep | awk '{print $2}' | xargs kill -9` -} - -# ========================================= -# shutdown_servers -# ========================================= -shutdown_servers() { - - info "shutting down mirror makers" - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - #info "stopping mm pid: ${kafka_mirror_maker_pids[$i]}" - if [ "x${kafka_mirror_maker_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_mirror_maker_pids[$i]}; - fi - done - - info "shutting down target servers" - for ((i=1; i<=$num_kafka_target_server; i++)) - do - if [ "x${kafka_target_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_target_pids[$i]}; - fi - done - - info "shutting down source servers" - for ((i=1; i<=$num_kafka_source_server; i++)) - do - if [ "x${kafka_source_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_source_pids[$i]}; - fi - done - - info "shutting down zookeeper servers" - if [ "x${pid_zk_target}" != "x" ]; then kill_child_processes 0 ${pid_zk_target}; fi - if [ "x${pid_zk_source}" != "x" ]; then kill_child_processes 0 ${pid_zk_source}; fi -} - -# ========================================= -# start_background_producer -# ========================================= -start_background_producer() { - - topic=$1 - - batch_no=0 - - while [ ! 
-e $tmp_file_to_stop_background_producer ] - do - sleeptime=$(get_random_range $producer_sleep_min $producer_sleep_max) - - info "producing $num_msg_per_batch messages on topic '$topic'" - $base_dir/bin/kafka-run-class.sh \ - kafka.tools.ProducerPerformance \ - --brokerinfo zk.connect=localhost:2181 \ - --topics $topic \ - --messages $num_msg_per_batch \ - --message-size $message_size \ - --threads $num_producer_threads \ - --initial-message-id $batch_no \ - 2>&1 >> $base_dir/producer_performance.log # appending all producers' msgs - - batch_no=$(($batch_no + $num_msg_per_batch)) - sleep $sleeptime - done -} - -# ========================================= -# cmp_checksum -# ========================================= -cmp_checksum() { - - cmp_result=0 - - grep MessageID $console_consumer_source_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $console_consumer_source_mid_log - grep MessageID $console_consumer_target_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $console_consumer_target_mid_log - grep MessageID $producer_performance_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $producer_performance_mid_log - - sort $console_consumer_target_mid_log > $console_consumer_target_mid_sorted_log - sort $console_consumer_source_mid_log > $console_consumer_source_mid_sorted_log - sort $producer_performance_mid_log > $producer_performance_mid_sorted_log - - sort -u $console_consumer_target_mid_log > $console_consumer_target_mid_sorted_uniq_log - sort -u $console_consumer_source_mid_log > $console_consumer_source_mid_sorted_uniq_log - sort -u $producer_performance_mid_log > $producer_performance_mid_sorted_uniq_log - - msg_count_from_source_consumer=`cat $console_consumer_source_mid_log | wc -l | tr -d ' '` - uniq_msg_count_from_source_consumer=`cat $console_consumer_source_mid_sorted_uniq_log | wc -l | tr -d ' '` - - msg_count_from_mirror_consumer=`cat $console_consumer_target_mid_log | wc -l | tr -d ' '` - uniq_msg_count_from_mirror_consumer=`cat $console_consumer_target_mid_sorted_uniq_log | wc -l | tr -d ' '` - - uniq_msg_count_from_producer=`cat $producer_performance_mid_sorted_uniq_log | wc -l | tr -d ' '` - - total_msg_published=`cat $producer_performance_mid_log | wc -l | tr -d ' '` - - duplicate_msg_in_producer=$(( $total_msg_published - $uniq_msg_count_from_producer )) - - crc_only_in_mirror_consumer=`comm -23 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - crc_only_in_source_consumer=`comm -13 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - crc_common_in_both_consumer=`comm -12 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - - crc_only_in_producer=`comm -23 $producer_performance_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - - duplicate_mirror_mid=`comm -23 $console_consumer_target_mid_sorted_log $console_consumer_target_mid_sorted_uniq_log` - no_of_duplicate_msg=$(( $msg_count_from_mirror_consumer - $uniq_msg_count_from_mirror_consumer \ - + $msg_count_from_source_consumer - $uniq_msg_count_from_source_consumer - \ - 2*$duplicate_msg_in_producer )) - - source_mirror_uniq_msg_diff=$(($uniq_msg_count_from_source_consumer - $uniq_msg_count_from_mirror_consumer)) - - echo "" - echo "========================================================" - echo "no. 
of messages published : $total_msg_published" - echo "producer unique msg rec'd : $uniq_msg_count_from_producer" - echo "source consumer msg rec'd : $msg_count_from_source_consumer" - echo "source consumer unique msg rec'd : $uniq_msg_count_from_source_consumer" - echo "mirror consumer msg rec'd : $msg_count_from_mirror_consumer" - echo "mirror consumer unique msg rec'd : $uniq_msg_count_from_mirror_consumer" - echo "total source/mirror duplicate msg : $no_of_duplicate_msg" - echo "source/mirror uniq msg count diff : $source_mirror_uniq_msg_diff" - echo "========================================================" - echo "(Please refer to $checksum_diff_log for more details)" - echo "" - - echo "========================================================" >> $checksum_diff_log - echo "crc only in producer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_producer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "crc only in source consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_source_consumer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "crc only in mirror consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_mirror_consumer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "duplicate crc in mirror consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${duplicate_mirror_mid}" >> $checksum_diff_log - - echo "=================" - if [[ $source_mirror_uniq_msg_diff -eq 0 && $uniq_msg_count_from_source_consumer -gt 0 ]]; then - echo "## Test PASSED" - else - echo "## Test FAILED" - fi - echo "=================" - echo - - return $cmp_result -} - -# ========================================= -# start_test -# ========================================= -start_test() { - - echo - info "===========================================================" - info "#### Starting Kafka Broker / Mirror Maker Failure Test ####" - info "===========================================================" - echo - - start_zk - sleep 2 - - start_source_servers_cluster - sleep 2 - - create_topic $test_topic localhost:$zk_source_port 1 - sleep 2 - - start_target_servers_cluster - sleep 2 - - start_target_mirror_maker - sleep 2 - - start_background_producer $test_topic & - background_producer_pid=$! - - info "Started background producer pid [${background_producer_pid}]" - sleep 5 - - # loop for no. of iterations specified in $num_iterations - while [ $num_iterations -ge $iter ] - do - # if $svr_to_bounce is '0', it means no bouncing - if [[ $num_iterations -ge $iter && $svr_to_bounce -gt 0 ]]; then - idx= - - # check which type of broker bouncing is requested: source, mirror_maker or target - - # $svr_to_bounce contains $bounce_target_id - eg. '3', '123', ... 
etc - svr_idx=`expr index $svr_to_bounce $bounce_target_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce target kafka broker - idx=$(get_random_range 1 $num_kafka_target_server) - - if [ "x${kafka_target_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka TARGET Broker ####" - - info "terminating kafka target[$idx] with process id ${kafka_target_pids[$idx]}" - kill_child_processes 0 ${kafka_target_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka target server" - start_target_server $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - - # $svr_to_bounce contains $bounce_mir_mkr_id - eg. '2', '123', ... etc - svr_idx=`expr index $svr_to_bounce $bounce_mir_mkr_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce mirror maker - idx=$(get_random_range 1 $num_kafka_mirror_maker) - - if [ "x${kafka_mirror_maker_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka Mirror Maker ####" - - info "terminating kafka mirror maker [$idx] with process id ${kafka_mirror_maker_pids[$idx]}" - kill_child_processes 0 ${kafka_mirror_maker_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka mirror maker" - start_mirror_maker $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - - # $svr_to_bounce contains $bounce_source_id - eg. '1', '123', ... etc - svr_idx=`expr index $svr_to_bounce $bounce_source_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce source kafka broker - idx=$(get_random_range 1 $num_kafka_source_server) - - if [ "x${kafka_source_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka SOURCE Broker ####" - - info "terminating kafka source[$idx] with process id ${kafka_source_pids[$idx]}" - kill_child_processes 0 ${kafka_source_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka source server" - start_source_server $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - else - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - info "No bouncing performed" - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - done - - # notify background producer to stop - `touch $tmp_file_to_stop_background_producer` - - echo - info "Tests completed. 
Waiting for consumers to catch up " - - # ======================================================= - # remove the following 'sleep 30' when KAFKA-313 is fixed - # ======================================================= - info "sleeping 30 sec" - sleep 30 -} - -# ========================================= -# print_usage -# ========================================= -print_usage() { - echo - echo "Error : invalid no. of arguments" - echo "Usage : $0 -n -s " - echo - echo " num of iterations - the number of iterations that the test runs" - echo - echo " servers to bounce - the servers to be bounced in a round-robin fashion" - echo " Values of the servers:" - echo " 0 - no bouncing" - echo " 1 - source broker" - echo " 2 - mirror maker" - echo " 3 - target broker" - echo " Example:" - echo " * To bounce only mirror maker and target broker" - echo " in turns, enter the value 23" - echo " * To bounce only mirror maker, enter the value 2" - echo " * To run the test without bouncing, enter 0" - echo - echo "Usage Example : $0 -n 10 -s 12" - echo " (run 10 iterations and bounce source broker (1) + mirror maker (2) in turn)" - echo -} - - -# ========================================= -# -# Main test begins here -# -# ========================================= - -# get command line arguments -while getopts "hb:i:n:s:x:" opt -do - case $opt in - b) - num_msg_per_batch=$OPTARG - ;; - h) - print_usage - exit - ;; - i) - producer_sleep_min=$OPTARG - ;; - n) - num_iterations=$OPTARG - ;; - s) - svr_to_bounce=$OPTARG - ;; - x) - producer_sleep_max=$OPTARG - ;; - ?) - print_usage - exit - ;; - esac -done - -# initialize and cleanup -initialize -cleanup -sleep 5 - -# Ctrl-c trap. Catches INT signal -trap "shutdown_servers; force_shutdown_consumer; force_shutdown_background_producer; cmp_checksum; exit 0" INT - -# starting the test -start_test - -# starting consumer to consume data in source -start_console_consumer $source_console_consumer_grp $zk_source_port $console_consumer_source_log DecodedMessageFormatter - -# starting consumer to consume data in target -start_console_consumer $target_console_consumer_grp $zk_target_port $console_consumer_target_log DecodedMessageFormatter - -# wait for zero source consumer lags -wait_for_zero_consumer_lags $source_console_consumer_grp $zk_source_port - -# wait for zero target consumer lags -wait_for_zero_consumer_lags $target_console_consumer_grp $zk_target_port - -# ======================================================= -# remove the following 'sleep 30' when KAFKA-313 is fixed -# ======================================================= -info "sleeping 30 sec" -sleep 30 - -shutdown_servers - -cmp_checksum -result=$? - -# =============================================== -# Report the time taken -# =============================================== -test_end_time="$(date +%s)" -total_test_time_sec=$(( $test_end_time - $test_start_time )) -total_test_time_min=$(( $total_test_time_sec / 60 )) -info "Total time taken: $total_test_time_min min for $num_iterations iterations" -echo - -exit $result diff --git a/system_test/broker_failure/config/log4j.properties b/system_test/broker_failure/config/log4j.properties deleted file mode 100644 index 23ece9bb5b3..00000000000 --- a/system_test/broker_failure/config/log4j.properties +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger=INFO, stdout - -# ==================================== -# messages going to kafkaAppender -# ==================================== -log4j.logger.kafka=DEBUG, kafkaAppender -log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, kafkaAppender -log4j.logger.org.apache.zookeeper=INFO, kafkaAppender - -# ==================================== -# messages going to zookeeperAppender -# ==================================== -# (comment out this line to redirect ZK-related messages to kafkaAppender -# to allow reading both Kafka and ZK debugging messages in a single file) -log4j.logger.org.apache.zookeeper=INFO, zookeeperAppender - -# ==================================== -# stdout -# ==================================== -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n - -# ==================================== -# fileAppender -# ==================================== -log4j.appender.fileAppender=org.apache.log4j.FileAppender -log4j.appender.fileAppender.File=/tmp/kafka_all_request.log -log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.fileAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -# ==================================== -# kafkaAppender -# ==================================== -log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaAppender.File=/tmp/kafka.log -log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.additivity.kafka=true - -# ==================================== -# zookeeperAppender -# ==================================== -log4j.appender.zookeeperAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.zookeeperAppender.File=/tmp/zookeeper.log -log4j.appender.zookeeperAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.zookeeperAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.additivity.org.apache.zookeeper=false - -# ==================================== -# other available debugging info -# ==================================== -#log4j.logger.kafka.server.EmbeddedConsumer$MirroringThread=TRACE -#log4j.logger.kafka.server.KafkaRequestHandlers=TRACE -#log4j.logger.kafka.producer.async.AsyncProducer=TRACE -#log4j.logger.kafka.producer.async.ProducerSendThread=TRACE -#log4j.logger.kafka.producer.async.DefaultEventHandler=TRACE - -log4j.logger.kafka.consumer=DEBUG -log4j.logger.kafka.tools.VerifyConsumerRebalance=DEBUG -log4j.logger.kafka.tools.ConsumerOffsetChecker=DEBUG - -# to print message checksum from ProducerPerformance -log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG - -# to print socket buffer size validated by Kafka broker -log4j.logger.kafka.network.Acceptor=DEBUG - -# to print socket buffer size validated by SimpleConsumer 
-log4j.logger.kafka.consumer.SimpleConsumer=TRACE - diff --git a/system_test/broker_failure/config/mirror_producer.properties b/system_test/broker_failure/config/mirror_producer.properties deleted file mode 100644 index 7f80a1e1064..00000000000 --- a/system_test/broker_failure/config/mirror_producer.properties +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -producer.type=async - -# to avoid dropping events if the queue is full, wait indefinitely -queue.enqueue.timeout.ms=-1 - diff --git a/system_test/broker_failure/config/mirror_producer1.properties b/system_test/broker_failure/config/mirror_producer1.properties deleted file mode 100644 index 81dae76f86d..00000000000 --- a/system_test/broker_failure/config/mirror_producer1.properties +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -#broker.list=0:localhost:9081 -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -producer.type=async - -# to avoid dropping events if the queue is full, wait indefinitely -queue.enqueue.timeout.ms=-1 - diff --git a/system_test/broker_failure/config/mirror_producer2.properties b/system_test/broker_failure/config/mirror_producer2.properties deleted file mode 100644 index 714b95df9b1..00000000000 --- a/system_test/broker_failure/config/mirror_producer2.properties +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -#broker.list=0:localhost:9082 -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -producer.type=async - -# to avoid dropping events if the queue is full, wait indefinitely -queue.enqueue.timeout.ms=-1 - diff --git a/system_test/broker_failure/config/mirror_producer3.properties b/system_test/broker_failure/config/mirror_producer3.properties deleted file mode 100644 index e8fa72db4d2..00000000000 --- a/system_test/broker_failure/config/mirror_producer3.properties +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -#broker.list=0:localhost:9083 -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -producer.type=async - -# to avoid dropping events if the queue is full, wait indefinitely -queue.enqueue.timeout.ms=-1 - diff --git a/system_test/broker_failure/config/server_source1.properties b/system_test/broker_failure/config/server_source1.properties deleted file mode 100644 index bbf288ebfec..00000000000 --- a/system_test/broker_failure/config/server_source1.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=1 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9091 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source1-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - diff --git a/system_test/broker_failure/config/server_source2.properties b/system_test/broker_failure/config/server_source2.properties deleted file mode 100644 index 570bafc6b92..00000000000 --- a/system_test/broker_failure/config/server_source2.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=2 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9092 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source2-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - diff --git a/system_test/broker_failure/config/server_source3.properties b/system_test/broker_failure/config/server_source3.properties deleted file mode 100644 index df8ff6a6357..00000000000 --- a/system_test/broker_failure/config/server_source3.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=3 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9093 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source3-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.size=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - diff --git a/system_test/broker_failure/config/server_source4.properties b/system_test/broker_failure/config/server_source4.properties deleted file mode 100644 index ee9c7fd3bee..00000000000 --- a/system_test/broker_failure/config/server_source4.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=4 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9094 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source4-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - diff --git a/system_test/broker_failure/config/server_target1.properties b/system_test/broker_failure/config/server_target1.properties deleted file mode 100644 index 7f776bd3c2b..00000000000 --- a/system_test/broker_failure/config/server_target1.properties +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=1 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9081 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-target1-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 - diff --git a/system_test/broker_failure/config/server_target2.properties b/system_test/broker_failure/config/server_target2.properties deleted file mode 100644 index 6d997dcbdaa..00000000000 --- a/system_test/broker_failure/config/server_target2.properties +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=2 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9082 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-target2-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 - diff --git a/system_test/broker_failure/config/server_target3.properties b/system_test/broker_failure/config/server_target3.properties deleted file mode 100644 index 0d3a9aead1a..00000000000 --- a/system_test/broker_failure/config/server_target3.properties +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=3 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9083 - -# the number of processor threads the socket server uses. 
Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-target3-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 - diff --git a/system_test/broker_failure/config/whitelisttest.consumer.properties b/system_test/broker_failure/config/whitelisttest.consumer.properties deleted file mode 100644 index dd91bd3049c..00000000000 --- a/system_test/broker_failure/config/whitelisttest.consumer.properties +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.consumer.ConsumerConfig for more details - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -#consumer group id -group.id=group1 - -mirror.topics.whitelist=test_1,test_2 -auto.offset.reset=smallest diff --git a/system_test/broker_failure/config/zookeeper_source.properties b/system_test/broker_failure/config/zookeeper_source.properties deleted file mode 100644 index 76b02a26827..00000000000 --- a/system_test/broker_failure/config/zookeeper_source.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper_source -# the port at which the clients will connect -clientPort=2181 diff --git a/system_test/broker_failure/config/zookeeper_target.properties b/system_test/broker_failure/config/zookeeper_target.properties deleted file mode 100644 index 28561d95898..00000000000 --- a/system_test/broker_failure/config/zookeeper_target.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper_target -# the port at which the clients will connect -clientPort=2182 diff --git a/system_test/cluster_config.json b/system_test/cluster_config.json deleted file mode 100644 index 8ed896b358f..00000000000 --- a/system_test/cluster_config.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - } - ] -} diff --git a/system_test/common/util.sh b/system_test/common/util.sh deleted file mode 100644 index e3d10c68061..00000000000 --- a/system_test/common/util.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
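The cluster_config.json removed above is the file the framework read to learn which entities (zookeeper, brokers, producer_performance, console_consumer) it should manage and on which hosts. A minimal sketch of how such a file could be loaded and grouped by role is shown below; the real loader lives in system_test_utils and may differ, and the path and variable names here are illustrative only.

    import json
    from collections import defaultdict

    # Illustrative sketch: load cluster_config.json and index entities by role.
    # The framework's actual loader (system_test_utils) is not reproduced here.
    with open("system_test/cluster_config.json") as f:
        entities = json.load(f)["cluster_config"]

    by_role = defaultdict(list)
    for entity in entities:
        by_role[entity["role"]].append(entity["entity_id"])

    print(by_role["broker"])   # for the config above: ['1', '2', '3']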
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ========================================= -# info - print messages with timestamp -# ========================================= -info() { - echo -e "$(date +"%Y-%m-%d %H:%M:%S") $*" -} - -# ========================================= -# info_no_newline - print messages with -# timestamp without newline -# ========================================= -info_no_newline() { - echo -e -n "$(date +"%Y-%m-%d %H:%M:%S") $*" -} - -# ========================================= -# get_random_range - return a random number -# between the lower & upper bounds -# usage: -# get_random_range $lower $upper -# random_no=$? -# ========================================= -get_random_range() { - lo=$1 - up=$2 - range=$(($up - $lo + 1)) - - echo $(($(($RANDOM % range)) + $lo)) -} - -# ========================================= -# kill_child_processes - terminate a -# process and its child processes -# ========================================= -kill_child_processes() { - isTopmost=$1 - curPid=$2 - childPids=$(ps a -o pid= -o ppid= | grep "${curPid}$" | awk '{print $1;}') - - for childPid in $childPids - do - kill_child_processes 0 $childPid - done - if [ $isTopmost -eq 0 ]; then - kill -15 $curPid 2> /dev/null - fi -} - -# ========================================================================= -# generate_kafka_properties_files - -# 1. it takes the following arguments and generate server_{1..n}.properties -# for the total no. of kafka broker as specified in "num_server"; the -# resulting properties files will be located at: -# /system_test//config -# 2. the default values in the generated properties files will be copied -# from the settings in config/server.properties while the brokerid and -# server port will be incremented accordingly -# 3. to generate properties files with non-default values such as -# "socket.send.buffer.bytes=2097152", simply add the property with new value -# to the array variable kafka_properties_to_replace as shown below -# ========================================================================= -generate_kafka_properties_files() { - - test_suite_full_path=$1 # eg. /system_test/single_host_multi_brokers - num_server=$2 # total no. of brokers in the cluster - brokerid_to_start=$3 # this should be '0' in most cases - kafka_port_to_start=$4 # if 9091 is used, the rest would be 9092, 9093, ... - - this_config_dir=${test_suite_full_path}/config - - # info "test suite full path : $test_suite_full_path" - # info "broker id to start : $brokerid_to_start" - # info "kafka port to start : $kafka_port_to_start" - # info "num of server : $num_server" - # info "config dir : $this_config_dir" - - # ============================================= - # array to keep kafka properties statements - # from the file 'server.properties' need - # to be changed from their default values - # ============================================= - # kafka_properties_to_replace # DO NOT uncomment this line !! 
- - # ============================================= - # Uncomment the following kafka properties - # array element as needed to change the default - # values. Other kafka properties can be added - # in a similar fashion. - # ============================================= - # kafka_properties_to_replace[1]="socket.send.buffer.bytes=2097152" - # kafka_properties_to_replace[2]="socket.receive.buffer.bytes=2097152" - # kafka_properties_to_replace[3]="num.partitions=3" - # kafka_properties_to_replace[4]="socket.request.max.bytes=10485760" - - server_properties=`cat ${this_config_dir}/server.properties` - - for ((i=1; i<=$num_server; i++)) - do - # ====================== - # update misc properties - # ====================== - for ((j=1; j<=${#kafka_properties_to_replace[@]}; j++)) - do - keyword_to_replace=`echo ${kafka_properties_to_replace[${j}]} | awk -F '=' '{print $1}'` - string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace` - # info "string to be replaced : [$string_to_be_replaced]" - # info "string to replace : [${kafka_properties_to_replace[${j}]}]" - - echo "${server_properties}" | \ - sed -e "s/${string_to_be_replaced}/${kafka_properties_to_replace[${j}]}/g" \ - >${this_config_dir}/server_${i}.properties - - server_properties=`cat ${this_config_dir}/server_${i}.properties` - done - - # ====================== - # update brokerid - # ====================== - keyword_to_replace="brokerid=" - string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace` - brokerid_idx=$(( $brokerid_to_start + $i)) - string_to_replace="${keyword_to_replace}${brokerid_idx}" - # info "string to be replaced : [${string_to_be_replaced}]" - # info "string to replace : [${string_to_replace}]" - - echo "${server_properties}" | \ - sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \ - >${this_config_dir}/server_${i}.properties - - server_properties=`cat ${this_config_dir}/server_${i}.properties` - - # ====================== - # update kafak_port - # ====================== - keyword_to_replace="port=" - string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace` - port_idx=$(( $kafka_port_to_start + $i - 1 )) - string_to_replace="${keyword_to_replace}${port_idx}" - # info "string to be replaced : [${string_to_be_replaced}]" - # info "string to replace : [${string_to_replace}]" - - echo "${server_properties}" | \ - sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \ - >${this_config_dir}/server_${i}.properties - - server_properties=`cat ${this_config_dir}/server_${i}.properties` - - # ====================== - # update kafka_log dir - # ====================== - keyword_to_replace="log.dir=" - string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace` - string_to_be_replaced=${string_to_be_replaced//\//\\\/} - string_to_replace="${keyword_to_replace}\/tmp\/kafka_server_${i}_logs" - # info "string to be replaced : [${string_to_be_replaced}]" - # info "string to replace : [${string_to_replace}]" - - echo "${server_properties}" | \ - sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \ - >${this_config_dir}/server_${i}.properties - - server_properties=`cat ${this_config_dir}/server_${i}.properties` - - done -} - diff --git a/system_test/logging.conf b/system_test/logging.conf deleted file mode 100644 index e9e921321e0..00000000000 --- a/system_test/logging.conf +++ /dev/null @@ -1,56 +0,0 @@ -# ============================================== -# declaration - must have a 'root' logger -# 
============================================== -[loggers] -keys=root,namedLogger,anonymousLogger - -[handlers] -keys=namedConsoleHandler,anonymousConsoleHandler - -[formatters] -keys=namedFormatter,anonymousFormatter - -# ============================================== -# loggers session -# ============================================== -[logger_root] -level=NOTSET -handlers= - -[logger_namedLogger] -level=DEBUG -handlers=namedConsoleHandler -qualname=namedLogger -propagate=0 - -[logger_anonymousLogger] -level=DEBUG -handlers=anonymousConsoleHandler -qualname=anonymousLogger -propagate=0 - -# ============================================== -# handlers session -# ** Change 'level' to INFO/DEBUG in this session -# ============================================== -[handler_namedConsoleHandler] -class=StreamHandler -level=INFO -formatter=namedFormatter -args=[] - -[handler_anonymousConsoleHandler] -class=StreamHandler -level=INFO -formatter=anonymousFormatter -args=[] - -# ============================================== -# formatters session -# ============================================== -[formatter_namedFormatter] -format=%(asctime)s - %(levelname)s - %(message)s %(name_of_class)s - -[formatter_anonymousFormatter] -format=%(asctime)s - %(levelname)s - %(message)s - diff --git a/system_test/metrics.json b/system_test/metrics.json deleted file mode 100644 index 30dabe59687..00000000000 --- a/system_test/metrics.json +++ /dev/null @@ -1,174 +0,0 @@ -{ - "dashboards": [ - { - "role": "broker", - "graphs": [ - { - "graph_name": "Produce-Request-Rate", - "y_label": "requests-per-sec", - "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RequestsPerSec", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "Produce-Request-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Produce-TotalTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "Produce-Request-Remote-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RemoteTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "Fetch-Consumer-Request-Rate", - "y_label": "requests-per-sec", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RequestsPerSec", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "Fetch-Consumer-Request-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-TotalTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "Fetch-Consumer-Request-Remote-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RemoteTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "Fetch-Follower-Request-Rate", - "y_label": "requests-per-sec", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-RequestsPerSec", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "Fetch-Follower-Request-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-TotalTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "Fetch-Follower-Request-Remote-Time", - "y_label": "ms,ms", - "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-RemoteTimeMs", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "ProducePurgatoryExpirationRate", - "y_label": "expirations-per-sec", - "bean_name": "kafka.server:type=DelayedProducerRequestMetrics,name=AllExpiresPerSecond", - "attributes": "OneMinuteRate" - }, - { - 
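The logging.conf deleted above defines a namedLogger and an anonymousLogger, and its namedFormatter expects a custom %(name_of_class)s field that callers supply through the logger's "extra" argument (as mirror_maker_test.py does further down with its d dictionary). A minimal sketch of that usage, assuming the config file path; it is not the framework's exact bootstrap code.

    import logging
    import logging.config

    # Load the handlers/formatters defined in system_test/logging.conf
    # (path assumed for illustration).
    logging.config.fileConfig("system_test/logging.conf")

    named_logger = logging.getLogger("namedLogger")
    anon_logger = logging.getLogger("anonymousLogger")

    # namedFormatter renders %(name_of_class)s, provided via "extra".
    d = {"name_of_class": "MirrorMakerTest"}
    named_logger.info("starting brokers", extra=d)
    anon_logger.info("sleeping for 5s")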
"graph_name": "FetchConsumerPurgatoryExpirationRate", - "y_label": "expirations-per-sec", - "bean_name": "kafka.server:type=DelayedFetchRequestMetrics,name=ConsumerExpiresPerSecond", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "FetchFollowerPurgatoryExpirationRate", - "y_label": "expirations-per-sec", - "bean_name": "kafka.server:type=DelayedFetchRequestMetrics,name=FollowerExpiresPerSecond", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "ProducePurgatoryQueueSize", - "y_label": "size", - "bean_name": "kafka.server:type=ProducerRequestPurgatory,name=NumDelayedOperations", - "attributes": "Value" - }, - { - "graph_name": "FetchPurgatoryQueueSize", - "y_label": "size", - "bean_name": "kafka.server:type=FetchRequestPurgatory,name=NumDelayedOperations", - "attributes": "Value" - }, - { - "graph_name": "ControllerLeaderElectionRateAndTime", - "y_label": "elections-per-sec,ms,ms", - "bean_name": "kafka.controller:type=ControllerStat,name=LeaderElectionRateAndTimeMs", - "attributes": "OneMinuteRate,Mean,99thPercentile" - }, - { - "graph_name": "LogFlushRateAndTime", - "y_label": "flushes-per-sec,ms,ms", - "bean_name": "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs", - "attributes": "OneMinuteRate,Mean,99thPercentile" - }, - { - "graph_name": "AllBytesOutRate", - "y_label": "bytes-per-sec", - "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsBytesOutPerSec", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "AllBytesInRate", - "y_label": "bytes-per-sec", - "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsBytesInPerSec", - "attributes": "OneMinuteRate" - }, - { - "graph_name": "AllMessagesInRate", - "y_label": "messages-per-sec", - "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsMessagesInPerSec", - "attributes": "OneMinuteRate" - } - ] - }, - { - "role": "producer_performance", - "graphs": [ - { - "graph_name": "ProduceRequestRateAndTime", - "y_label": "requests-per-sec,ms,ms", - "bean_name": "kafka.producer:type=ProducerRequestStat,name=ProduceRequestRateAndTimeMs", - "attributes": "OneMinuteRate,Mean,99thPercentile" - }, - { - "graph_name": "ProduceRequestSize", - "y_label": "bytes,bytes", - "bean_name": "kafka.producer:type=ProducerRequestStat,name=ProducerRequestSize", - "attributes": "Mean,99thPercentile" - } - ] - }, - { - "role": "console_consumer", - "graphs": [ - { - "graph_name": "FetchRequestRateAndTime", - "y_label": "requests-per-sec,ms,ms", - "bean_name": "kafka.consumer:type=FetchRequestAndResponseStat,name=FetchRequestRateAndTimeMs", - "attributes": "OneMinuteRate,Mean,99thPercentile" - }, - { - "graph_name": "FetchResponseSize", - "y_label": "bytes,bytes", - "bean_name": "kafka.consumer:type=FetchRequestAndResponseStat,name=FetchResponseSize", - "attributes": "Mean,99thPercentile" - }, - { - "graph_name": "ConsumedMessageRate", - "y_label": "messages-per-sec", - "bean_name": "kafka.consumer:type=ConsumerTopicStat,name=AllTopicsMessagesPerSec", - "attributes": "OneMinuteRate" - } - ] - }, - { - "role": "zookeeper", - "graphs": [ - { - "graph_name": "ZookeeperServerStats", - "y_label": "zookeeper-latency-ms", - "bean_name": "org.apache.ZooKeeperService:name0=StandaloneServer_port-1", - "attributes": "AvgRequestLatency" - } - ] - } - ] -} diff --git a/system_test/mirror_maker_testsuite/__init__.py b/system_test/mirror_maker_testsuite/__init__.py deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/system_test/mirror_maker_testsuite/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git 
a/system_test/mirror_maker_testsuite/cluster_config.json b/system_test/mirror_maker_testsuite/cluster_config.json deleted file mode 100644 index 5b908ff3bae..00000000000 --- a/system_test/mirror_maker_testsuite/cluster_config.json +++ /dev/null @@ -1,136 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - - { - "entity_id": "12", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - - { - "entity_id": "13", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/config/console_consumer.properties b/system_test/mirror_maker_testsuite/config/console_consumer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/mirror_maker_testsuite/config/consumer.properties b/system_test/mirror_maker_testsuite/config/consumer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/mirror_maker_testsuite/config/log4j.properties b/system_test/mirror_maker_testsuite/config/log4j.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties 
b/system_test/mirror_maker_testsuite/config/mirror_consumer.properties deleted file mode 100644 index e90634af02d..00000000000 --- a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties +++ /dev/null @@ -1,12 +0,0 @@ -zookeeper.connect=localhost:2108 -zookeeper.connection.timeout.ms=1000000 -group.id=mm_regtest_grp -auto.commit.interval.ms=120000 -auto.offset.reset=smallest -#fetch.message.max.bytes=1048576 -#rebalance.max.retries=4 -#rebalance.backoff.ms=2000 -socket.receive.buffer.bytes=1048576 -fetch.message.max.bytes=1048576 -zookeeper.sync.time.ms=15000 -shallow.iterator.enable=false diff --git a/system_test/mirror_maker_testsuite/config/mirror_producer.properties b/system_test/mirror_maker_testsuite/config/mirror_producer.properties deleted file mode 100644 index f94bebd3f5a..00000000000 --- a/system_test/mirror_maker_testsuite/config/mirror_producer.properties +++ /dev/null @@ -1,12 +0,0 @@ -# old producer -metadata.broker.list=localhost:9094 -compression.codec=0 -request.retries=3 -request.required.acks=1 - -# new producer -block.on.buffer.full=true -bootstrap.servers=localhost:9094 -compression.type=none -retries=3 -acks=1 diff --git a/system_test/mirror_maker_testsuite/config/producer.properties b/system_test/mirror_maker_testsuite/config/producer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/mirror_maker_testsuite/config/producer_performance.properties b/system_test/mirror_maker_testsuite/config/producer_performance.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/mirror_maker_testsuite/config/server.properties b/system_test/mirror_maker_testsuite/config/server.properties deleted file mode 100644 index 9717cd63193..00000000000 --- a/system_test/mirror_maker_testsuite/config/server.properties +++ /dev/null @@ -1,139 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. 
-#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9091 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=102400 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=1 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 diff --git a/system_test/mirror_maker_testsuite/config/zookeeper.properties b/system_test/mirror_maker_testsuite/config/zookeeper.properties deleted file mode 100644 index 5474a72be1a..00000000000 --- a/system_test/mirror_maker_testsuite/config/zookeeper.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 -syncLimit=5 -initLimit=10 -tickTime=2000 diff --git a/system_test/mirror_maker_testsuite/mirror_maker_test.py b/system_test/mirror_maker_testsuite/mirror_maker_test.py deleted file mode 100644 index 48f9ff6b281..00000000000 --- a/system_test/mirror_maker_testsuite/mirror_maker_test.py +++ /dev/null @@ -1,324 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-#!/usr/bin/env python - -# =================================== -# mirror_maker_test.py -# =================================== - -import inspect -import logging -import os -import signal -import subprocess -import sys -import time -import traceback - -from system_test_env import SystemTestEnv -sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR) - -from setup_utils import SetupUtils -from replication_utils import ReplicationUtils -import system_test_utils -from testcase_env import TestcaseEnv - -# product specific: Kafka -import kafka_system_test_utils -import metrics - -class MirrorMakerTest(ReplicationUtils, SetupUtils): - - testModuleAbsPathName = os.path.realpath(__file__) - testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName)) - - def __init__(self, systemTestEnv): - - # SystemTestEnv - provides cluster level environment settings - # such as entity_id, hostname, kafka_home, java_home which - # are available in a list of dictionary named - # "clusterEntityConfigDictList" - self.systemTestEnv = systemTestEnv - - super(MirrorMakerTest, self).__init__(self) - - # dict to pass user-defined attributes to logger argument: "extra" - d = {'name_of_class': self.__class__.__name__} - - def signal_handler(self, signal, frame): - self.log_message("Interrupt detected - User pressed Ctrl+c") - - # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - sys.exit(1) - - def runTest(self): - - # ====================================================================== - # get all testcase directories under this testsuite - # ====================================================================== - testCasePathNameList = system_test_utils.get_dir_paths_with_prefix( - self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX) - testCasePathNameList.sort() - - replicationUtils = ReplicationUtils(self) - - # ============================================================= - # launch each testcase one by one: testcase_1, testcase_2, ... 
- # ============================================================= - for testCasePathName in testCasePathNameList: - - skipThisTestCase = False - - try: - # ====================================================================== - # A new instance of TestcaseEnv to keep track of this testcase's env vars - # and initialize some env vars as testCasePathName is available now - # ====================================================================== - self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self) - self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName - self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName) - self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"] - - # ====================================================================== - # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json - # ====================================================================== - testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"] - - if self.systemTestEnv.printTestDescriptionsOnly: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - continue - elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName): - self.log_message("Skipping : " + testcaseDirName) - skipThisTestCase = True - continue - else: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName) - - # ============================================================================== # - # ============================================================================== # - # Product Specific Testing Code Starts Here: # - # ============================================================================== # - # ============================================================================== # - - # initialize self.testcaseEnv with user-defined environment variables (product specific) - self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = "" - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False - self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False - - # initialize signal handler - signal.signal(signal.SIGINT, self.signal_handler) - - # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file: - # system_test/_testsuite/testcase_/testcase__properties.json - self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data( - self.testcaseEnv.testcasePropJsonPathName) - - # clean up data directories specified in zookeeper.properties and kafka_server_.properties - kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase - # for collecting logs from remote machines - kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv) - - # TestcaseEnv - initialize producer & consumer config / log file pathnames - kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv) - - # generate remote hosts log/config dirs if not exist - kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # generate properties files for zookeeper, kafka, producer, consumer and mirror-maker: - # 1. copy system_test/_testsuite/config/*.properties to - # system_test/_testsuite/testcase_/config/ - # 2. 
update all properties files in system_test/_testsuite/testcase_/config - # by overriding the settings specified in: - # system_test/_testsuite/testcase_/testcase__properties.json - kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName, - self.testcaseEnv, self.systemTestEnv) - - # ============================================= - # preparing all entities to start the test - # ============================================= - self.log_message("starting zookeepers") - kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 2s") - time.sleep(2) - - self.log_message("starting brokers") - kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - self.log_message("creating topics") - kafka_system_test_utils.create_topic_for_producer_performance(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - - self.log_message("starting mirror makers") - kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 10s") - time.sleep(10) - - - # ============================================= - # starting producer - # ============================================= - self.log_message("starting producer in the background") - kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False) - msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"] - self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages") - time.sleep(int(msgProducingFreeTimeSec)) - - # ============================================= - # A while-loop to bounce mirror maker as specified - # by "num_iterations" in testcase_n_properties.json - # ============================================= - i = 1 - numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) - bouncedEntityDownTimeSec = 15 - try: - bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"]) - except: - pass - - while i <= numIterations: - - self.log_message("Iteration " + str(i) + " of " + str(numIterations)) - - # ============================================= - # Bounce Mirror Maker if specified in testcase config - # ============================================= - bounceMirrorMaker = self.testcaseEnv.testcaseArgumentsDict["bounce_mirror_maker"] - self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker) - if (bounceMirrorMaker.lower() == "true"): - - clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList - mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterConfigList, "role", "mirror_maker", "entity_id") - stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0] - - mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId] - self.log_message("stopping mirror maker : " + mirrorMakerPPid) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid) - self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec") - time.sleep(bouncedEntityDownTimeSec) - - # starting previously terminated broker - self.log_message("starting the previously terminated mirror maker") - kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId) - - 
self.anonLogger.info("sleeping for 15s") - time.sleep(15) - i += 1 - # while loop - - # ============================================= - # tell producer to stop - # ============================================= - self.testcaseEnv.lock.acquire() - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(1) - - # ============================================= - # wait for producer thread's update of - # "backgroundProducerStopped" to be "True" - # ============================================= - while 1: - self.testcaseEnv.lock.acquire() - self.logger.info("status of backgroundProducerStopped : [" + \ - str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d) - if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]: - time.sleep(1) - self.testcaseEnv.lock.release() - self.logger.info("all producer threads completed", extra=self.d) - break - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(2) - - self.anonLogger.info("sleeping for 15s") - time.sleep(15) - self.anonLogger.info("terminate Mirror Maker") - cmdStr = "ps auxw | grep Mirror | grep -v grep | tr -s ' ' | cut -f2 -d ' ' | xargs kill -15" - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - line = line.rstrip('\n') - self.anonLogger.info("#### ["+line+"]") - self.anonLogger.info("sleeping for 15s") - time.sleep(15) - - # ============================================= - # starting consumer - # ============================================= - self.log_message("starting consumer in the background") - kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 10s") - time.sleep(10) - - # ============================================= - # this testcase is completed - stop all entities - # ============================================= - self.log_message("stopping all entities") - for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - # make sure all entities are stopped - kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv) - - # ============================================= - # collect logs from remote hosts - # ============================================= - kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # ============================================= - # validate the data matched and checksum - # ============================================= - self.log_message("validating data matched") - kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils) - kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "source") - kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "target") - - # ============================================= - # draw graphs - # ============================================= - metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME, - self.testcaseEnv, - self.systemTestEnv.clusterEntityConfigDictList) - - # build dashboard, one for each role - 
metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME, - self.testcaseEnv.testCaseDashboardsDir, - self.systemTestEnv.clusterEntityConfigDictList) - - except Exception as e: - self.log_message("Exception while running test {0}".format(e)) - traceback.print_exc() - self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED" - - finally: - if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly: - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - diff --git a/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json b/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json deleted file mode 100644 index 9dd3477e70a..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "description": {"01":"To Test : 'Replication with Mirror Maker'", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:sync, acks:-1, comp:0", - "09":"Log segment size : 10240" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - 
"num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json b/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json deleted file mode 100644 index d6495e54d68..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:sync, acks:-1, comp:0", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": 
"zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json deleted file mode 100644 index f6fe86787f1..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - 
"cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - - { - "entity_id": "12", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json b/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json deleted file mode 100644 index 842c70eaf33..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - 
{ - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "2", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "log_filename": "mirror_maker_13.log", - "mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json deleted file mode 100644 index f6fe86787f1..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - 
"cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - - { - "entity_id": "12", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json b/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json deleted file mode 100644 index 48864e61afb..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - 
"bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "log_filename": "mirror_maker_13.log", - 
"mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15005/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15005/cluster_config.json deleted file mode 100644 index 63ba37b70e4..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15005/cluster_config.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - { - "entity_id": "12", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - }, - - { - "entity_id": "14", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9114" - }, - { - "entity_id": "15", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9115" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json 
b/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json deleted file mode 100644 index 92b2a6b4ffe..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - 
"config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15006/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15006/cluster_config.json deleted file mode 100644 index 63ba37b70e4..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15006/cluster_config.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": 
"localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - { - "entity_id": "12", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - }, - - { - "entity_id": "14", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9114" - }, - { - "entity_id": "15", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9115" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json b/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json deleted file mode 100644 index 7d5019c6aa7..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", 
- "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json b/system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json deleted file mode 100644 index 08918364219..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "description": {"01":"To Test : 'Replication with Mirror Maker'", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - 
"05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:sync, acks:-1, comp:0", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": 
"console_consumer_11.properties" - }, - - { - "entity_id": "12", - "new-producer":"true", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json b/system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json deleted file mode 100644 index 56e481255cb..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:sync, acks:-1, comp:0", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": 
"kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "new-producer":"true", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json deleted file mode 100644 index f6fe86787f1..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": 
"default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - - { - "entity_id": "12", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json b/system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json deleted file mode 100644 index 8f8c47af4e7..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - 
"broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "2", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "new-producer":"true", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "new-producer":"true", - "log_filename": "mirror_maker_13.log", - "mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json deleted file mode 100644 index f6fe86787f1..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": 
"default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - - { - "entity_id": "12", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json b/system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json deleted file mode 100644 index baa639b6888..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - 
"log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "new-producer":"true", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "new-producer":"true", - "log_filename": "mirror_maker_13.log", - "mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json deleted file mode 100644 index 63ba37b70e4..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - 
"jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - { - "entity_id": "12", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - }, - - { - "entity_id": "14", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9114" - }, - { - "entity_id": "15", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9115" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json b/system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json deleted file mode 100644 index 8c383756d88..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json +++ /dev/null @@ -1,182 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - 
"message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - 
"consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "new-producer":"true", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "new-producer":"true", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json deleted file mode 100644 index 63ba37b70e4..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - - { - "entity_id": "2", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - - { - "entity_id": "7", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "broker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - - { - "entity_id": "10", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - }, - { - "entity_id": "11", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9111" - }, - { - "entity_id": "12", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9112" - }, - { - "entity_id": "13", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9113" - }, - - { - "entity_id": "14", - "hostname": 
"localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9114" - }, - { - "entity_id": "15", - "hostname": "localhost", - "role": "mirror_maker", - "cluster_name":"target", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9115" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json b/system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json deleted file mode 100644 index fb275330bd1..00000000000 --- a/system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json +++ /dev/null @@ -1,182 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": 
"/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "new-producer":"true", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "new-producer":"true", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/offset_management_testsuite/cluster_config.json b/system_test/offset_management_testsuite/cluster_config.json deleted file mode 100644 index dcca2007de4..00000000000 --- a/system_test/offset_management_testsuite/cluster_config.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9100" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9101" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9102" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9103" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "broker", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9104" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9105" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", 
- "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9106" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9107" - }, - { - "entity_id": "8", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9108" - }, - { - "entity_id": "9", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9109" - }, - { - "entity_id": "10", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name":"source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9110" - } - ] -} diff --git a/system_test/offset_management_testsuite/config/console_consumer.properties b/system_test/offset_management_testsuite/config/console_consumer.properties deleted file mode 100644 index a2ab8b9c155..00000000000 --- a/system_test/offset_management_testsuite/config/console_consumer.properties +++ /dev/null @@ -1,2 +0,0 @@ -auto.offset.reset=smallest -auto.commit.interval.ms=1000 diff --git a/system_test/offset_management_testsuite/config/producer_performance.properties b/system_test/offset_management_testsuite/config/producer_performance.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/offset_management_testsuite/config/server.properties b/system_test/offset_management_testsuite/config/server.properties deleted file mode 100644 index b6de5289f48..00000000000 --- a/system_test/offset_management_testsuite/config/server.properties +++ /dev/null @@ -1,143 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. 
-#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9091 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=102400 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=1 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 - -offsets.topic.num.partitions=2 -offsets.topic.replication.factor=4 - diff --git a/system_test/offset_management_testsuite/config/zookeeper.properties b/system_test/offset_management_testsuite/config/zookeeper.properties deleted file mode 100644 index 5474a72be1a..00000000000 --- a/system_test/offset_management_testsuite/config/zookeeper.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 -syncLimit=5 -initLimit=10 -tickTime=2000 diff --git a/system_test/offset_management_testsuite/offset_management_test.py b/system_test/offset_management_testsuite/offset_management_test.py deleted file mode 100644 index aa389105aa4..00000000000 --- a/system_test/offset_management_testsuite/offset_management_test.py +++ /dev/null @@ -1,299 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-#!/usr/bin/env python - -# =================================== -# offset_management_test.py -# =================================== - -import os -import signal -import sys -import time -import traceback - -from system_test_env import SystemTestEnv -sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR) - -from setup_utils import SetupUtils -from replication_utils import ReplicationUtils -import system_test_utils -from testcase_env import TestcaseEnv - -# product specific: Kafka -import kafka_system_test_utils -import metrics - -class OffsetManagementTest(ReplicationUtils, SetupUtils): - - testModuleAbsPathName = os.path.realpath(__file__) - testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName)) - - def __init__(self, systemTestEnv): - - # SystemTestEnv - provides cluster level environment settings - # such as entity_id, hostname, kafka_home, java_home which - # are available in a list of dictionary named - # "clusterEntityConfigDictList" - self.systemTestEnv = systemTestEnv - - super(OffsetManagementTest, self).__init__(self) - - # dict to pass user-defined attributes to logger argument: "extra" - d = {'name_of_class': self.__class__.__name__} - - def signal_handler(self, signal, frame): - self.log_message("Interrupt detected - User pressed Ctrl+c") - - # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - sys.exit(1) - - def runTest(self): - - # ====================================================================== - # get all testcase directories under this testsuite - # ====================================================================== - testCasePathNameList = system_test_utils.get_dir_paths_with_prefix( - self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX) - testCasePathNameList.sort() - - replicationUtils = ReplicationUtils(self) - - # ============================================================= - # launch each testcase one by one: testcase_1, testcase_2, ... 
- # ============================================================= - for testCasePathName in testCasePathNameList: - - skipThisTestCase = False - - try: - # ====================================================================== - # A new instance of TestcaseEnv to keep track of this testcase's env vars - # and initialize some env vars as testCasePathName is available now - # ====================================================================== - self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self) - self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName - self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName) - self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"] - - # ====================================================================== - # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json - # ====================================================================== - testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"] - - if self.systemTestEnv.printTestDescriptionsOnly: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - continue - elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName): - self.log_message("Skipping : " + testcaseDirName) - skipThisTestCase = True - continue - else: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName) - - # ============================================================================== # - # ============================================================================== # - # Product Specific Testing Code Starts Here: # - # ============================================================================== # - # ============================================================================== # - - # initialize self.testcaseEnv with user-defined environment variables (product specific) - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False - self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False - - # initialize signal handler - signal.signal(signal.SIGINT, self.signal_handler) - - # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file: - # system_test/_testsuite/testcase_/testcase__properties.json - self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data( - self.testcaseEnv.testcasePropJsonPathName) - - # clean up data directories specified in zookeeper.properties and kafka_server_.properties - kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase - # for collecting logs from remote machines - kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv) - - # TestcaseEnv - initialize producer & consumer config / log file pathnames - kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv) - - # generate remote hosts log/config dirs if not exist - kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # generate properties files for zookeeper, kafka, producer, and consumer: - # 1. copy system_test/_testsuite/config/*.properties to - # system_test/_testsuite/testcase_/config/ - # 2. 
update all properties files in system_test/_testsuite/testcase_/config - # by overriding the settings specified in: - # system_test/_testsuite/testcase_/testcase__properties.json - kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName, - self.testcaseEnv, self.systemTestEnv) - - # ============================================= - # preparing all entities to start the test - # ============================================= - self.log_message("starting zookeepers") - kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 2s") - time.sleep(2) - - self.log_message("starting brokers") - kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - self.log_message("creating offset topic") - kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 3, 2) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - # ============================================= - # starting producer - # ============================================= - self.log_message("starting producer in the background") - kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False) - msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"] - self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages") - time.sleep(int(msgProducingFreeTimeSec)) - - kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv) - - kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0) - - # ============================================= - # A while-loop to bounce consumers as specified - # by "num_iterations" in testcase_n_properties.json - # ============================================= - i = 1 - numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) - bouncedEntityDownTimeSec = 10 - try: - bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"]) - except: - pass - - # group1 -> offsets partition 0 // has one consumer; eid: 6 - # group2 -> offsets partition 1 // has four consumers; eid: 7, 8, 9, 10 - - offsets_0_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0) - offsets_1_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 1) - - while i <= numIterations: - - self.log_message("Iteration " + str(i) + " of " + str(numIterations)) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_0_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_0_leader_entity]) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_1_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_1_leader_entity]) - - # ============================================= - # Bounce consumers if specified in testcase config - # ============================================= - bounceConsumers = self.testcaseEnv.testcaseArgumentsDict["bounce_consumers"] - self.log_message("bounce_consumers flag : " + bounceConsumers) - if (bounceConsumers.lower() == "true"): - - clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList - consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( clusterConfigList, "role", "console_consumer", 
"entity_id") - - for stoppedConsumerEntityId in consumerEntityIdList: - consumerPPID = self.testcaseEnv.entityConsoleConsumerParentPidDict[stoppedConsumerEntityId] - self.log_message("stopping consumer: " + consumerPPID) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedConsumerEntityId, consumerPPID) - - self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec") - time.sleep(bouncedEntityDownTimeSec) - # leaders would have changed during the above bounce. - self.log_message("starting the previously terminated consumers.") - for stoppedConsumerEntityId in consumerEntityIdList: - # starting previously terminated consumer - kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv, stoppedConsumerEntityId) - - self.log_message("starting the previously terminated brokers") - kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_0_leader_entity) - kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_1_leader_entity) - - self.anonLogger.info("sleeping for 15s") - time.sleep(15) - i += 1 - # while loop - - # ============================================= - # tell producer to stop - # ============================================= - self.testcaseEnv.lock.acquire() - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(1) - - # ============================================= - # wait for producer thread's update of - # "backgroundProducerStopped" to be "True" - # ============================================= - while 1: - self.testcaseEnv.lock.acquire() - self.logger.info("status of backgroundProducerStopped : [" + \ - str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d) - if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]: - time.sleep(1) - self.logger.info("all producer threads completed", extra=self.d) - break - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(2) - - self.anonLogger.info("sleeping for 15s") - time.sleep(15) - - # ============================================= - # this testcase is completed - stop all entities - # ============================================= - self.log_message("stopping all entities") - for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - # make sure all entities are stopped - kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv) - - # ============================================= - # collect logs from remote hosts - # ============================================= - kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # ============================================= - # validate the data matched and checksum - # ============================================= - self.log_message("validating data matched") - kafka_system_test_utils.validate_data_matched_in_multi_topics_from_single_consumer_producer(self.systemTestEnv, self.testcaseEnv, replicationUtils) - - except Exception as e: - self.log_message("Exception while running test {0}".format(e)) - traceback.print_exc() - self.testcaseEnv.validationStatusDict["Test completed"] = 
"FAILED" - - finally: - if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly: - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - diff --git a/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json b/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json deleted file mode 100644 index 1f0b7180a9d..00000000000 --- a/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "description": {"01":"To Test : 'Basic offset management test.'", - "02":"Set up a Zk and Kafka cluster.", - "03":"Produce messages to a multiple topics - various partition counts.", - "04":"Start multiple consumer groups to read various subsets of above topics.", - "05":"Bounce consumers.", - "06":"Verify that there are no duplicate messages or lost messages on any consumer group.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0" - }, - "testcase_args": { - "bounce_leaders": "false", - "bounce_consumers": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50", - "num_topics_for_auto_generated_string":"1" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_1.log", - "config_filename": "kafka_server_1.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_2.log", - "config_filename": "kafka_server_2.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_3.log", - "config_filename": "kafka_server_3.properties" - }, - { - "entity_id": "4", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "topic": "test", - "threads": "3", - "compression-codec": "0", - "message-size": "500", - "message": "1000", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "6", - "topic": "test_0001", - "group.id": "group1", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_6.properties" - } - ] -} diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties deleted file mode 100644 index 9efbd9da7c4..00000000000 
--- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=1 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. -#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9091 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_1_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
- -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=10240 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2108 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=3 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 - -offsets.topic.num.partitions=2 -offsets.topic.replication.factor=4 - -kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-1/metrics -kafka.csv.metrics.reporter.enabled=true -kafka.metrics.polling.interval.secs=5 -kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties deleted file mode 100644 index d4bf702554a..00000000000 --- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=2 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. -#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9092 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_2_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. 
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=10240 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2108 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=3 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 - -offsets.topic.num.partitions=2 -offsets.topic.replication.factor=4 - -kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-2/metrics -kafka.csv.metrics.reporter.enabled=true -kafka.metrics.polling.interval.secs=5 -kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties deleted file mode 100644 index e6e06bef378..00000000000 --- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. 
-broker.id=3 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. -#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9093 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_3_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. 
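The retention comments above describe an either/or rule: a segment becomes eligible for deletion once it is older than log.retention.hours, or once pruning it would still leave at least log.retention.bytes in the log (the size-based policy is disabled here via -1). A toy restatement of that rule, purely illustrative rather than the broker's actual cleanup logic:

    def segment_deletable(age_hours, total_log_bytes, segment_bytes,
                          retention_hours=168, retention_bytes=-1):
        # Time-based criterion: the segment is older than log.retention.hours.
        too_old = age_hours > retention_hours
        # Size-based criterion: only active when log.retention.bytes is not -1;
        # prune as long as the remaining segments still exceed the byte budget.
        over_budget = (retention_bytes != -1 and
                       total_log_bytes - segment_bytes >= retention_bytes)
        # Either criterion alone is sufficient.
        return too_old or over_budget

The unusually small log.segment.bytes=10240 in these test configs forces frequent segment rolls, presumably so that retention and offset-management behaviour can be exercised within a short run.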
-#log.segment.size=536870912 -log.segment.bytes=10240 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2108 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=3 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 - -offsets.topic.num.partitions=2 -offsets.topic.replication.factor=4 - -kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-3/metrics -kafka.csv.metrics.reporter.enabled=true -kafka.metrics.polling.interval.secs=5 -kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties deleted file mode 100644 index 2cb03e4fbec..00000000000 --- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties +++ /dev/null @@ -1,147 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=4 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. 
-#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9094 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_4_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=10240 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2108 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=3 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=4096 -num.replica.fetchers=1 - -offsets.topic.num.partitions=2 -offsets.topic.replication.factor=4 - -kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-4/metrics -kafka.csv.metrics.reporter.enabled=true -kafka.metrics.polling.interval.secs=5 -kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter diff --git a/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties b/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties deleted file mode 100644 index 97c07b9cd47..00000000000 --- a/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. 
-dataDir=/tmp/zookeeper_0 -# the port at which the clients will connect -clientPort=2108 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 -syncLimit=5 -initLimit=10 -tickTime=2000 -server.1=localhost:2107:2109 diff --git a/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json b/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json deleted file mode 100644 index c5866a2ecc4..00000000000 --- a/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json +++ /dev/null @@ -1,127 +0,0 @@ -{ - "description": {"01":"To Test : 'Basic offset management test.'", - "02":"Set up a Zk and Kafka cluster.", - "03":"Produce messages to a multiple topics - various partition counts.", - "04":"Start multiple consumer groups to read various subsets of above topics.", - "05":"Bounce consumers.", - "06":"Verify that there are no duplicate messages or lost messages on any consumer group.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0" - }, - "testcase_args": { - "bounce_leaders": "false", - "bounce_consumers": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50", - "num_topics_for_auto_generated_string":"3" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_1.log", - "config_filename": "kafka_server_1.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_2.log", - "config_filename": "kafka_server_2.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_3.log", - "config_filename": "kafka_server_3.properties" - }, - { - "entity_id": "4", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "topic": "test", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "1000", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "6", - "topic": "test_0001", - "group.id": "group1", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_0002", - "group.id": "group2", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_7.properties" - }, - { - "entity_id": "8", - "topic": 
"test_0002", - "group.id": "group2", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_8.properties" - }, - { - "entity_id": "9", - "topic": "test_0002", - "group.id": "group2", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_9.properties" - }, - { - "entity_id": "10", - "topic": "test_0003", - "group.id": "group2", - "consumer-timeout-ms": "30000", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer_10.properties" - } - ] -} diff --git a/system_test/producer_perf/README b/system_test/producer_perf/README deleted file mode 100644 index be3bb51302d..00000000000 --- a/system_test/producer_perf/README +++ /dev/null @@ -1,9 +0,0 @@ -This test produces a large number of messages to a broker. It measures the throughput and tests -the amount of data received is expected. - -To run this test, do -bin/run-test.sh - -The expected output is given in expected.out. There are 2 things to pay attention to: -1. The output should have a line "test passed". -2. The throughput from the producer should be around 300,000 Messages/sec on a typical machine. diff --git a/system_test/producer_perf/bin/expected.out b/system_test/producer_perf/bin/expected.out deleted file mode 100644 index 311d9b73923..00000000000 --- a/system_test/producer_perf/bin/expected.out +++ /dev/null @@ -1,32 +0,0 @@ -start the servers ... -start producing 2000000 messages ... -[2011-05-17 14:31:12,568] INFO Creating async producer for broker id = 0 at localhost:9092 (kafka.producer.ProducerPool) -thread 0: 100000 messages sent 3272786.7779 nMsg/sec 3.1212 MBs/sec -thread 0: 200000 messages sent 3685956.5057 nMsg/sec 3.5152 MBs/sec -thread 0: 300000 messages sent 3717472.1190 nMsg/sec 3.5453 MBs/sec -thread 0: 400000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec -thread 0: 500000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec -thread 0: 600000 messages sent 3722315.2801 nMsg/sec 3.5499 MBs/sec -thread 0: 700000 messages sent 3718854.5928 nMsg/sec 3.5466 MBs/sec -thread 0: 800000 messages sent 3714020.4271 nMsg/sec 3.5420 MBs/sec -thread 0: 900000 messages sent 3713330.8578 nMsg/sec 3.5413 MBs/sec -thread 0: 1000000 messages sent 3710575.1391 nMsg/sec 3.5387 MBs/sec -thread 0: 1100000 messages sent 3711263.6853 nMsg/sec 3.5393 MBs/sec -thread 0: 1200000 messages sent 3716090.6726 nMsg/sec 3.5439 MBs/sec -thread 0: 1300000 messages sent 3709198.8131 nMsg/sec 3.5374 MBs/sec -thread 0: 1400000 messages sent 3705762.4606 nMsg/sec 3.5341 MBs/sec -thread 0: 1500000 messages sent 3701647.2330 nMsg/sec 3.5302 MBs/sec -thread 0: 1600000 messages sent 3696174.4594 nMsg/sec 3.5249 MBs/sec -thread 0: 1700000 messages sent 3703703.7037 nMsg/sec 3.5321 MBs/sec -thread 0: 1800000 messages sent 3703017.9596 nMsg/sec 3.5315 MBs/sec -thread 0: 1900000 messages sent 3700277.5208 nMsg/sec 3.5289 MBs/sec -thread 0: 2000000 messages sent 3702332.4695 nMsg/sec 3.5308 MBs/sec -[2011-05-17 14:33:01,102] INFO Closing all async producers (kafka.producer.ProducerPool) -[2011-05-17 14:33:01,103] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer) -Total Num Messages: 2000000 bytes: 400000000 in 108.678 secs -Messages/sec: 18402.9886 -MB/sec: 3.5101 -wait for data to be persisted -test passed -bin/../../../bin/kafka-server-start.sh: line 11: 21110 Terminated $(dirname $0)/kafka-run-class.sh kafka.Kafka $@ -bin/../../../bin/zookeeper-server-start.sh: line 9: 21109 Terminated $(dirname 
$0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@ diff --git a/system_test/producer_perf/bin/run-compression-test.sh b/system_test/producer_perf/bin/run-compression-test.sh deleted file mode 100755 index 5297d1f93e3..00000000000 --- a/system_test/producer_perf/bin/run-compression-test.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -num_messages=2000000 -message_size=200 - -base_dir=$(dirname $0)/.. - -rm -rf /tmp/zookeeper -rm -rf /tmp/kafka-logs - -echo "start the servers ..." -$base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties 2>&1 > $base_dir/zookeeper.log & -$base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties 2>&1 > $base_dir/kafka.log & - -sleep 4 -echo "start producing $num_messages messages ..." -$base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topics test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 num_messages --async --compression-codec 1 - -echo "wait for data to be persisted" -cur_offset="-1" -quit=0 -while [ $quit -eq 0 ] -do - sleep 2 - target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1` - if [ $target_size -eq $cur_offset ] - then - quit=1 - fi - cur_offset=$target_size -done - -sleep 2 -actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1` -num_batches=`expr $num_messages \/ $message_size` -expected_size=`expr $num_batches \* 262` - -if [ $actual_size != $expected_size ] -then - echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!" -else - echo "test passed" -fi - -ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null -sleep 2 -ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null diff --git a/system_test/producer_perf/bin/run-test.sh b/system_test/producer_perf/bin/run-test.sh deleted file mode 100755 index 9a3b8858a9b..00000000000 --- a/system_test/producer_perf/bin/run-test.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -num_messages=2000000 -message_size=200 - -base_dir=$(dirname $0)/.. - -rm -rf /tmp/zookeeper -rm -rf /tmp/kafka-logs - -echo "start the servers ..." -$base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties 2>&1 > $base_dir/zookeeper.log & -$base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties 2>&1 > $base_dir/kafka.log & - -sleep 4 -echo "start producing $num_messages messages ..." -$base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topics test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 num_messages --async - -echo "wait for data to be persisted" -cur_offset="-1" -quit=0 -while [ $quit -eq 0 ] -do - sleep 2 - target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1` - if [ $target_size -eq $cur_offset ] - then - quit=1 - fi - cur_offset=$target_size -done - -sleep 2 -actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1` -msg_full_size=`expr $message_size + 10` -expected_size=`expr $num_messages \* $msg_full_size` - -if [ $actual_size != $expected_size ] -then - echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!" -else - echo "test passed" -fi - -ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null -sleep 2 -ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null diff --git a/system_test/producer_perf/config/server.properties b/system_test/producer_perf/config/server.properties deleted file mode 100644 index 83a1e06794c..00000000000 --- a/system_test/producer_perf/config/server.properties +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=0 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. 
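Both run-test.sh above and run-compression-test.sh follow the same shape: start ZooKeeper and a broker, run ProducerPerformance against topic test01, poll GetOffsetShell until the reported offset stops growing, then compare that final value against an expected byte count (num_messages * (message_size + 10) for the plain run, num_batches * 262 for the compressed run, exactly as the scripts compute them). A compact Python restatement of the waiting loop and the size check, a sketch of the scripts' logic rather than a drop-in replacement; the GetOffsetShell invocation is copied from the scripts:

    import subprocess
    import time

    NUM_MESSAGES, MESSAGE_SIZE = 2000000, 200

    def current_offset():
        # Shell out to GetOffsetShell the same way run-test.sh does and return
        # the last line of its output as an integer.
        out = subprocess.check_output(
            ["bin/kafka-run-class.sh", "kafka.tools.GetOffsetShell",
             "--server", "kafka://localhost:9092", "--topic", "test01",
             "--partition", "0", "--time", "-1", "--offsets", "1"])
        return int(out.decode().splitlines()[-1])

    def wait_until_persisted(poll_secs=2):
        # Keep polling until two consecutive reads return the same value,
        # i.e. the broker has stopped appending data for this topic/partition.
        prev = -1
        while True:
            time.sleep(poll_secs)
            cur = current_offset()
            if cur == prev:
                return cur
            prev = cur

    # run-test.sh expects 10 bytes of per-message overhead on top of the payload:
    expected = NUM_MESSAGES * (MESSAGE_SIZE + 10)   # 2,000,000 * 210 = 420,000,000 bytes
    print("test passed" if wait_until_persisted() == expected else "test failed")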
-# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9092 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=536870912 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zookeeper.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 diff --git a/system_test/producer_perf/config/zookeeper.properties b/system_test/producer_perf/config/zookeeper.properties deleted file mode 100644 index bd3fe84cb43..00000000000 --- a/system_test/producer_perf/config/zookeeper.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. 
-dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 diff --git a/system_test/replication_testsuite/__init__.py b/system_test/replication_testsuite/__init__.py deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/system_test/replication_testsuite/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/system_test/replication_testsuite/config/console_consumer.properties b/system_test/replication_testsuite/config/console_consumer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/replication_testsuite/config/consumer.properties b/system_test/replication_testsuite/config/consumer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/replication_testsuite/config/log4j.properties b/system_test/replication_testsuite/config/log4j.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/replication_testsuite/config/producer.properties b/system_test/replication_testsuite/config/producer.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/replication_testsuite/config/producer_performance.properties b/system_test/replication_testsuite/config/producer_performance.properties deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/system_test/replication_testsuite/config/server.properties b/system_test/replication_testsuite/config/server.properties deleted file mode 100644 index d1dff6865b3..00000000000 --- a/system_test/replication_testsuite/config/server.properties +++ /dev/null @@ -1,139 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned -# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost -# may not be what you want. 
-#host.name= - - -############################# Socket Server Settings ############################# - -# The port the socket server listens on -port=9091 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=2 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# The directory under which to store log files -log.dir=/tmp/kafka_server_logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=5 - -# Overrides for for the default given by num.partitions on a per-topic basis -#topic.partition.count.map=topic1:3, topic2:4 - -############################# Log Flush Policy ############################# - -# The following configurations control the flush of data to disk. This is the most -# important performance knob in kafka. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. -# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). -# 3. Throughput: The flush is generally the most expensive operation. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -log.flush.interval.ms=1000 - -# Per-topic overrides for log.flush.interval.ms -#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000 - -# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. -log.flush.scheduler.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 -log.retention.bytes=-1 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.size=536870912 -log.segment.bytes=102400 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.cleanup.interval.mins=1 - -############################# Zookeeper ############################# - -# Enable connecting to zookeeper -enable.zookeeper=true - -# Zk connection string (see zk docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=1000000 - -monitoring.period.secs=1 -message.max.bytes=1000000 -queued.max.requests=500 -log.roll.hours=168 -log.index.size.max.bytes=10485760 -log.index.interval.bytes=4096 -auto.create.topics.enable=true -controller.socket.timeout.ms=30000 -default.replication.factor=1 -replica.lag.time.max.ms=10000 -replica.lag.max.messages=4000 -replica.socket.timeout.ms=30000 -replica.socket.receive.buffer.bytes=65536 -replica.fetch.max.bytes=1048576 -replica.fetch.wait.max.ms=500 -replica.fetch.min.bytes=1 -num.replica.fetchers=1 diff --git a/system_test/replication_testsuite/config/zookeeper.properties b/system_test/replication_testsuite/config/zookeeper.properties deleted file mode 100644 index 74cbf90428f..00000000000 --- a/system_test/replication_testsuite/config/zookeeper.properties +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 diff --git a/system_test/replication_testsuite/replica_basic_test.py b/system_test/replication_testsuite/replica_basic_test.py deleted file mode 100644 index 16a24a40705..00000000000 --- a/system_test/replication_testsuite/replica_basic_test.py +++ /dev/null @@ -1,461 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-#!/usr/bin/env python - -# =================================== -# replica_basic_test.py -# =================================== - -import inspect -import logging -import os -import pprint -import signal -import subprocess -import sys -import time -import traceback - -from system_test_env import SystemTestEnv -sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR) - -from setup_utils import SetupUtils -from replication_utils import ReplicationUtils -import system_test_utils -from testcase_env import TestcaseEnv - -# product specific: Kafka -import kafka_system_test_utils -import metrics - -class ReplicaBasicTest(ReplicationUtils, SetupUtils): - - testModuleAbsPathName = os.path.realpath(__file__) - testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName)) - - def __init__(self, systemTestEnv): - - # SystemTestEnv - provides cluster level environment settings - # such as entity_id, hostname, kafka_home, java_home which - # are available in a list of dictionary named - # "clusterEntityConfigDictList" - self.systemTestEnv = systemTestEnv - - super(ReplicaBasicTest, self).__init__(self) - - # dict to pass user-defined attributes to logger argument: "extra" - d = {'name_of_class': self.__class__.__name__} - - def signal_handler(self, signal, frame): - self.log_message("Interrupt detected - User pressed Ctrl+c") - - # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - sys.exit(1) - - def runTest(self): - - # ====================================================================== - # get all testcase directories under this testsuite - # ====================================================================== - testCasePathNameList = system_test_utils.get_dir_paths_with_prefix( - self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX) - testCasePathNameList.sort() - - replicationUtils = ReplicationUtils(self) - - # ============================================================= - # launch each testcase one by one: testcase_1, testcase_2, ... 
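        # runTest() gathers its testcase directories with
        # system_test_utils.get_dir_paths_with_prefix (imported above; the helper
        # itself is not shown in this hunk). A rough, illustrative equivalent,
        # assuming SYSTEM_TEST_CASE_PREFIX is the "testcase_" naming used
        # throughout this diff:
        #
        #     import os
        #
        #     def get_dir_paths_with_prefix(base_dir, prefix):
        #         # Absolute paths of sub-directories whose names start with the prefix.
        #         return [os.path.join(base_dir, d)
        #                 for d in os.listdir(base_dir)
        #                 if d.startswith(prefix)
        #                 and os.path.isdir(os.path.join(base_dir, d))]
        #
        # The sort() call above then keeps testcases running in a deterministic
        # order (testcase_0001, testcase_0002, ...).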
- # ============================================================= - for testCasePathName in testCasePathNameList: - - skipThisTestCase = False - - try: - # ====================================================================== - # A new instance of TestcaseEnv to keep track of this testcase's env vars - # and initialize some env vars as testCasePathName is available now - # ====================================================================== - self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self) - self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName - self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName) - self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"] - - # ====================================================================== - # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json - # ====================================================================== - testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"] - - if self.systemTestEnv.printTestDescriptionsOnly: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - continue - elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName): - self.log_message("Skipping : " + testcaseDirName) - skipThisTestCase = True - continue - else: - self.testcaseEnv.printTestCaseDescription(testcaseDirName) - system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName) - - - # ============================================================================== # - # ============================================================================== # - # Product Specific Testing Code Starts Here: # - # ============================================================================== # - # ============================================================================== # - - # get optional testcase arguments - logRetentionTest = "false" - try: - logRetentionTest = self.testcaseEnv.testcaseArgumentsDict["log_retention_test"] - except: - pass - consumerMultiTopicsMode = "false" - try: - consumerMultiTopicsMode = self.testcaseEnv.testcaseArgumentsDict["consumer_multi_topics_mode"] - except: - pass - autoCreateTopic = "false" - try: - autoCreateTopic = self.testcaseEnv.testcaseArgumentsDict["auto_create_topic"] - except: - pass - - - # initialize self.testcaseEnv with user-defined environment variables (product specific) - self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = "" - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False - self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False - self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"] = [] - - # initialize signal handler - signal.signal(signal.SIGINT, self.signal_handler) - - # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file: - # system_test/_testsuite/testcase_/testcase__properties.json - self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data( - self.testcaseEnv.testcasePropJsonPathName) - - # clean up data directories specified in zookeeper.properties and kafka_server_.properties - kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase - # for collecting logs from remote machines - kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv) - - 
# TestcaseEnv - initialize producer & consumer config / log file pathnames - kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv) - - # generate remote hosts log/config dirs if not exist - kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # generate properties files for zookeeper, kafka, producer, consumer: - # 1. copy system_test/_testsuite/config/*.properties to - # system_test/_testsuite/testcase_/config/ - # 2. update all properties files in system_test/_testsuite/testcase_/config - # by overriding the settings specified in: - # system_test/_testsuite/testcase_/testcase__properties.json - kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName, - self.testcaseEnv, self.systemTestEnv) - - # ============================================= - # preparing all entities to start the test - # ============================================= - self.log_message("starting zookeepers") - kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 2s") - time.sleep(2) - - self.log_message("starting brokers") - kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - if autoCreateTopic.lower() == "false": - self.log_message("creating topics") - kafka_system_test_utils.create_topic_for_producer_performance(self.systemTestEnv, self.testcaseEnv) - self.anonLogger.info("sleeping for 5s") - time.sleep(5) - - # ============================================= - # start ConsoleConsumer if this is a Log Retention test - # ============================================= - if logRetentionTest.lower() == "true": - self.log_message("starting consumer in the background") - kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv) - time.sleep(1) - - # ============================================= - # starting producer - # ============================================= - self.log_message("starting producer in the background") - kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False) - msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"] - self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages") - time.sleep(int(msgProducingFreeTimeSec)) - - # ============================================= - # A while-loop to bounce leader as specified - # by "num_iterations" in testcase_n_properties.json - # ============================================= - i = 1 - numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) - brokerType = self.testcaseEnv.testcaseArgumentsDict["broker_type"] - bounceBrokerFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_broker"] - - while i <= numIterations: - self.log_message("Iteration " + str(i) + " of " + str(numIterations)) - self.log_message("bounce_broker flag : " + bounceBrokerFlag) - - leaderDict = None - controllerDict = None - stoppedBrokerEntityId = "" - - # ============================================== - # Find out the entity id for the stopping broker - # ============================================== - if brokerType == "leader" or brokerType == "follower": - self.log_message("looking up leader") - leaderDict = kafka_system_test_utils.get_leader_attributes(self.systemTestEnv, self.testcaseEnv) - - # ========================== - # leaderDict looks like this: - # 
========================== - #{'entity_id': u'3', - # 'partition': '0', - # 'timestamp': 1345050255.8280001, - # 'hostname': u'localhost', - # 'topic': 'test_1', - # 'brokerid': '3'} - - if brokerType == "leader": - stoppedBrokerEntityId = leaderDict["entity_id"] - self.log_message("Found leader with entity id: " + stoppedBrokerEntityId) - else: # Follower - self.log_message("looking up follower") - # a list of all brokers - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(self.systemTestEnv.clusterEntityConfigDictList, "role", "broker", "entity_id") - - # we pick the first non-leader broker as the follower - firstFollowerEntityId = None - for brokerEntityId in brokerEntityIdList: - if brokerEntityId != leaderDict["entity_id"]: - firstFollowerEntityId = brokerEntityId - break - stoppedBrokerEntityId = firstFollowerEntityId - self.log_message("Found follower with entity id: " + stoppedBrokerEntityId) - - elif brokerType == "controller": - self.log_message("looking up controller") - controllerDict = kafka_system_test_utils.get_controller_attributes(self.systemTestEnv, self.testcaseEnv) - - # ========================== - # controllerDict looks like this: - # ========================== - #{'entity_id': u'3', - # 'timestamp': 1345050255.8280001, - # 'hostname': u'localhost', - # 'brokerid': '3'} - - stoppedBrokerEntityId = controllerDict["entity_id"] - self.log_message("Found controller with entity id: " + stoppedBrokerEntityId) - - # ============================================= - # Bounce the broker - # ============================================= - if bounceBrokerFlag.lower() == "true": - if brokerType == "leader": - # validate to see if leader election is successful - self.log_message("validating leader election") - kafka_system_test_utils.validate_leader_election_successful(self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict) - - # trigger leader re-election by stopping leader to get re-election latency - #reelectionLatency = kafka_system_test_utils.get_reelection_latency(self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict) - #latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"] - #self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms" - #self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"].append("{0:.2f}".format(reelectionLatency * 1000)) - - elif brokerType == "follower": - # stopping Follower - self.log_message("stopping follower with entity id: " + firstFollowerEntityId) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, firstFollowerEntityId, self.testcaseEnv.entityBrokerParentPidDict[firstFollowerEntityId]) - - elif brokerType == "controller": - # stopping Controller - self.log_message("stopping controller : " + controllerDict["brokerid"]) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, controllerDict["entity_id"], self.testcaseEnv.entityBrokerParentPidDict[controllerDict["entity_id"]]) - - brokerDownTimeInSec = 5 - try: - brokerDownTimeInSec = int(self.testcaseEnv.testcaseArgumentsDict["broker_down_time_in_sec"]) - except: - pass # take default - time.sleep(brokerDownTimeInSec) - - # starting previously terminated broker - self.log_message("starting the previously terminated broker") - kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId) - - else: - # GC Pause simulation - pauseTime = None - try: - hostname = 
leaderDict["hostname"] - pauseTime = self.testcaseEnv.testcaseArgumentsDict["pause_time_in_seconds"] - parentPid = self.testcaseEnv.entityBrokerParentPidDict[leaderDict["entity_id"]] - pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid) - system_test_utils.simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTime) - except: - pass - - - self.anonLogger.info("sleeping for 60s") - time.sleep(60) - i += 1 - # while loop - - # update Leader Election Latency MIN/MAX to testcaseEnv.validationStatusDict - #self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = None - #try: - # self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = \ - # min(self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"]) - #except: - # pass - # - #self.testcaseEnv.validationStatusDict["Leader Election Latency MAX"] = None - #try: - # self.testcaseEnv.validationStatusDict["Leader Election Latency MAX"] = \ - # max(self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"]) - #except: - # pass - - # ============================================= - # tell producer to stop - # ============================================= - self.testcaseEnv.lock.acquire() - self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(1) - - # ============================================= - # wait for producer thread's update of - # "backgroundProducerStopped" to be "True" - # ============================================= - while 1: - self.testcaseEnv.lock.acquire() - self.logger.info("status of backgroundProducerStopped : [" + \ - str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d) - if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]: - time.sleep(1) - self.testcaseEnv.lock.release() - self.logger.info("all producer threads completed", extra=self.d) - break - time.sleep(1) - self.testcaseEnv.lock.release() - time.sleep(2) - - # ============================================= - # collect logs from remote hosts to find the - # minimum common offset of a certain log - # segment file among all replicas - # ============================================= - minStartingOffsetDict = None - if logRetentionTest.lower() == "true": - self.anonLogger.info("sleeping for 60s to make sure log truncation is completed") - time.sleep(60) - kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - minStartingOffsetDict = kafka_system_test_utils.getMinCommonStartingOffset(self.systemTestEnv, self.testcaseEnv) - print - pprint.pprint(minStartingOffsetDict) - - # ============================================= - # starting debug consumer - # ============================================= - if consumerMultiTopicsMode.lower() == "false": - self.log_message("starting debug consumers in the background") - kafka_system_test_utils.start_simple_consumer(self.systemTestEnv, self.testcaseEnv, minStartingOffsetDict) - self.anonLogger.info("sleeping for 10s") - time.sleep(10) - - # ============================================= - # starting console consumer - # ============================================= - if logRetentionTest.lower() == "false": - self.log_message("starting consumer in the background") - kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv) - time.sleep(10) - - # ============================================= - # this testcase is completed - stop all entities - # 
============================================= - self.log_message("stopping all entities") - for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items(): - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) - - # make sure all entities are stopped - kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv) - - # ============================================= - # collect logs from remote hosts - # ============================================= - kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) - - # ============================================= - # validate the data matched and checksum - # ============================================= - self.log_message("validating data matched") - - if logRetentionTest.lower() == "true": - kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils) - elif consumerMultiTopicsMode.lower() == "true": - kafka_system_test_utils.validate_data_matched_in_multi_topics_from_single_consumer_producer( - self.systemTestEnv, self.testcaseEnv, replicationUtils) - else: - kafka_system_test_utils.validate_simple_consumer_data_matched_across_replicas(self.systemTestEnv, self.testcaseEnv) - kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv) - kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils) - - kafka_system_test_utils.validate_index_log(self.systemTestEnv, self.testcaseEnv) - - # ============================================= - # draw graphs - # ============================================= - metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME, - self.testcaseEnv, - self.systemTestEnv.clusterEntityConfigDictList) - - # build dashboard, one for each role - metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME, - self.testcaseEnv.testCaseDashboardsDir, - self.systemTestEnv.clusterEntityConfigDictList) - except Exception as e: - self.log_message("Exception while running test {0}".format(e)) - traceback.print_exc() - self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED" - - - finally: - if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly: - self.log_message("stopping all entities - please wait ...") - kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) - diff --git a/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json b/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json deleted file mode 100644 index 7a32e8d5032..00000000000 --- a/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : Base Test", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:-1, comp:0", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": 
"1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json b/system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json deleted file mode 100644 index 29294149e32..00000000000 --- a/system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:-1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json b/system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json deleted file mode 100644 index d9818e19f9e..00000000000 --- a/system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json b/system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json deleted file mode 100644 index fe42626ef82..00000000000 --- a/system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:-1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json b/system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json deleted file mode 100644 index 37d180a794a..00000000000 --- a/system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json b/system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json deleted file mode 100644 index dae8f763db8..00000000000 --- a/system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:-1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json b/system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json deleted file mode 100644 index b6f513ff510..00000000000 --- a/system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:-1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json b/system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json deleted file mode 100644 index 4954752089b..00000000000 --- a/system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json b/system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json deleted file mode 100644 index 0476b121209..00000000000 --- a/system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json b/system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json deleted file mode 100644 index e25ddb917dd..00000000000 --- a/system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. comp => 1; 4. 
log segment size => 1M", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 1048576 (1M)" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json b/system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json deleted file mode 100644 index ac175700340..00000000000 --- a/system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Replication Basic : 1. 
auto create topic => true", - "02":"Produce and consume messages to a single topic - 3 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 1048576 (1M)" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0021/cluster_config.json b/system_test/replication_testsuite/testcase_0021/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0021/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - 
"kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json b/system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json deleted file mode 100644 index f35a4397820..00000000000 --- a/system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "description": {"01":"Replication Basic on Multi Topics & Partitions : Base Test", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:-1, comp:0", - "07":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - 
"log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0022/cluster_config.json b/system_test/replication_testsuite/testcase_0022/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0022/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json b/system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json deleted file mode 100644 index 5a168f3f5cf..00000000000 --- a/system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. acks => 1; 2. 
log segment size => 512K", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:sync, acks:1, comp:0", - "07":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0023/cluster_config.json b/system_test/replication_testsuite/testcase_0023/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0023/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": 
"localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json b/system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json deleted file mode 100644 index 09d81a6584a..00000000000 --- a/system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. mode => async; 2. acks => 1; 3. comp => 1", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - 
}, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json b/system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json deleted file mode 100644 index 5661b88b14f..00000000000 --- a/system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. auto_create_topic => true", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"At the end it verifies the log size and contents", - "05":"Use a consumer to verify no message loss.", - "06":"Producer dimensions : mode:async, acks:1, comp:1", - "07":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "auto_create_topic": "true", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1,test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_1,test_2", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_5.log", - 
"config_filename": "console_consumer_5.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json b/system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json deleted file mode 100644 index b9517b4c655..00000000000 --- a/system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : Base Test", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json b/system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json deleted file mode 100644 index 3eb39a26f60..00000000000 --- a/system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json b/system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json deleted file mode 100644 index 6bfc7570c88..00000000000 --- a/system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json b/system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json deleted file mode 100644 index 1cfe71ca3ac..00000000000 --- a/system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. 
comp => 0", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json b/system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json deleted file mode 100644 index 13f3ac0eec4..00000000000 --- a/system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json b/system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json deleted file mode 100644 index ccd4774f7fc..00000000000 --- a/system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. comp => 1; 2. 
no of partion => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json b/system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json deleted file mode 100644 index b1da75a0b26..00000000000 --- a/system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 1; 3. 
no of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json b/system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json deleted file mode 100644 index 359abe7ab4d..00000000000 --- a/system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1; 3. no. 
of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json b/system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json deleted file mode 100644 index 90ea4417947..00000000000 --- a/system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. 
of partitions => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 10000000" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json b/system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json deleted file mode 100644 index f11c705d92c..00000000000 --- a/system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitins => 3; 5. 
log segment size => 1M", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 1048576 (1M)" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json b/system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json deleted file mode 100644 index cc1eae690f6..00000000000 --- a/system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : Base Test", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - 
"broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json b/system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json deleted file mode 100644 index 48a6c9d2351..00000000000 --- a/system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. 
mode => async", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json b/system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json deleted file mode 100644 index a88b49b4363..00000000000 --- a/system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json b/system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json deleted file mode 100644 index 12616143961..00000000000 --- a/system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json b/system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json deleted file mode 100644 index 2d649da83b0..00000000000 --- a/system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json b/system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json deleted file mode 100644 index cbad6f2b7b9..00000000000 --- a/system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json b/system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json deleted file mode 100644 index 0099a8f5db4..00000000000 --- a/system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json b/system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json deleted file mode 100644 index 6954d439627..00000000000 --- a/system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json b/system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json deleted file mode 100644 index ab1e47a87c6..00000000000 --- a/system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures in Replication : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0121/cluster_config.json b/system_test/replication_testsuite/testcase_0121/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0121/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - 
"cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json b/system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json deleted file mode 100644 index c7940c46056..00000000000 --- a/system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": 
"producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0122/cluster_config.json b/system_test/replication_testsuite/testcase_0122/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0122/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json b/system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json deleted file mode 100644 index 35daf5b4cc3..00000000000 --- a/system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0123/cluster_config.json b/system_test/replication_testsuite/testcase_0123/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0123/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - 
"jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json b/system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json deleted file mode 100644 index fe5e49af72d..00000000000 --- a/system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. comp => 0", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - 
"default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0124/cluster_config.json b/system_test/replication_testsuite/testcase_0124/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0124/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json b/system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json deleted file mode 100644 index bff5d7342fc..00000000000 --- a/system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
log.index.interval.bytes => 490", - "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", - "03":"Produce and consume messages to 2 topics - 3 partitions", - "04":"This test sends messages to 3 replicas", - "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "06":"Restart the terminated broker", - "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "08":"At the end it verifies the log size and contents", - "09":"Use a consumer to verify no message loss.", - "10":"Producer dimensions : mode:sync, acks:-1, comp:0", - "11":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0125/cluster_config.json b/system_test/replication_testsuite/testcase_0125/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- 
a/system_test/replication_testsuite/testcase_0125/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json b/system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json deleted file mode 100644 index 1f57ecc0ada..00000000000 --- a/system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
ack => 1", - "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", - "03":"Produce and consume messages to 2 topics - 3 partitions", - "04":"This test sends messages to 3 replicas", - "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "06":"Restart the terminated broker", - "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "08":"At the end it verifies the log size and contents", - "09":"Use a consumer to verify no message loss.", - "10":"Producer dimensions : mode:sync, acks:1, comp:0", - "11":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0126/cluster_config.json b/system_test/replication_testsuite/testcase_0126/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- 
a/system_test/replication_testsuite/testcase_0126/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json b/system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json deleted file mode 100644 index ffa0fc31fc9..00000000000 --- a/system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => -1, 2. 
comp => 1", - "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", - "03":"Produce and consume messages to 2 topics - 3 partitions", - "04":"This test sends messages to 3 replicas", - "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "06":"Restart the terminated broker", - "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "08":"At the end it verifies the log size and contents", - "09":"Use a consumer to verify no message loss.", - "10":"Producer dimensions : mode:sync, acks:-1, comp:1", - "11":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0127/cluster_config.json b/system_test/replication_testsuite/testcase_0127/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- 
a/system_test/replication_testsuite/testcase_0127/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json b/system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json deleted file mode 100644 index 78ecd8fa3f8..00000000000 --- a/system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
ack => 1", - "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", - "03":"Produce and consume messages to 2 topics - 3 partitions", - "04":"This test sends messages to 3 replicas", - "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "06":"Restart the terminated broker", - "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "08":"At the end it verifies the log size and contents", - "09":"Use a consumer to verify no message loss.", - "10":"Producer dimensions : mode:sync, acks:1, comp:1", - "11":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log.index.interval.bytes": "490", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json b/system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json deleted file mode 100644 index 589eb2068d6..00000000000 --- 
a/system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. auto_create_topic => true", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1,test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_1,test_2", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_5.log", - "config_filename": "console_consumer_5.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0131/cluster_config.json b/system_test/replication_testsuite/testcase_0131/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0131/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - 
"entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json b/system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json deleted file mode 100644 index 0324b6f327c..00000000000 --- a/system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": 
"kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0132/cluster_config.json b/system_test/replication_testsuite/testcase_0132/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0132/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json b/system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json deleted file mode 100644 index 83bcaaac3a6..00000000000 --- a/system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0133/cluster_config.json b/system_test/replication_testsuite/testcase_0133/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_0133/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - 
"jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json b/system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json deleted file mode 100644 index 2a1eaa51efb..00000000000 --- a/system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. comp => 0", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - 
"default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json b/system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json deleted file mode 100644 index 0a98ce5bb4f..00000000000 --- a/system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. auto_create_topic => true", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - 
"port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1,test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_1,test_2", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_5.log", - "config_filename": "console_consumer_5.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json b/system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json deleted file mode 100644 index 237a34388f2..00000000000 --- a/system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : Base Test", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - 
"sync":"true", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json b/system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json deleted file mode 100644 index 8d576108517..00000000000 --- a/system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json 
b/system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json deleted file mode 100644 index 89b933f6743..00000000000 --- a/system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json b/system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json deleted file mode 100644 index fe3f98fb36b..00000000000 --- a/system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json b/system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json deleted file mode 100644 index 7f9ced89907..00000000000 --- a/system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json b/system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json deleted file mode 100644 index ec1e83cc11d..00000000000 --- a/system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json b/system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json deleted file mode 100644 index e96ed325d77..00000000000 --- a/system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json b/system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json deleted file mode 100644 index 7ca29427d9c..00000000000 --- a/system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json b/system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json deleted file mode 100644 index cf7ccc3911e..00000000000 --- a/system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "signal_type": "SIGKILL", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "producer-retry-backoff-ms": "3500", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json b/system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json deleted file mode 100644 index 521592b4d23..00000000000 --- a/system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : Base Test", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - 
"08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json b/system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json deleted file mode 100644 index c2feeb870ae..00000000000 --- a/system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
mode => async", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json b/system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json deleted file mode 100644 index 83b4dbceb26..00000000000 --- a/system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json b/system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json deleted file mode 100644 index 629b7baf43f..00000000000 --- a/system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json b/system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json deleted file mode 100644 index a9d13c380dd..00000000000 --- a/system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json b/system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json deleted file mode 100644 index e31666917ab..00000000000 --- a/system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json b/system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json deleted file mode 100644 index 1e08f46af45..00000000000 --- a/system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json b/system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json deleted file mode 100644 index 1dd38f478a8..00000000000 --- a/system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json b/system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json deleted file mode 100644 index ac6b4d0013b..00000000000 --- a/system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "controller", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json b/system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json deleted file mode 100644 index 9f06f3072af..00000000000 --- a/system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : Base Test", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no 
message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json b/system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json deleted file mode 100644 index c264fca3876..00000000000 --- a/system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
mode => async", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json b/system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json deleted file mode 100644 index e5fdb2eab95..00000000000 --- a/system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json b/system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json deleted file mode 100644 index 27ce4e97ec0..00000000000 --- a/system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json b/system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json deleted file mode 100644 index 1148a4546b4..00000000000 --- a/system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json b/system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json deleted file mode 100644 index 1b58e9bd9bf..00000000000 --- a/system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json b/system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json deleted file mode 100644 index 42e33c265a7..00000000000 --- a/system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json b/system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json deleted file mode 100644 index ae9ce5eb72d..00000000000 --- a/system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json b/system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json deleted file mode 100644 index 72783272264..00000000000 --- a/system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "follower", - "bounce_broker": "true", - "signal_type": "SIGTERM", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json b/system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json deleted file mode 100644 index f9b775e04b2..00000000000 --- a/system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : Base Test", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - 
"broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json b/system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json deleted file mode 100644 index af468c3a733..00000000000 --- a/system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
mode => async", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json b/system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json deleted file mode 100644 index 374ff9e8685..00000000000 --- a/system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json b/system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json deleted file mode 100644 index 1b0f2eea4cc..00000000000 --- a/system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. 
acks => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json b/system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json deleted file mode 100644 index 568de4bb3b7..00000000000 --- a/system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json b/system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json deleted file mode 100644 index ab933380abb..00000000000 --- a/system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json b/system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json deleted file mode 100644 index 06b06238411..00000000000 --- a/system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json b/system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json deleted file mode 100644 index 0fda7c61549..00000000000 --- a/system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json b/system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json deleted file mode 100644 index 2879c8ff9bb..00000000000 --- a/system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to a single topic - three partition.", - "03":"This test sends messages to 3 replicas", - "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", - "05":"At the end it verifies the log size and contents", - "06":"Use a consumer to verify no message loss.", - "07":"Producer dimensions : mode:sync, acks:-1, comp:0", - "08":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "pause_time_in_seconds": "5", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_1/cluster_config.json b/system_test/replication_testsuite/testcase_1/cluster_config.json deleted file mode 100644 index ab9016dd4fc..00000000000 --- a/system_test/replication_testsuite/testcase_1/cluster_config.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" 
- }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9994" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9995" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_1/testcase_1_properties.json b/system_test/replication_testsuite/testcase_1/testcase_1_properties.json deleted file mode 100644 index 680213f1e97..00000000000 --- a/system_test/replication_testsuite/testcase_1/testcase_1_properties.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "description": {"01":"To Test : 'Leader Failure in Replication'", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "2", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "10000000", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "async":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json b/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json deleted file mode 100644 index 3f8e5870799..00000000000 --- a/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : Base Test", - "02":"Produce and 
consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json b/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json deleted file mode 100644 index c96352d5e18..00000000000 --- a/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json b/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json deleted file mode 100644 index 55fa39e1488..00000000000 --- a/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json b/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json deleted file mode 100644 index 15827eb994a..00000000000 --- a/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. 
comp => 0", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json b/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json deleted file mode 100644 index d1fa1ade07c..00000000000 --- a/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json b/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json deleted file mode 100644 index 675c76f656e..00000000000 --- a/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. comp => 1; 2. 
no of partion => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json b/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json deleted file mode 100644 index afc221cdc8b..00000000000 --- a/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 1; 3. 
no of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json b/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json deleted file mode 100644 index 5df72f3d52a..00000000000 --- a/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1; 3. no. 
of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json b/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json deleted file mode 100644 index 9b156681ec4..00000000000 --- a/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. 
of partitions => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json b/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json deleted file mode 100644 index f51abc156ae..00000000000 --- a/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitins => 3; 5. 
log segment size => 1M", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 1048576 (1M)" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10131/cluster_config.json b/system_test/replication_testsuite/testcase_10131/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_10131/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": 
"9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json b/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json deleted file mode 100644 index a140882287b..00000000000 --- a/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": 
"producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10132/cluster_config.json b/system_test/replication_testsuite/testcase_10132/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_10132/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json b/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json deleted file mode 100644 index 48b30c7e014..00000000000 --- a/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10133/cluster_config.json b/system_test/replication_testsuite/testcase_10133/cluster_config.json deleted file mode 100644 index cf147eb3f20..00000000000 --- a/system_test/replication_testsuite/testcase_10133/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - 
"hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9099" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json b/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json deleted file mode 100644 index 8276aae0aa8..00000000000 --- a/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. comp => 0", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - 
"num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json b/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json deleted file mode 100644 index 73bb8599522..00000000000 --- a/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. auto_create_topic => true", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - 
"default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1,test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_1,test_2", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_5.log", - "config_filename": "console_consumer_5.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4001/cluster_config.json b/system_test/replication_testsuite/testcase_4001/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4001/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json b/system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json deleted file mode 100644 index 2652f16b784..00000000000 --- a/system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : Base Test", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : 
mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4002/cluster_config.json b/system_test/replication_testsuite/testcase_4002/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4002/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - 
"jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json b/system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json deleted file mode 100644 index 87245971271..00000000000 --- a/system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - 
"message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4003/cluster_config.json b/system_test/replication_testsuite/testcase_4003/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4003/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json b/system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json deleted file mode 100644 index 4e3b6f56281..00000000000 --- a/system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. 
acks => -1, comp => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4004/cluster_config.json b/system_test/replication_testsuite/testcase_4004/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4004/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { 
- "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json b/system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json deleted file mode 100644 index f8718a648ab..00000000000 --- a/system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4005/cluster_config.json b/system_test/replication_testsuite/testcase_4005/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4005/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": 
"0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json b/system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json deleted file mode 100644 index af96c7b3bda..00000000000 --- a/system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. 
sync => false, acks => -1, comp => 0", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4006/cluster_config.json b/system_test/replication_testsuite/testcase_4006/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4006/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - 
"cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json b/system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json deleted file mode 100644 index e132236ff3b..00000000000 --- a/system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4007/cluster_config.json b/system_test/replication_testsuite/testcase_4007/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4007/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json b/system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json deleted file mode 100644 index 5c4e5bbfd7d..00000000000 --- a/system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. acks => -1, 2. 
comp => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4008/cluster_config.json b/system_test/replication_testsuite/testcase_4008/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4008/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json b/system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json deleted file mode 100644 index 8dce9b2b388..00000000000 --- a/system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4011/cluster_config.json b/system_test/replication_testsuite/testcase_4011/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4011/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json b/system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json deleted file mode 100644 index c6f1d1c15f8..00000000000 --- a/system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. 
of Brokers) : Base Test", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4012/cluster_config.json b/system_test/replication_testsuite/testcase_4012/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4012/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - 
{ - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json b/system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json deleted file mode 100644 index bc1ff6397ba..00000000000 --- a/system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4013/cluster_config.json b/system_test/replication_testsuite/testcase_4013/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4013/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": 
"0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json b/system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json deleted file mode 100644 index aa48a6861a7..00000000000 --- a/system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => -1, comp => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4014/cluster_config.json b/system_test/replication_testsuite/testcase_4014/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4014/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { 
- "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json b/system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json deleted file mode 100644 index 7acf8b68514..00000000000 --- a/system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4015/cluster_config.json b/system_test/replication_testsuite/testcase_4015/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4015/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - "entity_id": 
"0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json b/system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json deleted file mode 100644 index 7841273a387..00000000000 --- a/system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
sync => false, acks => -1, comp => 0", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4016/cluster_config.json b/system_test/replication_testsuite/testcase_4016/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4016/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - 
"cluster_config": [ - { - "entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json b/system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json deleted file mode 100644 index 0519d273dcf..00000000000 --- a/system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4017/cluster_config.json b/system_test/replication_testsuite/testcase_4017/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4017/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json b/system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json deleted file mode 100644 index c29077bfd5d..00000000000 --- a/system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => -1, 2. 
comp => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4018/cluster_config.json b/system_test/replication_testsuite/testcase_4018/cluster_config.json deleted file mode 100644 index 9e733cfd98f..00000000000 --- a/system_test/replication_testsuite/testcase_4018/cluster_config.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - }, - { - "entity_id": "6", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9999" - }, - { - "entity_id": "7", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9191" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json b/system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json deleted file mode 100644 index ab57e5ab187..00000000000 --- a/system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", - "02":"Produce and consume messages to 2 topics - 2 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "broker_down_time_in_sec": "5", - "message_producing_free_time_sec": "15", - "log_retention_test": "true" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.retention.size": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "2", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "new-producer":"true", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "500", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_9051/cluster_config.json b/system_test/replication_testsuite/testcase_9051/cluster_config.json deleted file mode 100644 index 8ed896b358f..00000000000 --- a/system_test/replication_testsuite/testcase_9051/cluster_config.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "cluster_config": [ - { - 
"entity_id": "0", - "hostname": "localhost", - "role": "zookeeper", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9990" - }, - { - "entity_id": "1", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9991" - }, - { - "entity_id": "2", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9992" - }, - { - "entity_id": "3", - "hostname": "localhost", - "role": "broker", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9993" - }, - { - "entity_id": "4", - "hostname": "localhost", - "role": "producer_performance", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9997" - }, - { - "entity_id": "5", - "hostname": "localhost", - "role": "console_consumer", - "cluster_name": "source", - "kafka_home": "default", - "java_home": "default", - "jmx_port": "9998" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json b/system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json deleted file mode 100644 index e959aeda4fa..00000000000 --- a/system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"To Test : 'Leader Failure in Replication'", - "02":"Produce and consume messages to 300 topics - 4 partitions.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 1048576" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "false", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "5", - "message_producing_free_time_sec": "15", - "num_topics_for_auto_generated_string": "20", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "new-producer":"true", - "topic": "t001", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", 
- "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "async":"false", - "log_filename": "producer_performance_9.log", - "config_filename": "producer_performance_9.properties" - }, - { - "entity_id": "5", - "topic": "t001", - "group.id": "mytestgroup", - "consumer-timeout-ms": "60000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer_10.log", - "config_filename": "console_consumer_10.properties" - } - ] -} diff --git a/system_test/run_all.sh b/system_test/run_all.sh deleted file mode 100755 index 0c5c02d98ef..00000000000 --- a/system_test/run_all.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -cp testcase_to_run_all.json testcase_to_run.json - -python -B system_test_runner.py - - diff --git a/system_test/run_all_replica.sh b/system_test/run_all_replica.sh deleted file mode 100755 index b3bce843814..00000000000 --- a/system_test/run_all_replica.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -cp testcase_to_run_all_replica.json testcase_to_run.json - -python -B system_test_runner.py - - diff --git a/system_test/run_sanity.sh b/system_test/run_sanity.sh deleted file mode 100755 index a301b96e1a7..00000000000 --- a/system_test/run_sanity.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -cp testcase_to_run_sanity.json testcase_to_run.json - -python -B system_test_runner.py - - diff --git a/system_test/system_test_env.py b/system_test/system_test_env.py deleted file mode 100644 index c24d3e83922..00000000000 --- a/system_test/system_test_env.py +++ /dev/null @@ -1,138 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-#!/usr/bin/env python - -# =================================== -# system_test_env.py -# =================================== - -import copy -import json -import os -import sys - -from utils import system_test_utils - -class SystemTestEnv(): - - # private: - _cwdFullPath = os.getcwd() - _thisScriptFullPathName = os.path.realpath(__file__) - _thisScriptBaseDir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))) - - # public: - SYSTEM_TEST_BASE_DIR = os.path.abspath(_thisScriptBaseDir) - SYSTEM_TEST_UTIL_DIR = os.path.abspath(SYSTEM_TEST_BASE_DIR + "/utils") - SYSTEM_TEST_SUITE_SUFFIX = "_testsuite" - SYSTEM_TEST_CASE_PREFIX = "testcase_" - SYSTEM_TEST_MODULE_EXT = ".py" - CLUSTER_CONFIG_FILENAME = "cluster_config.json" - CLUSTER_CONFIG_PATHNAME = os.path.abspath(SYSTEM_TEST_BASE_DIR + "/" + CLUSTER_CONFIG_FILENAME) - METRICS_FILENAME = "metrics.json" - METRICS_PATHNAME = os.path.abspath(SYSTEM_TEST_BASE_DIR + "/" + METRICS_FILENAME) - TESTCASE_TO_RUN_FILENAME = "testcase_to_run.json" - TESTCASE_TO_RUN_PATHNAME = os.path.abspath(SYSTEM_TEST_BASE_DIR + "/" + TESTCASE_TO_RUN_FILENAME) - TESTCASE_TO_SKIP_FILENAME = "testcase_to_skip.json" - TESTCASE_TO_SKIP_PATHNAME = os.path.abspath(SYSTEM_TEST_BASE_DIR + "/" + TESTCASE_TO_SKIP_FILENAME) - - clusterEntityConfigDictList = [] # cluster entity config for current level - clusterEntityConfigDictListInSystemTestLevel = [] # cluster entity config defined in system level - clusterEntityConfigDictListLastFoundInTestSuite = [] # cluster entity config last found in testsuite level - clusterEntityConfigDictListLastFoundInTestCase = [] # cluster entity config last found in testcase level - - systemTestResultsList = [] - testCaseToRunListDict = {} - testCaseToSkipListDict = {} - - printTestDescriptionsOnly = False - doNotValidateRemoteHost = False - - def __init__(self): - "Create an object with this system test session environment" - - # load the system level cluster config - system_test_utils.load_cluster_config(self.CLUSTER_CONFIG_PATHNAME, self.clusterEntityConfigDictList) - - # save the system level cluster config - self.clusterEntityConfigDictListInSystemTestLevel = copy.deepcopy(self.clusterEntityConfigDictList) - - # retrieve testcases to run from testcase_to_run.json - try: - testcaseToRunFileContent = open(self.TESTCASE_TO_RUN_PATHNAME, "r").read() - testcaseToRunData = json.loads(testcaseToRunFileContent) - for testClassName, caseList in testcaseToRunData.items(): - self.testCaseToRunListDict[testClassName] = caseList - except: - pass - - # retrieve testcases to skip from testcase_to_skip.json - try: - testcaseToSkipFileContent = open(self.TESTCASE_TO_SKIP_PATHNAME, "r").read() - testcaseToSkipData = json.loads(testcaseToSkipFileContent) - for testClassName, caseList in testcaseToSkipData.items(): - self.testCaseToSkipListDict[testClassName] = caseList - except: - pass - - def isTestCaseToSkip(self, testClassName, testcaseDirName): - testCaseToRunList = {} - testCaseToSkipList = {} - - try: - testCaseToRunList = self.testCaseToRunListDict[testClassName] - except: - # no 'testClassName' found => no need to run any cases for this test class - return True - - try: - testCaseToSkipList = self.testCaseToSkipListDict[testClassName] - except: - pass - - # if testCaseToRunList has elements, it takes precedence: - if len(testCaseToRunList) > 0: - #print "#### testClassName => ", testClassName - #print "#### testCaseToRunList => ", testCaseToRunList - #print "#### testcaseDirName => ", testcaseDirName - if not testcaseDirName in testCaseToRunList: 
- #self.log_message("Skipping : " + testcaseDirName) - return True - elif len(testCaseToSkipList) > 0: - #print "#### testClassName => ", testClassName - #print "#### testCaseToSkipList => ", testCaseToSkipList - #print "#### testcaseDirName => ", testcaseDirName - if testcaseDirName in testCaseToSkipList: - #self.log_message("Skipping : " + testcaseDirName) - return True - - return False - - - def getSystemTestEnvDict(self): - envDict = {} - envDict["system_test_base_dir"] = self.SYSTEM_TEST_BASE_DIR - envDict["system_test_util_dir"] = self.SYSTEM_TEST_UTIL_DIR - envDict["cluster_config_pathname"] = self.CLUSTER_CONFIG_PATHNAME - envDict["system_test_suite_suffix"] = self.SYSTEM_TEST_SUITE_SUFFIX - envDict["system_test_case_prefix"] = self.SYSTEM_TEST_CASE_PREFIX - envDict["system_test_module_ext"] = self.SYSTEM_TEST_MODULE_EXT - envDict["cluster_config_pathname"] = self.CLUSTER_CONFIG_PATHNAME - envDict["cluster_entity_config_dict_list"] = self.clusterEntityConfigDictList - envDict["system_test_results_list"] = self.systemTestResultsList - return envDict - - diff --git a/system_test/system_test_runner.py b/system_test/system_test_runner.py deleted file mode 100644 index d6251b2af83..00000000000 --- a/system_test/system_test_runner.py +++ /dev/null @@ -1,331 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/evn python - -# ================================================================= -# system_test_runner.py -# -# - This script is the test driver for a distributed environment -# system testing framework. It is located at the top level of the -# framework hierachy (in this case - system_test/). -# -# - This test driver servers as an entry point to launch a series -# of test suites (module) with multiple functionally similar test -# cases which can be grouped together. -# -# - Please refer to system_test/README.txt for more details on -# how to add test suite and test case. -# -# - In most cases, it is not necessary to make any changes to this -# script. -# ================================================================= - -from optparse import OptionParser -from system_test_env import SystemTestEnv -from utils import system_test_utils - -import logging.config -import os -import pprint -import sys - - -# load the config file for logging -logging.config.fileConfig('logging.conf') - -# 'd' is an argument to be merged into the log message (see Python doc for logging). -# In this case, corresponding class name can be appended to the end of the logging -# message to facilitate debugging. 
-d = {'name_of_class': '(system_test_runner)'} - -class report: - systemTestEnv = None - reportString = "" - reportFileName = "system_test_report.html" - systemTestReport = None - header = """ - Kafka System Test Report - - - - - - """ - footer = """ """ - - def __init__(self, systemTestEnv): - self.totalSkipped = 0 - self.totalPassed = 0 - self.totalTests = 0 - self.totalFailed = 0 - self.systemTestEnv = systemTestEnv - self.systemTestReport = open(self.reportFileName, 'w') - - def __del__(self): - self.systemTestReport.close() - self.systemTestReport = None - - def writeHtmlPage(self, body): - html = """ - - - """ - html += self.header - html += body - html += self.footer - html += """ - - """ - self.systemTestReport.write(html) - - def wrapIn(self, tag, content): - html = "\n<" + tag + ">" - html += "\n " + content - html += "\n" - return html - - def genModal(self, className, caseName, systemTestResult): - key = "validation_status" - id = className + "_" + caseName - info = self.wrapIn("h4", "Validation Status") - for validatedItem in sorted(systemTestResult[key].iterkeys()): - testItemStatus = systemTestResult[key][validatedItem] - info += validatedItem + " : " + testItemStatus - return self.wrapIn("div class=\"modal fade\" id=\"" + id + "\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"" + id + "Label\" aria-hidden=\"true\"", - self.wrapIn("div class=\"modal-dialog\"", - self.wrapIn("div class=\"modal-content\"", - self.wrapIn("div class=\"modal-header\"", - self.wrapIn("h4 class=\"modal-title\" id=\"" + id + "Label\"", - className + " - " + caseName)) + - self.wrapIn("div class=\"modal-body\"", - info) + - self.wrapIn("div class=\"modal-footer\"", - self.wrapIn("button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\"", "Close"))))) - - def summarize(self): - testItemsTableHeader = self.wrapIn("thead", - self.wrapIn("tr", - self.wrapIn("th", "Test Class Name") + - self.wrapIn("th", "Test Case Name") + - self.wrapIn("th", "Validation Status"))) - testItemsTableBody = "" - modals = "" - - for systemTestResult in self.systemTestEnv.systemTestResultsList: - self.totalTests += 1 - if "_test_class_name" in systemTestResult: - testClassName = systemTestResult["_test_class_name"] - else: - testClassName = "" - - if "_test_case_name" in systemTestResult: - testCaseName = systemTestResult["_test_case_name"] - else: - testCaseName = "" - - if "validation_status" in systemTestResult: - testItemStatus = "SKIPPED" - for key in systemTestResult["validation_status"].iterkeys(): - testItemStatus = systemTestResult["validation_status"][key] - if "FAILED" == testItemStatus: - break; - if "FAILED" == testItemStatus: - self.totalFailed += 1 - validationStatus = self.wrapIn("div class=\"text-danger\" data-toggle=\"modal\" data-target=\"#" + testClassName + "_" + testCaseName + "\"", "FAILED") - modals += self.genModal(testClassName, testCaseName, systemTestResult) - elif "PASSED" == testItemStatus: - self.totalPassed += 1 - validationStatus = self.wrapIn("div class=\"text-success\"", "PASSED") - else: - self.totalSkipped += 1 - validationStatus = self.wrapIn("div class=\"text-warning\"", "SKIPPED") - else: - self.reportString += "|" - - testItemsTableBody += self.wrapIn("tr", - self.wrapIn("td", testClassName) + - self.wrapIn("td", testCaseName) + - self.wrapIn("td", validationStatus)) - - testItemsTableBody = self.wrapIn("tbody", testItemsTableBody) - testItemsTable = self.wrapIn("table class=\"table table-striped\"", testItemsTableHeader + testItemsTableBody) - - statsTblBody = 
self.wrapIn("tr class=\"active\"", self.wrapIn("td", "Total tests") + self.wrapIn("td", str(self.totalTests))) - statsTblBody += self.wrapIn("tr class=\"success\"", self.wrapIn("td", "Total tests passed") + self.wrapIn("td", str(self.totalPassed))) - statsTblBody += self.wrapIn("tr class=\"danger\"", self.wrapIn("td", "Total tests failed") + self.wrapIn("td", str(self.totalFailed))) - statsTblBody += self.wrapIn("tr class=\"warning\"", self.wrapIn("td", "Total tests skipped") + self.wrapIn("td", str(self.totalSkipped))) - testStatsTable = self.wrapIn("table class=\"table\"", statsTblBody) - - body = self.wrapIn("div class=\"container\"", - self.wrapIn("h2", "Kafka System Test Report") + - self.wrapIn("div class=\"row\"", self.wrapIn("div class=\"col-md-4\"", testStatsTable)) + - self.wrapIn("div class=\"row\"", self.wrapIn("div class=\"col-md-6\"", testItemsTable)) + - modals) - self.writeHtmlPage(self.wrapIn("body", body)) - -def main(): - nLogger = logging.getLogger('namedLogger') - aLogger = logging.getLogger('anonymousLogger') - - optionParser = OptionParser() - optionParser.add_option("-p", "--print-test-descriptions-only", - dest="printTestDescriptionsOnly", - default=False, - action="store_true", - help="print test descriptions only - don't run the test") - - optionParser.add_option("-n", "--do-not-validate-remote-host", - dest="doNotValidateRemoteHost", - default=False, - action="store_true", - help="do not validate remote host (due to different kafka versions are installed)") - - (options, args) = optionParser.parse_args() - - print "\n" - aLogger.info("=================================================") - aLogger.info(" System Regression Test Framework") - aLogger.info("=================================================") - print "\n" - - testSuiteClassDictList = [] - - # SystemTestEnv is a class to provide all environement settings for this session - # such as the SYSTEM_TEST_BASE_DIR, SYSTEM_TEST_UTIL_DIR, ... - systemTestEnv = SystemTestEnv() - - if options.printTestDescriptionsOnly: - systemTestEnv.printTestDescriptionsOnly = True - if options.doNotValidateRemoteHost: - systemTestEnv.doNotValidateRemoteHost = True - - if not systemTestEnv.printTestDescriptionsOnly: - if not systemTestEnv.doNotValidateRemoteHost: - if not system_test_utils.setup_remote_hosts(systemTestEnv): - nLogger.error("Remote hosts sanity check failed. 
Aborting test ...", extra=d) - print - sys.exit(1) - else: - nLogger.info("SKIPPING : checking remote machines", extra=d) - print - - # get all defined names within a module: - definedItemList = dir(SystemTestEnv) - aLogger.debug("=================================================") - aLogger.debug("SystemTestEnv keys:") - for item in definedItemList: - aLogger.debug(" " + item) - aLogger.debug("=================================================") - - aLogger.info("=================================================") - aLogger.info("looking up test suites ...") - aLogger.info("=================================================") - # find all test suites in SYSTEM_TEST_BASE_DIR - for dirName in os.listdir(systemTestEnv.SYSTEM_TEST_BASE_DIR): - - # make sure this is a valid testsuite directory - if os.path.isdir(dirName) and dirName.endswith(systemTestEnv.SYSTEM_TEST_SUITE_SUFFIX): - print - nLogger.info("found a testsuite : " + dirName, extra=d) - testModulePathName = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + dirName) - - if not systemTestEnv.printTestDescriptionsOnly: - system_test_utils.setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName) - - # go through all test modules file in this testsuite - for moduleFileName in os.listdir(testModulePathName): - - # make sure it is a valid test module - if moduleFileName.endswith(systemTestEnv.SYSTEM_TEST_MODULE_EXT) \ - and not moduleFileName.startswith("__"): - - # found a test module file - nLogger.info("found a test module file : " + moduleFileName, extra=d) - - testModuleClassName = system_test_utils.sys_call("grep ^class " + testModulePathName + "/" + \ - moduleFileName + " | sed 's/^class //g' | sed 's/(.*):.*//g'") - testModuleClassName = testModuleClassName.rstrip('\n') - - # collect the test suite class data - testSuiteClassDict = {} - testSuiteClassDict["suite"] = dirName - extLenToRemove = systemTestEnv.SYSTEM_TEST_MODULE_EXT.__len__() * -1 - testSuiteClassDict["module"] = moduleFileName[:extLenToRemove] - testSuiteClassDict["class"] = testModuleClassName - testSuiteClassDictList.append(testSuiteClassDict) - - suiteName = testSuiteClassDict["suite"] - moduleName = testSuiteClassDict["module"] - className = testSuiteClassDict["class"] - - # add testsuite directory to sys.path such that the module can be loaded - sys.path.append(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + suiteName) - - if not systemTestEnv.printTestDescriptionsOnly: - aLogger.info("=================================================") - aLogger.info("Running Test for : ") - aLogger.info(" suite : " + suiteName) - aLogger.info(" module : " + moduleName) - aLogger.info(" class : " + className) - aLogger.info("=================================================") - - # dynamically loading a module and starting the test class - mod = __import__(moduleName) - theClass = getattr(mod, className) - instance = theClass(systemTestEnv) - instance.runTest() - print - - report(systemTestEnv).summarize() - - if not systemTestEnv.printTestDescriptionsOnly: - totalFailureCount = 0 - print - print "========================================================" - print " TEST REPORTS" - print "========================================================" - for systemTestResult in systemTestEnv.systemTestResultsList: - for key in sorted(systemTestResult.iterkeys()): - if key == "validation_status": - print key, " : " - testItemStatus = None - for validatedItem in sorted(systemTestResult[key].iterkeys()): - testItemStatus = 
systemTestResult[key][validatedItem] - print " ", validatedItem, " : ", testItemStatus - if "FAILED" == testItemStatus: - totalFailureCount += 1 - else: - print key, " : ", systemTestResult[key] - print - print "========================================================" - print - - print "========================================================" - print "Total failures count : " + str(totalFailureCount) - print "========================================================" - print - return totalFailureCount - - return -1 - -# ========================= -# main entry point -# ========================= - -sys.exit(main()) - - diff --git a/system_test/testcase_to_run_all.json b/system_test/testcase_to_run_all.json deleted file mode 100644 index 3e80a1f79a6..00000000000 --- a/system_test/testcase_to_run_all.json +++ /dev/null @@ -1,139 +0,0 @@ -{ - "ReplicaBasicTest" : [ - "testcase_0001", - "testcase_0002", - "testcase_0003", - "testcase_0004", - "testcase_0005", - "testcase_0006", - "testcase_0007", - "testcase_0008", - "testcase_0009", - "testcase_0010", - - "testcase_0021", - "testcase_0022", - "testcase_0023", - - "testcase_0101", - "testcase_0102", - "testcase_0103", - "testcase_0104", - "testcase_0105", - "testcase_0106", - "testcase_0107", - "testcase_0108", - "testcase_0109", - "testcase_0110", - - "testcase_10101", - "testcase_10102", - "testcase_10103", - "testcase_10104", - "testcase_10105", - "testcase_10106", - "testcase_10107", - "testcase_10108", - "testcase_10109", - "testcase_10110", - - "testcase_0111", - "testcase_0112", - "testcase_0113", - "testcase_0114", - "testcase_0115", - "testcase_0116", - "testcase_0117", - "testcase_0118", - - "testcase_0121", - "testcase_0122", - "testcase_0123", - "testcase_0124", - "testcase_0125", - "testcase_0126", - "testcase_0127", - - "testcase_0131", - "testcase_0132", - "testcase_0133", - "testcase_0134", - - "testcase_10131", - "testcase_10132", - "testcase_10133", - "testcase_10134", - - "testcase_0151", - "testcase_0152", - "testcase_0153", - "testcase_0154", - "testcase_0155", - "testcase_0156", - "testcase_0157", - "testcase_0158", - - "testcase_0201", - "testcase_0202", - "testcase_0203", - "testcase_0204", - "testcase_0205", - "testcase_0206", - "testcase_0207", - "testcase_0208", - - "testcase_0251", - "testcase_0252", - "testcase_0253", - "testcase_0254", - "testcase_0255", - "testcase_0256", - "testcase_0257", - "testcase_0258", - - "testcase_0301", - "testcase_0302", - "testcase_0303", - "testcase_0304", - "testcase_0305", - "testcase_0306", - "testcase_0307", - "testcase_0308", - - "testcase_4001", - "testcase_4002", - "testcase_4003", - "testcase_4004", - "testcase_4005", - "testcase_4006", - "testcase_4007", - "testcase_4008", - - "testcase_4011", - "testcase_4012", - "testcase_4013", - "testcase_4014", - "testcase_4015", - "testcase_4016", - "testcase_4017", - "testcase_4018", - - "testcase_9051" - ], - - "MirrorMakerTest" : [ - "testcase_5001", - "testcase_5002", - "testcase_5003", - "testcase_5004", - "testcase_5005", - "testcase_5006", - - "testcase_15001", - "testcase_15002", - "testcase_15003", - "testcase_15004", - "testcase_15005", - "testcase_15006" - ] -} diff --git a/system_test/testcase_to_run_all_replica.json b/system_test/testcase_to_run_all_replica.json deleted file mode 100644 index 34841f55c8c..00000000000 --- a/system_test/testcase_to_run_all_replica.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "ReplicaBasicTest" : [ - "testcase_0001", - "testcase_0002", - "testcase_0003", - "testcase_0004", - "testcase_0005", - 
"testcase_0006", - "testcase_0007", - "testcase_0008", - "testcase_0009", - "testcase_0010", - - "testcase_0021", - "testcase_0022", - "testcase_0023", - - "testcase_0101", - "testcase_0102", - "testcase_0103", - "testcase_0104", - "testcase_0105", - "testcase_0106", - "testcase_0107", - "testcase_0108", - "testcase_0109", - "testcase_0110", - - "testcase_10101", - "testcase_10102", - "testcase_10103", - "testcase_10104", - "testcase_10105", - "testcase_10106", - "testcase_10107", - "testcase_10108", - "testcase_10109", - "testcase_10110", - - "testcase_0111", - "testcase_0112", - "testcase_0113", - "testcase_0114", - "testcase_0115", - "testcase_0116", - "testcase_0117", - "testcase_0118", - - "testcase_0121", - "testcase_0122", - "testcase_0123", - "testcase_0124", - "testcase_0125", - "testcase_0126", - "testcase_0127", - - "testcase_0131", - "testcase_0132", - "testcase_0133", - "testcase_0134", - - "testcase_10131", - "testcase_10132", - "testcase_10133", - "testcase_10134", - - "testcase_0151", - "testcase_0152", - "testcase_0153", - "testcase_0154", - "testcase_0155", - "testcase_0156", - "testcase_0157", - "testcase_0158", - - "testcase_0201", - "testcase_0202", - "testcase_0203", - "testcase_0204", - "testcase_0205", - "testcase_0206", - "testcase_0207", - "testcase_0208", - - "testcase_0251", - "testcase_0252", - "testcase_0253", - "testcase_0254", - "testcase_0255", - "testcase_0256", - "testcase_0257", - "testcase_0258", - - "testcase_0301", - "testcase_0302", - "testcase_0303", - "testcase_0304", - "testcase_0305", - "testcase_0306", - "testcase_0307", - "testcase_0308", - - "testcase_4001", - "testcase_4002", - "testcase_4003", - "testcase_4004", - "testcase_4005", - "testcase_4006", - "testcase_4007", - "testcase_4008", - - "testcase_4011", - "testcase_4012", - "testcase_4013", - "testcase_4014", - "testcase_4015", - "testcase_4016", - "testcase_4017", - "testcase_4018", - - "testcase_9051" - ] -} diff --git a/system_test/testcase_to_run_sanity.json b/system_test/testcase_to_run_sanity.json deleted file mode 100644 index c6cf17ea690..00000000000 --- a/system_test/testcase_to_run_sanity.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "ReplicaBasicTest" : [ - "testcase_1" - ] -} diff --git a/system_test/testcase_to_skip.json b/system_test/testcase_to_skip.json deleted file mode 100644 index c24f83957c1..00000000000 --- a/system_test/testcase_to_skip.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "ReplicaBasicTest": [ "testcase_1" ] -} diff --git a/system_test/utils/__init__.py b/system_test/utils/__init__.py deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/system_test/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/system_test/utils/kafka_system_test_utils.py b/system_test/utils/kafka_system_test_utils.py deleted file mode 100644 index a9b73f7f383..00000000000 --- a/system_test/utils/kafka_system_test_utils.py +++ /dev/null @@ -1,2512 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/env python - -# =================================== -# kafka_system_test_utils.py -# =================================== - -import datetime -import getpass -import hashlib -import inspect -import json -import logging -import os -import pprint -import re -import subprocess -import sys -import thread -import time -import traceback - -import system_test_utils -import metrics - -from datetime import datetime -from time import mktime - -# ==================================================================== -# Two logging formats are defined in system_test/system_test_runner.py -# ==================================================================== - -# 1. "namedLogger" is defined to log message in this format: -# "%(asctime)s - %(levelname)s - %(message)s %(name_of_class)s" -# usage: to log message and showing the class name of the message - -logger = logging.getLogger("namedLogger") -thisClassName = '(kafka_system_test_utils)' -d = {'name_of_class': thisClassName} - -# 2. "anonymousLogger" is defined to log message in this format: -# "%(asctime)s - %(levelname)s - %(message)s" -# usage: to log message without showing class name and it's appropriate -# for logging generic message such as "sleeping for 5 seconds" - -anonLogger = logging.getLogger("anonymousLogger") - - -# ===================================== -# Sample usage of getting testcase env -# ===================================== -def get_testcase_env(testcaseEnv): - anonLogger.info("================================================") - anonLogger.info("systemTestBaseDir : " + testcaseEnv.systemTestBaseDir) - anonLogger.info("testSuiteBaseDir : " + testcaseEnv.testSuiteBaseDir) - anonLogger.info("testCaseBaseDir : " + testcaseEnv.testCaseBaseDir) - anonLogger.info("testCaseLogsDir : " + testcaseEnv.testCaseLogsDir) - anonLogger.info("userDefinedEnvVarDict : (testcaseEnv.userDefinedEnvVarDict)") - anonLogger.info("================================================") - - -def get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, type): - - defaultLogDir = testcaseEnv.testCaseLogsDir + "/" + role + "-" + entityId - - # type is either "metrics" or "dashboards" or "default" - if type == "metrics": - return testcaseEnv.testCaseLogsDir + "/" + role + "-" + entityId + "/metrics" - elif type == "log_segments" : - return testcaseEnv.testCaseLogsDir + "/" + role + "-" + entityId + "/log_segments" - elif type == "default" : - return testcaseEnv.testCaseLogsDir + "/" + role + "-" + entityId - elif type == "dashboards": - return testcaseEnv.testCaseLogsDir + "/dashboards" - elif type == "config": - return testcaseEnv.testCaseBaseDir + "/config" - else: - logger.error("unrecognized log directory type : " + type, extra=d) - logger.error("returning default log dir : " + defaultLogDir, extra=d) - return defaultLogDir - - -def generate_testcase_log_dirs(systemTestEnv, testcaseEnv): - - testcasePathName = testcaseEnv.testCaseBaseDir - logger.debug("testcase pathname: " + testcasePathName, extra=d) - - if not os.path.exists(testcasePathName + "/config") : os.makedirs(testcasePathName + "/config") - if not 
os.path.exists(testcasePathName + "/logs") : os.makedirs(testcasePathName + "/logs") - if not os.path.exists(testcasePathName + "/dashboards") : os.makedirs(testcasePathName + "/dashboards") - - dashboardsPathName = testcasePathName + "/dashboards" - if not os.path.exists(dashboardsPathName) : os.makedirs(dashboardsPathName) - - for clusterEntityConfigDict in systemTestEnv.clusterEntityConfigDictList: - entityId = clusterEntityConfigDict["entity_id"] - role = clusterEntityConfigDict["role"] - - metricsPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics") - if not os.path.exists(metricsPathName) : os.makedirs(metricsPathName) - - # create the role directory under dashboards - dashboardsRoleDir = dashboardsPathName + "/" + role - if not os.path.exists(dashboardsRoleDir) : os.makedirs(dashboardsRoleDir) - - -def collect_logs_from_remote_hosts(systemTestEnv, testcaseEnv): - anonLogger.info("================================================") - anonLogger.info("collecting logs from remote machines") - anonLogger.info("================================================") - - testCaseBaseDir = testcaseEnv.testCaseBaseDir - tcConfigsList = testcaseEnv.testcaseConfigsList - - for clusterEntityConfigDict in systemTestEnv.clusterEntityConfigDictList: - hostname = clusterEntityConfigDict["hostname"] - entity_id = clusterEntityConfigDict["entity_id"] - role = clusterEntityConfigDict["role"] - kafkaHome = clusterEntityConfigDict["kafka_home"] - - logger.debug("entity_id : " + entity_id, extra=d) - logger.debug("hostname : " + hostname, extra=d) - logger.debug("role : " + role, extra=d) - - configPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "config") - metricsPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "metrics") - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "default") - rmtLogPathName = logPathName - rmtMetricsPathName = metricsPathName - - if hostname != "localhost": - rmtConfigPathName = replace_kafka_home(configPathName, kafkaHome) - rmtMetricsPathName = replace_kafka_home(metricsPathName, kafkaHome) - rmtLogPathName = replace_kafka_home(logPathName, kafkaHome) - - # ============================== - # collect entity log file - # ============================== - cmdList = ["scp", - hostname + ":" + rmtLogPathName + "/*", - logPathName] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - # ============================== - # collect entity metrics file - # ============================== - cmdList = ["scp", - hostname + ":" + rmtMetricsPathName + "/*", - metricsPathName] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - # ============================== - # collect broker log segment file - # ============================== - if role == "broker": - dataLogPathName = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", entity_id, "log.dir") - - cmdList = ["scp -r", - hostname + ":" + dataLogPathName, - logPathName] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - # ============================== - # collect ZK log - # ============================== - if role == "zookeeper": - dataLogPathName = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", entity_id, 
"dataDir") - - cmdList = ["scp -r", - hostname + ":" + dataLogPathName, - logPathName] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - # ============================== - # collect dashboards file - # ============================== - dashboardsPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "dashboards") - rmtDashboardsPathName = dashboardsPathName - - if hostname != "localhost": - rmtDashboardsPathName = replace_kafka_home(dashboardsPathName, kafkaHome) - - cmdList = ["scp", - hostname + ":" + rmtDashboardsPathName + "/*", - dashboardsPathName] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - -def generate_testcase_log_dirs_in_remote_hosts(systemTestEnv, testcaseEnv): - testCaseBaseDir = testcaseEnv.testCaseBaseDir - - for clusterEntityConfigDict in systemTestEnv.clusterEntityConfigDictList: - hostname = clusterEntityConfigDict["hostname"] - entity_id = clusterEntityConfigDict["entity_id"] - role = clusterEntityConfigDict["role"] - kafkaHome = clusterEntityConfigDict["kafka_home"] - - logger.debug("entity_id : " + entity_id, extra=d) - logger.debug("hostname : " + hostname, extra=d) - logger.debug("role : " + role, extra=d) - - configPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "config") - metricsPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "metrics") - dashboardsPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entity_id, "dashboards") - - if hostname != "localhost": - configPathName = replace_kafka_home(configPathName, kafkaHome) - metricsPathName = replace_kafka_home(metricsPathName, kafkaHome) - dashboardsPathName = replace_kafka_home(dashboardsPathName, kafkaHome) - - cmdList = ["ssh " + hostname, - "'mkdir -p", - configPathName, - metricsPathName, - dashboardsPathName + "'"] - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - -def init_entity_props(systemTestEnv, testcaseEnv): - clusterConfigsList = systemTestEnv.clusterEntityConfigDictList - testcaseConfigsList = testcaseEnv.testcaseConfigsList - testcasePathName = testcaseEnv.testCaseBaseDir - - try: - # consumer config / log files location - consEntityIdList = system_test_utils.get_data_from_list_of_dicts( \ - clusterConfigsList, "role", "console_consumer", "entity_id") - consLogList = system_test_utils.get_data_from_list_of_dicts( \ - testcaseConfigsList, "entity_id", consEntityIdList[0], "log_filename") - consLogPathname = testcasePathName + "/logs/" + consLogList[0] - consCfgList = system_test_utils.get_data_from_list_of_dicts( \ - testcaseConfigsList, "entity_id", consEntityIdList[0], "config_filename") - consCfgPathname = testcasePathName + "/config/" + consCfgList[0] - - # producer config / log files location - prodEntityIdList = system_test_utils.get_data_from_list_of_dicts( \ - clusterConfigsList, "role", "producer_performance", "entity_id") - prodLogList = system_test_utils.get_data_from_list_of_dicts( \ - testcaseConfigsList, "entity_id", prodEntityIdList[0], "log_filename") - prodLogPathname = testcasePathName + "/logs/" + prodLogList[0] - prodCfgList = system_test_utils.get_data_from_list_of_dicts( \ - testcaseConfigsList, "entity_id", prodEntityIdList[0], "config_filename") - prodCfgPathname = testcasePathName + "/config/" + prodCfgList[0] - except: - 
logger.error("Failed to initialize entity config/log path names: possibly mismatched " \ - + "number of entities in cluster_config.json & testcase_n_properties.json", extra=d) - raise - - testcaseEnv.userDefinedEnvVarDict["consumerLogPathName"] = consLogPathname - testcaseEnv.userDefinedEnvVarDict["consumerConfigPathName"] = consCfgPathname - testcaseEnv.userDefinedEnvVarDict["producerLogPathName"] = prodLogPathname - testcaseEnv.userDefinedEnvVarDict["producerConfigPathName"] = prodCfgPathname - - -def copy_file_with_dict_values(srcFile, destFile, dictObj, keyValToAddDict): - infile = open(srcFile, "r") - inlines = infile.readlines() - infile.close() - - outfile = open(destFile, 'w') - for line in inlines: - for key in dictObj.keys(): - if (line.startswith(key + "=")): - line = key + "=" + dictObj[key] + "\n" - outfile.write(line) - - if (keyValToAddDict is not None): - for key in sorted(keyValToAddDict.iterkeys()): - line = key + "=" + keyValToAddDict[key] + "\n" - outfile.write(line) - - outfile.close() - -def generate_overriden_props_files(testsuitePathname, testcaseEnv, systemTestEnv): - logger.info("calling generate_properties_files", extra=d) - - clusterConfigsList = systemTestEnv.clusterEntityConfigDictList - tcPathname = testcaseEnv.testCaseBaseDir - tcConfigsList = testcaseEnv.testcaseConfigsList - - cfgTemplatePathname = os.path.abspath(testsuitePathname + "/config") - cfgDestPathname = os.path.abspath(tcPathname + "/config") - logger.info("config template (source) pathname : " + cfgTemplatePathname, extra=d) - logger.info("testcase config (dest) pathname : " + cfgDestPathname, extra=d) - - # loop through all zookeepers (if more than 1) to retrieve host and clientPort - # to construct a zookeeper.connect str for broker in the form of: - # zookeeper.connect=:,:,... 
- testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] = "" - testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] = "" - testcaseEnv.userDefinedEnvVarDict["sourceZkEntityIdList"] = [] - testcaseEnv.userDefinedEnvVarDict["targetZkEntityIdList"] = [] - testcaseEnv.userDefinedEnvVarDict["sourceZkHostPortDict"] = {} - testcaseEnv.userDefinedEnvVarDict["targetZkHostPortDict"] = {} - testcaseEnv.userDefinedEnvVarDict["sourceBrokerEntityIdList"] = [] - testcaseEnv.userDefinedEnvVarDict["targetBrokerEntityIdList"] = [] - testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] = "" - testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] = "" - - # update zookeeper cluster info into "testcaseEnv.userDefinedEnvVarDict" - zkDictList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigsList, "role", "zookeeper") - - for zkDict in zkDictList: - entityID = zkDict["entity_id"] - hostname = zkDict["hostname"] - clusterName = zkDict["cluster_name"] - clientPortList = system_test_utils.get_data_from_list_of_dicts(tcConfigsList, "entity_id", entityID, "clientPort") - clientPort = clientPortList[0] - - if clusterName == "source": - # update source cluster zookeeper entities - testcaseEnv.userDefinedEnvVarDict["sourceZkEntityIdList"].append(entityID) - if ( len(testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] = hostname + ":" + clientPort - else: - testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] += "," + hostname + ":" + clientPort - - # generate these strings for zookeeper config: - # server.1=host1:2180:2182 - # server.2=host2:2180:2182 - zkClusterSize = len(testcaseEnv.userDefinedEnvVarDict["sourceZkHostPortDict"]) - zkClusterId = str(zkClusterSize + 1) - key = "server." + zkClusterId - val = hostname + ":" + str(int(clientPort) - 1) + ":" + str(int(clientPort) + 1) - testcaseEnv.userDefinedEnvVarDict["sourceZkHostPortDict"][key] = val - - elif clusterName == "target": - # update target cluster zookeeper entities - testcaseEnv.userDefinedEnvVarDict["targetZkEntityIdList"].append(entityID) - if ( len(testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] = hostname + ":" + clientPort - else: - testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] += "," + hostname + ":" + clientPort - - # generate these strings for zookeeper config: - # server.1=host1:2180:2182 - # server.2=host2:2180:2182 - zkClusterSize = len(testcaseEnv.userDefinedEnvVarDict["targetZkHostPortDict"]) - zkClusterId = str(zkClusterSize + 1) - key = "server." 
+ zkClusterId - val = hostname + ":" + str(int(clientPort) - 1) + ":" + str(int(clientPort) + 1) - testcaseEnv.userDefinedEnvVarDict["targetZkHostPortDict"][key] = val - - else: - logger.error("Invalid cluster name: " + clusterName, extra=d) - raise Exception("Invalid cluster name : " + clusterName) - sys.exit(1) - - # update broker cluster info into "testcaseEnv.userDefinedEnvVarDict" - brokerDictList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigsList, "role", "broker") - for brokerDict in brokerDictList: - entityID = brokerDict["entity_id"] - hostname = brokerDict["hostname"] - clusterName = brokerDict["cluster_name"] - portList = system_test_utils.get_data_from_list_of_dicts(tcConfigsList, "entity_id", entityID, "port") - port = portList[0] - - if clusterName == "source": - if ( len(testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] = hostname + ":" + port - else: - testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] += "," + hostname + ":" + port - elif clusterName == "target": - if ( len(testcaseEnv.userDefinedEnvVarDict["targetBrokerList"]) == 0 ): - testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] = hostname + ":" + port - else: - testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] += "," + hostname + ":" + port - else: - logger.error("Invalid cluster name: " + clusterName, extra=d) - raise Exception("Invalid cluster name : " + clusterName) - sys.exit(1) - - # for each entity in the cluster config - for clusterCfg in clusterConfigsList: - cl_entity_id = clusterCfg["entity_id"] - - # loop through testcase config list 'tcConfigsList' for a matching cluster entity_id - for tcCfg in tcConfigsList: - if (tcCfg["entity_id"] == cl_entity_id): - - # copy the associated .properties template, update values, write to testcase_/config - - if (clusterCfg["role"] == "broker"): - brokerVersion = "0.8" - try: - brokerVersion = tcCfg["version"] - except: - pass - - if (brokerVersion == "0.7"): - if clusterCfg["cluster_name"] == "source": - tcCfg["zk.connect"] = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - else: - logger.error("Unknown cluster name for 0.7: " + clusterName, extra=d) - sys.exit(1) - else: - if clusterCfg["cluster_name"] == "source": - tcCfg["zookeeper.connect"] = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - elif clusterCfg["cluster_name"] == "target": - tcCfg["zookeeper.connect"] = testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] - else: - logger.error("Unknown cluster name: " + clusterName, extra=d) - sys.exit(1) - - addedCSVConfig = {} - addedCSVConfig["kafka.csv.metrics.dir"] = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", clusterCfg["entity_id"], "metrics") - addedCSVConfig["kafka.metrics.polling.interval.secs"] = "5" - addedCSVConfig["kafka.metrics.reporters"] = "kafka.metrics.KafkaCSVMetricsReporter" - addedCSVConfig["kafka.csv.metrics.reporter.enabled"] = "true" - addedCSVConfig["listeners"] = "PLAINTEXT://localhost:"+tcCfg["port"] - - if brokerVersion == "0.7": - addedCSVConfig["brokerid"] = tcCfg["brokerid"] - - copy_file_with_dict_values(cfgTemplatePathname + "/server.properties", - cfgDestPathname + "/" + tcCfg["config_filename"], tcCfg, addedCSVConfig) - - elif ( clusterCfg["role"] == "zookeeper"): - if clusterCfg["cluster_name"] == "source": - copy_file_with_dict_values(cfgTemplatePathname + "/zookeeper.properties", - cfgDestPathname + "/" + tcCfg["config_filename"], tcCfg, - testcaseEnv.userDefinedEnvVarDict["sourceZkHostPortDict"]) 
- elif clusterCfg["cluster_name"] == "target": - copy_file_with_dict_values(cfgTemplatePathname + "/zookeeper.properties", - cfgDestPathname + "/" + tcCfg["config_filename"], tcCfg, - testcaseEnv.userDefinedEnvVarDict["targetZkHostPortDict"]) - else: - logger.error("Unknown cluster name: " + clusterName, extra=d) - sys.exit(1) - - elif ( clusterCfg["role"] == "mirror_maker"): - tcCfg["metadata.broker.list"] = testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] - tcCfg["bootstrap.servers"] = testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] # for new producer - copy_file_with_dict_values(cfgTemplatePathname + "/mirror_producer.properties", - cfgDestPathname + "/" + tcCfg["mirror_producer_config_filename"], tcCfg, None) - - # update zookeeper.connect with the zk entities specified in cluster_config.json - tcCfg["zookeeper.connect"] = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - copy_file_with_dict_values(cfgTemplatePathname + "/mirror_consumer.properties", - cfgDestPathname + "/" + tcCfg["mirror_consumer_config_filename"], tcCfg, None) - - else: - logger.debug("UNHANDLED role " + clusterCfg["role"], extra=d) - - # scp updated config files to remote hosts - scp_file_to_remote_host(clusterConfigsList, testcaseEnv) - - -def scp_file_to_remote_host(clusterEntityConfigDictList, testcaseEnv): - - testcaseConfigsList = testcaseEnv.testcaseConfigsList - - for clusterEntityConfigDict in clusterEntityConfigDictList: - hostname = clusterEntityConfigDict["hostname"] - kafkaHome = clusterEntityConfigDict["kafka_home"] - localTestcasePathName = testcaseEnv.testCaseBaseDir - remoteTestcasePathName = localTestcasePathName - - if hostname != "localhost": - remoteTestcasePathName = replace_kafka_home(localTestcasePathName, kafkaHome) - - cmdStr = "scp " + localTestcasePathName + "/config/* " + hostname + ":" + remoteTestcasePathName + "/config" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - -def start_zookeepers(systemTestEnv, testcaseEnv): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - zkEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "zookeeper", "entity_id") - - for zkEntityId in zkEntityIdList: - configPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "zookeeper", zkEntityId, "config") - configFile = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", zkEntityId, "config_filename") - clientPort = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", zkEntityId, "clientPort") - dataDir = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", zkEntityId, "dataDir") - hostname = system_test_utils.get_data_by_lookup_keyval( - clusterEntityConfigDictList, "entity_id", zkEntityId, "hostname") - minusOnePort = str(int(clientPort) - 1) - plusOnePort = str(int(clientPort) + 1) - - # read configFile to find out the id of the zk and create the file "myid" - infile = open(configPathName + "/" + configFile, "r") - inlines = infile.readlines() - infile.close() - - for line in inlines: - if line.startswith("server.") and hostname + ":" + minusOnePort + ":" + plusOnePort in line: - # server.1=host1:2187:2189 - matchObj = re.match("server\.(.*?)=.*", line) - zkServerId = matchObj.group(1) - - cmdStr = "ssh " + hostname + " 'mkdir -p " + dataDir + "; echo " + zkServerId + " > " + dataDir + "/myid'" - logger.debug("executing command [" + 
cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - pass # dummy loop to wait until producer is completed - - time.sleep(2) - start_entity_in_background(systemTestEnv, testcaseEnv, zkEntityId) - -def start_brokers(systemTestEnv, testcaseEnv): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "broker", "entity_id") - - for brokerEntityId in brokerEntityIdList: - start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId) - -def start_console_consumers(systemTestEnv, testcaseEnv, onlyThisEntityId=None): - - if onlyThisEntityId is not None: - start_entity_in_background(systemTestEnv, testcaseEnv, onlyThisEntityId) - else: - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - consoleConsumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "console_consumer", "entity_id") - for entityId in consoleConsumerEntityIdList: - start_entity_in_background(systemTestEnv, testcaseEnv, entityId) - - -def start_mirror_makers(systemTestEnv, testcaseEnv, onlyThisEntityId=None): - - if onlyThisEntityId is not None: - start_entity_in_background(systemTestEnv, testcaseEnv, onlyThisEntityId) - else: - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "mirror_maker", "entity_id") - - for brokerEntityId in brokerEntityIdList: - start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId) - - -def get_broker_shutdown_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict): - - logger.info("looking up broker shutdown...", extra=d) - - # keep track of broker related data in this dict such as broker id, - # entity id and timestamp and return it to the caller function - shutdownBrokerDict = {} - - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "broker", "entity_id") - - for brokerEntityId in brokerEntityIdList: - - hostname = system_test_utils.get_data_by_lookup_keyval( - clusterEntityConfigDictList, "entity_id", brokerEntityId, "hostname") - logFile = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", brokerEntityId, "log_filename") - - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") - cmdStrList = ["ssh " + hostname, - "\"grep -i -h '" + leaderAttributesDict["BROKER_SHUT_DOWN_COMPLETED_MSG"] + "' ", - logPathName + "/" + logFile + " | ", - "sort | tail -1\""] - cmdStr = " ".join(cmdStrList) - - logger.debug("executing command [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - - line = line.rstrip('\n') - - if leaderAttributesDict["BROKER_SHUT_DOWN_COMPLETED_MSG"] in line: - logger.debug("found the log line : " + line, extra=d) - try: - matchObj = re.match(leaderAttributesDict["REGX_BROKER_SHUT_DOWN_COMPLETED_PATTERN"], line) - datetimeStr = matchObj.group(1) - datetimeObj = datetime.strptime(datetimeStr, "%Y-%m-%d %H:%M:%S,%f") - unixTs = time.mktime(datetimeObj.timetuple()) + 1e-6*datetimeObj.microsecond - #print "{0:.3f}".format(unixTs) - - # update shutdownBrokerDict when - # 
1. shutdownBrokerDict has no logline entry - # 2. shutdownBrokerDict has existing logline enty but found another logline with more recent timestamp - if (len(shutdownBrokerDict) > 0 and shutdownBrokerDict["timestamp"] < unixTs) or (len(shutdownBrokerDict) == 0): - shutdownBrokerDict["timestamp"] = unixTs - shutdownBrokerDict["brokerid"] = matchObj.group(2) - shutdownBrokerDict["hostname"] = hostname - shutdownBrokerDict["entity_id"] = brokerEntityId - logger.debug("brokerid: [" + shutdownBrokerDict["brokerid"] + \ - "] entity_id: [" + shutdownBrokerDict["entity_id"] + "]", extra=d) - except: - logger.error("ERROR [unable to find matching leader details: Has the matching pattern changed?]", extra=d) - raise - - return shutdownBrokerDict - - -def get_leader_elected_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict): - - logger.debug("looking up leader...", extra=d) - - # keep track of leader related data in this dict such as broker id, - # entity id and timestamp and return it to the caller function - leaderDict = {} - - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( \ - clusterEntityConfigDictList, "role", "broker", "entity_id") - - for brokerEntityId in brokerEntityIdList: - - hostname = system_test_utils.get_data_by_lookup_keyval( \ - clusterEntityConfigDictList, "entity_id", brokerEntityId, "hostname") - kafkaHome = system_test_utils.get_data_by_lookup_keyval( \ - clusterEntityConfigDictList, "entity_id", brokerEntityId, "kafka_home") - logFile = system_test_utils.get_data_by_lookup_keyval( \ - testcaseEnv.testcaseConfigsList, "entity_id", brokerEntityId, "log_filename") - - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") - - if hostname != "localhost": - logPathName = replace_kafka_home(logPathName, kafkaHome) - - cmdStrList = ["ssh " + hostname, - "\"grep -i -h '" + leaderAttributesDict["LEADER_ELECTION_COMPLETED_MSG"] + "' ", - logPathName + "/" + logFile + " | ", - "sort | tail -1\""] - cmdStr = " ".join(cmdStrList) - - logger.debug("executing command [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - - line = line.rstrip('\n') - - if leaderAttributesDict["LEADER_ELECTION_COMPLETED_MSG"] in line: - logger.debug("found the log line : " + line, extra=d) - try: - matchObj = re.match(leaderAttributesDict["REGX_LEADER_ELECTION_PATTERN"], line) - datetimeStr = matchObj.group(1) - datetimeObj = datetime.strptime(datetimeStr, "%Y-%m-%d %H:%M:%S,%f") - unixTs = time.mktime(datetimeObj.timetuple()) + 1e-6*datetimeObj.microsecond - #print "{0:.3f}".format(unixTs) - - # update leaderDict when - # 1. leaderDict has no logline entry - # 2. 
leaderDict has existing logline entry but found another logline with more recent timestamp - if (len(leaderDict) > 0 and leaderDict["timestamp"] < unixTs) or (len(leaderDict) == 0): - leaderDict["timestamp"] = unixTs - leaderDict["brokerid"] = matchObj.group(2) - leaderDict["topic"] = matchObj.group(3) - leaderDict["partition"] = matchObj.group(4) - leaderDict["entity_id"] = brokerEntityId - leaderDict["hostname"] = hostname - logger.debug("brokerid: [" + leaderDict["brokerid"] + "] entity_id: [" + leaderDict["entity_id"] + "]", extra=d) - except: - logger.error("ERROR [unable to find matching leader details: Has the matching pattern changed?]", extra=d) - raise - #else: - # logger.debug("unmatched line found [" + line + "]", extra=d) - - return leaderDict - - -def start_entity_in_background(systemTestEnv, testcaseEnv, entityId): - - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - # cluster configurations: - hostname = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "hostname") - role = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "role") - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "java_home") - jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "jmx_port") - clusterName = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "cluster_name") - - # testcase configurations: - testcaseConfigsList = testcaseEnv.testcaseConfigsList - clientPort = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "clientPort") - configFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "config_filename") - logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename") - - useNewProducer = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "new-producer") - mmConsumerConfigFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, - "mirror_consumer_config_filename") - mmProducerConfigFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, - "mirror_producer_config_filename") - - logger.info("starting " + role + " in host [" + hostname + "] on client port [" + clientPort + "]", extra=d) - - configPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "config") - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "default") - - if hostname != "localhost": - configPathName = replace_kafka_home(configPathName, kafkaHome) - logPathName = replace_kafka_home(logPathName, kafkaHome) - - if role == "zookeeper": - cmdList = ["ssh " + hostname, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaHome + "/bin/zookeeper-server-start.sh ", - configPathName + "/" + configFile + " &> ", - logPathName + "/" + logFile + " & echo pid:$! 
> ", - logPathName + "/entity_" + entityId + "_pid'"] - - elif role == "broker": - cmdList = ["ssh " + hostname, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - "KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%s/config/log4j.properties" % kafkaHome, - kafkaHome + "/bin/kafka-run-class.sh kafka.Kafka", - configPathName + "/" + configFile + " >> ", - logPathName + "/" + logFile + " & echo pid:$! > ", - logPathName + "/entity_" + entityId + "_pid'"] - - elif role == "mirror_maker": - if useNewProducer.lower() == "true": - cmdList = ["ssh " + hostname, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaHome + "/bin/kafka-run-class.sh kafka.tools.MirrorMaker", - "--consumer.config " + configPathName + "/" + mmConsumerConfigFile, - "--producer.config " + configPathName + "/" + mmProducerConfigFile, - "--new.producer", - "--whitelist=\".*\" >> ", - logPathName + "/" + logFile + " & echo pid:$! > ", - logPathName + "/entity_" + entityId + "_pid'"] - else: - cmdList = ["ssh " + hostname, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaHome + "/bin/kafka-run-class.sh kafka.tools.MirrorMaker", - "--consumer.config " + configPathName + "/" + mmConsumerConfigFile, - "--producer.config " + configPathName + "/" + mmProducerConfigFile, - "--whitelist=\".*\" >> ", - logPathName + "/" + logFile + " & echo pid:$! > ", - logPathName + "/entity_" + entityId + "_pid'"] - - elif role == "console_consumer": - clusterToConsumeFrom = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "cluster_name") - numTopicsForAutoGenString = -1 - try: - numTopicsForAutoGenString = int(testcaseEnv.testcaseArgumentsDict["num_topics_for_auto_generated_string"]) - except: - pass - - topic = "" - if numTopicsForAutoGenString < 0: - topic = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "topic") - else: - topic = generate_topics_string("topic", numTopicsForAutoGenString) - - # update this variable and will be used by data validation functions - testcaseEnv.consumerTopicsString = topic - - # 2. consumer timeout - timeoutMs = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer-timeout-ms") - - # 3. consumer formatter - formatterOption = "" - try: - formatterOption = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "formatter") - except: - pass - - # 4. 
consumer config - consumerProperties = {} - consumerProperties["consumer.timeout.ms"] = timeoutMs - try: - groupOption = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "group.id") - consumerProperties["group.id"] = groupOption - except: - pass - - props_file_path=write_consumer_properties(consumerProperties) - scpCmdStr = "scp "+ props_file_path +" "+ hostname + ":/tmp/" - logger.debug("executing command [" + scpCmdStr + "]", extra=d) - system_test_utils.sys_call(scpCmdStr) - - if len(formatterOption) > 0: - formatterOption = " --formatter " + formatterOption + " " - - # get zookeeper connect string - zkConnectStr = "" - if clusterName == "source": - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - elif clusterName == "target": - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] - else: - logger.error("Invalid cluster name : " + clusterName, extra=d) - sys.exit(1) - cmdList = ["ssh " + hostname, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaHome + "/bin/kafka-run-class.sh kafka.tools.ConsoleConsumer", - "--zookeeper " + zkConnectStr, - "--topic " + topic, - "--consumer.config /tmp/consumer.properties", - "--csv-reporter-enabled", - formatterOption, - "--from-beginning", - " >> " + logPathName + "/" + logFile + " & echo pid:$! > ", - logPathName + "/entity_" + entityId + "_pid'"] - - cmdStr = " ".join(cmdList) - - logger.debug("executing command: [" + cmdStr + "]", extra=d) - system_test_utils.async_sys_call(cmdStr) - logger.info("sleeping for 5 seconds.", extra=d) - time.sleep(5) - - pidCmdStr = "ssh " + hostname + " 'cat " + logPathName + "/entity_" + entityId + "_pid' 2> /dev/null" - logger.debug("executing command: [" + pidCmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) - - # keep track of the remote entity pid in a dictionary - for line in subproc.stdout.readlines(): - if line.startswith("pid"): - line = line.rstrip('\n') - logger.debug("found pid line: [" + line + "]", extra=d) - tokens = line.split(':') - if role == "zookeeper": - testcaseEnv.entityZkParentPidDict[entityId] = tokens[1] - elif role == "broker": - testcaseEnv.entityBrokerParentPidDict[entityId] = tokens[1] - elif role == "mirror_maker": - testcaseEnv.entityMirrorMakerParentPidDict[entityId] = tokens[1] - elif role == "console_consumer": - testcaseEnv.entityConsoleConsumerParentPidDict[entityId] = tokens[1] - - -def start_console_consumer(systemTestEnv, testcaseEnv): - - clusterList = systemTestEnv.clusterEntityConfigDictList - - consumerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterList, "role", "console_consumer") - for consumerConfig in consumerConfigList: - host = consumerConfig["hostname"] - entityId = consumerConfig["entity_id"] - jmxPort = consumerConfig["jmx_port"] - role = consumerConfig["role"] - clusterName = consumerConfig["cluster_name"] - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterList, "entity_id", entityId, "java_home") - jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterList, "entity_id", entityId, "jmx_port") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - logger.info("starting console consumer", extra=d) - - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", entityId, "default") - metricsDir = get_testcase_config_log_dir_pathname(testcaseEnv, 
"console_consumer", entityId, "metrics"), - - if host != "localhost": - consumerLogPath = replace_kafka_home(consumerLogPath, kafkaHome) - #metricsDir = replace_kafka_home(metricsDir, kafkaHome) - - consumerLogPathName = consumerLogPath + "/console_consumer.log" - - testcaseEnv.userDefinedEnvVarDict["consumerLogPathName"] = consumerLogPathName - - # testcase configurations: - testcaseList = testcaseEnv.testcaseConfigsList - - # get testcase arguments - # 1. topics - numTopicsForAutoGenString = -1 - try: - numTopicsForAutoGenString = int(testcaseEnv.testcaseArgumentsDict["num_topics_for_auto_generated_string"]) - except: - pass - - topic = "" - if numTopicsForAutoGenString < 0: - topic = system_test_utils.get_data_by_lookup_keyval(testcaseList, "entity_id", entityId, "topic") - else: - topic = generate_topics_string("topic", numTopicsForAutoGenString) - - # update this variable and will be used by data validation functions - testcaseEnv.consumerTopicsString = topic - - # 2. consumer timeout - timeoutMs = system_test_utils.get_data_by_lookup_keyval(testcaseList, "entity_id", entityId, "consumer-timeout-ms") - - # 3. consumer formatter - formatterOption = "" - try: - formatterOption = system_test_utils.get_data_by_lookup_keyval(testcaseList, "entity_id", entityId, "formatter") - except: - pass - - if len(formatterOption) > 0: - formatterOption = " --formatter " + formatterOption + " " - - # get zookeeper connect string - zkConnectStr = "" - if clusterName == "source": - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - elif clusterName == "target": - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] - else: - logger.error("Invalid cluster name : " + clusterName, extra=d) - sys.exit(1) - - consumerProperties = {} - consumerProperties["consumer.timeout.ms"] = timeoutMs - props_file_path=write_consumer_properties(consumerProperties) - scpCmdStr = "scp "+ props_file_path +" "+ host + ":/tmp/" - logger.debug("executing command [" + scpCmdStr + "]", extra=d) - system_test_utils.sys_call(scpCmdStr) - - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaRunClassBin + " kafka.tools.ConsoleConsumer", - "--zookeeper " + zkConnectStr, - "--topic " + topic, - "--consumer.config /tmp/consumer.properties", - "--csv-reporter-enabled", - #"--metrics-dir " + metricsDir, - formatterOption, - "--from-beginning ", - " >> " + consumerLogPathName, - " & echo pid:$! 
> " + consumerLogPath + "/entity_" + entityId + "_pid'"] - - cmdStr = " ".join(cmdList) - - logger.debug("executing command: [" + cmdStr + "]", extra=d) - system_test_utils.async_sys_call(cmdStr) - - pidCmdStr = "ssh " + host + " 'cat " + consumerLogPath + "/entity_" + entityId + "_pid'" - logger.debug("executing command: [" + pidCmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) - - # keep track of the remote entity pid in a dictionary - for line in subproc.stdout.readlines(): - if line.startswith("pid"): - line = line.rstrip('\n') - logger.debug("found pid line: [" + line + "]", extra=d) - tokens = line.split(':') - testcaseEnv.consumerHostParentPidDict[host] = tokens[1] - -def start_producer_performance(systemTestEnv, testcaseEnv, kafka07Client): - - entityConfigList = systemTestEnv.clusterEntityConfigDictList - testcaseConfigsList = testcaseEnv.testcaseConfigsList - brokerListStr = "" - - # construct "broker-list" for producer - for entityConfig in entityConfigList: - entityRole = entityConfig["role"] - if entityRole == "broker": - hostname = entityConfig["hostname"] - entityId = entityConfig["entity_id"] - port = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "port") - - producerConfigList = system_test_utils.get_dict_from_list_of_dicts(entityConfigList, "role", "producer_performance") - for producerConfig in producerConfigList: - host = producerConfig["hostname"] - entityId = producerConfig["entity_id"] - jmxPort = producerConfig["jmx_port"] - role = producerConfig["role"] - - thread.start_new_thread(start_producer_in_thread, (testcaseEnv, entityConfigList, producerConfig, kafka07Client)) - logger.debug("calling testcaseEnv.lock.acquire()", extra=d) - testcaseEnv.lock.acquire() - testcaseEnv.numProducerThreadsRunning += 1 - logger.debug("testcaseEnv.numProducerThreadsRunning : " + str(testcaseEnv.numProducerThreadsRunning), extra=d) - time.sleep(1) - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - -def generate_topics_string(topicPrefix, numOfTopics): - # return a topics string in the following format: - # _0001,_0002,... - # eg. "topic_0001,topic_0002,...,topic_xxxx" - - topicsStr = "" - counter = 1 - idx = "1" - while counter <= numOfTopics: - if counter <= 9: - idx = "000" + str(counter) - elif counter <= 99: - idx = "00" + str(counter) - elif counter <= 999: - idx = "0" + str(counter) - elif counter <= 9999: - idx = str(counter) - else: - raise Exception("Error: no. 
of topics must be under 10000 - current topics count : " + counter) - - if len(topicsStr) == 0: - topicsStr = topicPrefix + "_" + idx - else: - topicsStr = topicsStr + "," + topicPrefix + "_" + idx - - counter += 1 - return topicsStr - -def start_producer_in_thread(testcaseEnv, entityConfigList, producerConfig, kafka07Client): - host = producerConfig["hostname"] - entityId = producerConfig["entity_id"] - jmxPort = producerConfig["jmx_port"] - role = producerConfig["role"] - clusterName = producerConfig["cluster_name"] - kafkaHome = system_test_utils.get_data_by_lookup_keyval(entityConfigList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(entityConfigList, "entity_id", entityId, "java_home") - jmxPort = system_test_utils.get_data_by_lookup_keyval(entityConfigList, "entity_id", entityId, "jmx_port") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - # first keep track of its pid - testcaseEnv.producerHostParentPidDict[entityId] = os.getpid() - - # get optional testcase arguments - numTopicsForAutoGenString = -1 - try: - numTopicsForAutoGenString = int(testcaseEnv.testcaseArgumentsDict["num_topics_for_auto_generated_string"]) - except: - pass - - # testcase configurations: - testcaseConfigsList = testcaseEnv.testcaseConfigsList - topic = "" - if numTopicsForAutoGenString < 0: - topic = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "topic") - else: - topic = generate_topics_string("topic", numTopicsForAutoGenString) - - # update this variable and will be used by data validation functions - testcaseEnv.producerTopicsString = topic - - threads = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "threads") - compCodec = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "compression-codec") - messageSize = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "message-size") - noMsgPerBatch = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "message") - requestNumAcks = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "request-num-acks") - syncMode = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "sync") - useNewProducer = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "new-producer") - retryBackoffMs = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer-retry-backoff-ms") - numOfRetries = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer-num-retries") - - # for optional properties in testcase_xxxx_properties.json, - # check the length of returned value for those properties: - if len(retryBackoffMs) == 0: # no setting for "producer-retry-backoff-ms" - retryBackoffMs = "100" # default - if len(numOfRetries) == 0: # no setting for "producer-num-retries" - numOfRetries = "3" # default - - brokerListStr = "" - if clusterName == "source": - brokerListStr = testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] - elif clusterName == "target": - brokerListStr = testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] - else: - logger.error("Unknown cluster name: " + clusterName, extra=d) - sys.exit(1) - - logger.info("starting producer preformance", extra=d) - - producerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, 
"producer_performance", entityId, "default") - metricsDir = get_testcase_config_log_dir_pathname(testcaseEnv, "producer_performance", entityId, "metrics") - - if host != "localhost": - producerLogPath = replace_kafka_home(producerLogPath, kafkaHome) - metricsDir = replace_kafka_home(metricsDir, kafkaHome) - - producerLogPathName = producerLogPath + "/producer_performance.log" - - testcaseEnv.userDefinedEnvVarDict["producerLogPathName"] = producerLogPathName - - counter = 0 - producerSleepSec = int(testcaseEnv.testcaseArgumentsDict["sleep_seconds_between_producer_calls"]) - - boolArgumentsStr = "" - if syncMode.lower() == "true": - boolArgumentsStr = boolArgumentsStr + " --sync" - if useNewProducer.lower() == "true": - boolArgumentsStr = boolArgumentsStr + " --new-producer" - - # keep calling producer until signaled to stop by: - # testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] - while 1: - logger.debug("calling testcaseEnv.lock.acquire()", extra=d) - testcaseEnv.lock.acquire() - if not testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"]: - initMsgId = counter * int(noMsgPerBatch) - - logger.info("#### [producer thread] status of stopBackgroundProducer : [False] => producing [" \ - + str(noMsgPerBatch) + "] messages with starting message id : [" + str(initMsgId) + "]", extra=d) - - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - "KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%s/config/test-log4j.properties" % kafkaHome, - kafkaRunClassBin + " kafka.tools.ProducerPerformance", - "--broker-list " + brokerListStr, - "--initial-message-id " + str(initMsgId), - "--messages " + noMsgPerBatch, - "--topics " + topic, - "--threads " + threads, - "--compression-codec " + compCodec, - "--message-size " + messageSize, - "--request-num-acks " + requestNumAcks, - "--producer-retry-backoff-ms " + retryBackoffMs, - "--producer-num-retries " + numOfRetries, - "--csv-reporter-enabled", - "--metrics-dir " + metricsDir, - boolArgumentsStr, - " >> " + producerLogPathName, - " & echo $! > " + producerLogPath + "/entity_" + entityId + "_pid", - " & wait'"] - - if kafka07Client: - cmdList[:] = [] - - brokerInfoStr = "" - tokenList = brokerListStr.split(',') - index = 1 - for token in tokenList: - if len(brokerInfoStr) == 0: - brokerInfoStr = str(index) + ":" + token - else: - brokerInfoStr += "," + str(index) + ":" + token - index += 1 - - brokerInfoStr = "broker.list=" + brokerInfoStr - - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - "KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%s/config/test-log4j.properties" % kafkaHome, - kafkaRunClassBin + " kafka.tools.ProducerPerformance", - "--brokerinfo " + brokerInfoStr, - "--initial-message-id " + str(initMsgId), - "--messages " + noMsgPerBatch, - "--topic " + topic, - "--threads " + threads, - "--compression-codec " + compCodec, - "--message-size " + messageSize, - "--vary-message-size --async", - " >> " + producerLogPathName, - " & echo $! 
> " + producerLogPath + "/entity_" + entityId + "_pid", - " & wait'"] - - cmdStr = " ".join(cmdList) - logger.debug("executing command: [" + cmdStr + "]", extra=d) - - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - logger.debug("waiting for producer to finish", extra=d) - subproc.communicate() - logger.debug("producer finished", extra=d) - else: - testcaseEnv.numProducerThreadsRunning -= 1 - logger.debug("testcaseEnv.numProducerThreadsRunning : " + str(testcaseEnv.numProducerThreadsRunning), extra=d) - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - break - - counter += 1 - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - time.sleep(int(producerSleepSec)) - - # wait until other producer threads also stops and - # let the main testcase know all producers have stopped - while 1: - logger.debug("calling testcaseEnv.lock.acquire()", extra=d) - testcaseEnv.lock.acquire() - time.sleep(1) - if testcaseEnv.numProducerThreadsRunning == 0: - testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = True - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - break - else: - logger.debug("waiting for TRUE of testcaseEnv.userDefinedEnvVarDict['backgroundProducerStopped']", extra=d) - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - time.sleep(1) - - # finally remove itself from the tracking pids - del testcaseEnv.producerHostParentPidDict[entityId] - -def stop_remote_entity(systemTestEnv, entityId, parentPid, signalType="SIGTERM"): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - hostname = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "hostname") - pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid) - - logger.info("terminating (" + signalType + ") process id: " + parentPid + " in host: " + hostname, extra=d) - - if signalType.lower() == "sigterm": - system_test_utils.sigterm_remote_process(hostname, pidStack) - elif signalType.lower() == "sigkill": - system_test_utils.sigkill_remote_process(hostname, pidStack) - else: - logger.error("Invalid signal type: " + signalType, extra=d) - raise Exception("Invalid signal type: " + signalType) - - -def force_stop_remote_entity(systemTestEnv, entityId, parentPid): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - hostname = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", entityId, "hostname") - pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid) - - logger.debug("terminating process id: " + parentPid + " in host: " + hostname, extra=d) - system_test_utils.sigkill_remote_process(hostname, pidStack) - - -def create_topic_for_producer_performance(systemTestEnv, testcaseEnv): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - prodPerfCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "producer_performance") - - for prodPerfCfg in prodPerfCfgList: - topicsStr = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", prodPerfCfg["entity_id"], "topic") - zkEntityId = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "role", "zookeeper", "entity_id") - zkHost = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "role", "zookeeper", "hostname") - 
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", zkEntityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", zkEntityId, "java_home") - createTopicBin = kafkaHome + "/bin/kafka-topics.sh --create" - - logger.debug("zkEntityId : " + zkEntityId, extra=d) - logger.debug("createTopicBin : " + createTopicBin, extra=d) - - zkConnectStr = "" - topicsList = topicsStr.split(',') - - if len(testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"]) > 0: - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - elif len(testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"]) > 0: - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] - else: - raise Exception("Empty zkConnectStr found") - - testcaseBaseDir = testcaseEnv.testCaseBaseDir - - if zkHost != "localhost": - testcaseBaseDir = replace_kafka_home(testcaseBaseDir, kafkaHome) - - for topic in topicsList: - logger.info("creating topic: [" + topic + "] at: [" + zkConnectStr + "]", extra=d) - cmdList = ["ssh " + zkHost, - "'JAVA_HOME=" + javaHome, - createTopicBin, - " --topic " + topic, - " --zookeeper " + zkConnectStr, - " --replication-factor " + testcaseEnv.testcaseArgumentsDict["replica_factor"], - " --partitions " + testcaseEnv.testcaseArgumentsDict["num_partition"] + " >> ", - testcaseBaseDir + "/logs/create_source_cluster_topic.log'"] - - cmdStr = " ".join(cmdList) - logger.debug("executing command: [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - -def create_topic(systemTestEnv, testcaseEnv, topic, replication_factor, num_partitions): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - zkEntityId = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "role", "zookeeper", "entity_id") - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", zkEntityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "entity_id", zkEntityId, "java_home") - createTopicBin = kafkaHome + "/bin/kafka-topics.sh --create" - zkConnectStr = "" - zkHost = system_test_utils.get_data_by_lookup_keyval(clusterEntityConfigDictList, "role", "zookeeper", "hostname") - if len(testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"]) > 0: - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"] - elif len(testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"]) > 0: - zkConnectStr = testcaseEnv.userDefinedEnvVarDict["targetZkConnectStr"] - else: - raise Exception("Empty zkConnectStr found") - - testcaseBaseDir = testcaseEnv.testCaseBaseDir - - testcaseBaseDir = replace_kafka_home(testcaseBaseDir, kafkaHome) - - logger.debug("creating topic: [" + topic + "] at: [" + zkConnectStr + "]", extra=d) - cmdList = ["ssh " + zkHost, - "'JAVA_HOME=" + javaHome, - createTopicBin, - " --topic " + topic, - " --zookeeper " + zkConnectStr, - " --replication-factor " + str(replication_factor), - " --partitions " + str(num_partitions) + " >> ", - testcaseBaseDir + "/logs/create_source_cluster_topic.log'"] - - cmdStr = " ".join(cmdList) - logger.info("executing command: [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - - - -def get_message_id(logPathName, topic=""): - logLines = open(logPathName, "r").readlines() - messageIdList = [] - - for line in logLines: - if not "MessageID" in line: - continue - else: - 
matchObj = re.match('.*Topic:(.*?):.*:MessageID:(.*?):', line) - if len(topic) == 0: - messageIdList.append( matchObj.group(2) ) - else: - if topic == matchObj.group(1): - messageIdList.append( matchObj.group(2) ) - - return messageIdList - -def get_message_checksum(logPathName): - logLines = open(logPathName, "r").readlines() - messageChecksumList = [] - - for line in logLines: - if not "checksum:" in line: - continue - else: - matchObj = re.match('.*checksum:(\d*).*', line) - if matchObj is not None: - checksum = matchObj.group(1) - messageChecksumList.append( checksum ) - else: - logger.error("unexpected log line : " + line, extra=d) - - return messageChecksumList - - -def validate_data_matched(systemTestEnv, testcaseEnv, replicationUtils): - logger.info("#### Inside validate_data_matched", extra=d) - - validationStatusDict = testcaseEnv.validationStatusDict - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - prodPerfCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "producer_performance") - consumerCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "console_consumer") - - consumerDuplicateCount = 0 - - for prodPerfCfg in prodPerfCfgList: - producerEntityId = prodPerfCfg["entity_id"] - topic = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", producerEntityId, "topic") - logger.debug("working on topic : " + topic, extra=d) - acks = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", producerEntityId, "request-num-acks") - - consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( \ - clusterEntityConfigDictList, "role", "console_consumer", "entity_id") - - matchingConsumerEntityId = None - for consumerEntityId in consumerEntityIdList: - consumerTopic = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", consumerEntityId, "topic") - if consumerTopic in topic: - matchingConsumerEntityId = consumerEntityId - logger.info("matching consumer entity id found", extra=d) - break - - if matchingConsumerEntityId is None: - logger.info("matching consumer entity id NOT found", extra=d) - break - - msgIdMissingInConsumerLogPathName = get_testcase_config_log_dir_pathname( \ - testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") + "/msg_id_missing_in_consumer.log" - producerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "producer_performance", producerEntityId, "default") - producerLogPathName = producerLogPath + "/producer_performance.log" - - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") - consumerLogPathName = consumerLogPath + "/console_consumer.log" - - producerMsgIdList = get_message_id(producerLogPathName) - consumerMsgIdList = get_message_id(consumerLogPathName) - producerMsgIdSet = set(producerMsgIdList) - consumerMsgIdSet = set(consumerMsgIdList) - - consumerDuplicateCount = len(consumerMsgIdList) - len(consumerMsgIdSet) - missingUniqConsumerMsgId = system_test_utils.subtract_list(producerMsgIdSet, consumerMsgIdSet) - - outfile = open(msgIdMissingInConsumerLogPathName, "w") - for id in missingUniqConsumerMsgId: - outfile.write(id + "\n") - outfile.close() - - logger.info("no. of unique messages on topic [" + topic + "] sent from publisher : " + str(len(producerMsgIdSet)), extra=d) - logger.info("no. 
of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgIdSet)), extra=d) - validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(producerMsgIdSet)) - validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(consumerMsgIdSet)) - - missingPercentage = len(missingUniqConsumerMsgId) * 100.00 / len(producerMsgIdSet) - logger.info("Data loss threshold % : " + str(replicationUtils.ackOneDataLossThresholdPercent), extra=d) - logger.warn("Data loss % on topic : " + topic + " : " + str(missingPercentage), extra=d) - - if ( len(missingUniqConsumerMsgId) == 0 and len(producerMsgIdSet) > 0 ): - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - elif (acks == "1"): - if missingPercentage <= replicationUtils.ackOneDataLossThresholdPercent: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - logger.warn("Test case (Acks = 1) passes with less than " + str(replicationUtils.ackOneDataLossThresholdPercent) \ - + "% data loss : [" + str(len(missingUniqConsumerMsgId)) + "] missing messages", extra=d) - else: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" - logger.error("Test case (Acks = 1) failed with more than " + str(replicationUtils.ackOneDataLossThresholdPercent) \ - + "% data loss : [" + str(len(missingUniqConsumerMsgId)) + "] missing messages", extra=d) - else: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" - logger.info("See " + msgIdMissingInConsumerLogPathName + " for missing MessageID", extra=d) - - -def validate_leader_election_successful(testcaseEnv, leaderDict, validationStatusDict): - logger.debug("#### Inside validate_leader_election_successful", extra=d) - - if ( len(leaderDict) > 0 ): - try: - leaderBrokerId = leaderDict["brokerid"] - leaderEntityId = leaderDict["entity_id"] - leaderPid = testcaseEnv.entityBrokerParentPidDict[leaderEntityId] - hostname = leaderDict["hostname"] - - logger.info("found leader in entity [" + leaderEntityId + "] with brokerid [" + \ - leaderBrokerId + "] for partition [" + leaderDict["partition"] + "]", extra=d) - validationStatusDict["Validate leader election successful"] = "PASSED" - return True - except Exception, e: - logger.error("leader info not completed: {0}".format(e), extra=d) - traceback.print_exc() - print leaderDict - traceback.print_exc() - validationStatusDict["Validate leader election successful"] = "FAILED" - return False - else: - validationStatusDict["Validate leader election successful"] = "FAILED" - return False - - -def cleanup_data_at_remote_hosts(systemTestEnv, testcaseEnv): - - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - testcaseConfigsList = testcaseEnv.testcaseConfigsList - testCaseBaseDir = testcaseEnv.testCaseBaseDir - - # clean up the following directories in localhost - # system_test//testcase_xxxx/config - # system_test//testcase_xxxx/dashboards - # system_test//testcase_xxxx/logs - logger.info("cleaning up test case dir: [" + testCaseBaseDir + "]", extra=d) - - if "system_test" not in testCaseBaseDir: - # logger.warn("possible destructive command [" + cmdStr + "]", extra=d) - logger.warn("check config file: system_test/cluster_config.properties", extra=d) - logger.warn("aborting test...", extra=d) - sys.exit(1) - else: - system_test_utils.sys_call("rm -rf " + testCaseBaseDir + "/config/*") - system_test_utils.sys_call("rm -rf " + testCaseBaseDir + 
"/dashboards/*") - system_test_utils.sys_call("rm -rf " + testCaseBaseDir + "/logs/*") - - for clusterEntityConfigDict in systemTestEnv.clusterEntityConfigDictList: - - hostname = clusterEntityConfigDict["hostname"] - entityId = clusterEntityConfigDict["entity_id"] - role = clusterEntityConfigDict["role"] - kafkaHome = clusterEntityConfigDict["kafka_home"] - cmdStr = "" - dataDir = "" - - if hostname == "localhost": - remoteTestCaseBaseDir = testCaseBaseDir - else: - remoteTestCaseBaseDir = replace_kafka_home(testCaseBaseDir, kafkaHome) - - logger.info("cleaning up data dir on host: [" + hostname + "]", extra=d) - - if role == 'zookeeper': - dataDir = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "dataDir") - elif role == 'broker': - dataDir = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log.dir") - else: - logger.info("skipping role [" + role + "] on host : [" + hostname + "]", extra=d) - continue - - cmdStr = "ssh " + hostname + " 'rm -rf " + dataDir + "'" - - if not dataDir.startswith("/tmp"): - logger.warn("possible destructive command [" + cmdStr + "]", extra=d) - logger.warn("check config file: system_test/cluster_config.properties", extra=d) - logger.warn("aborting test...", extra=d) - sys.exit(1) - - # ============================ - # cleaning data dir - # ============================ - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - # ============================ - # cleaning log/metrics/svg, ... - # ============================ - if system_test_utils.remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"): - # so kafkaHome is a real kafka installation - cmdStr = "ssh " + hostname + " \"find " + remoteTestCaseBaseDir + " -name '*.log' | xargs rm 2> /dev/null\"" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - cmdStr = "ssh " + hostname + " \"find " + remoteTestCaseBaseDir + " -name '*_pid' | xargs rm 2> /dev/null\"" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - cmdStr = "ssh " + hostname + " \"find " + remoteTestCaseBaseDir + " -name '*.csv' | xargs rm 2> /dev/null\"" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - cmdStr = "ssh " + hostname + " \"find " + remoteTestCaseBaseDir + " -name '*.svg' | xargs rm 2> /dev/null\"" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - - cmdStr = "ssh " + hostname + " \"find " + remoteTestCaseBaseDir + " -name '*.html' | xargs rm 2> /dev/null\"" - logger.debug("executing command [" + cmdStr + "]", extra=d) - system_test_utils.sys_call(cmdStr) - -def replace_kafka_home(systemTestSubDirPath, kafkaHome): - matchObj = re.match(".*(\/system_test\/.*)$", systemTestSubDirPath) - relativeSubDirPath = matchObj.group(1) - return kafkaHome + relativeSubDirPath - -def get_entity_log_directory(testCaseBaseDir, entity_id, role): - return testCaseBaseDir + "/logs/" + role + "-" + entity_id - -def get_entities_for_role(clusterConfig, role): - return filter(lambda entity: entity['role'] == role, clusterConfig) - -def stop_consumer(): - system_test_utils.sys_call("ps -ef | grep ConsoleConsumer | grep -v grep | tr -s ' ' | cut -f2 -d' ' | xargs kill -15") - -def ps_grep_terminate_running_entity(systemTestEnv): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - username = 
getpass.getuser() - - for clusterEntityConfigDict in systemTestEnv.clusterEntityConfigDictList: - hostname = clusterEntityConfigDict["hostname"] - cmdList = ["ssh " + hostname, - "\"ps auxw | grep -v grep | grep -v Bootstrap | grep -v vim | grep ^" + username, - "| grep -i 'java\|server\-start\|run\-\|producer\|consumer\|jmxtool' | grep kafka", - "| tr -s ' ' | cut -f2 -d ' ' | xargs kill -9" + "\""] - - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - - system_test_utils.sys_call(cmdStr) - -def get_reelection_latency(systemTestEnv, testcaseEnv, leaderDict, leaderAttributesDict): - leaderEntityId = None - leaderBrokerId = None - leaderPPid = None - shutdownLeaderTimestamp = None - leaderReElectionLatency = -1 - - if testcaseEnv.validationStatusDict["Validate leader election successful"] == "FAILED": - # leader election is not successful - something is wrong => so skip this testcase - return None - else: - # leader elected => stop leader - try: - leaderEntityId = leaderDict["entity_id"] - leaderBrokerId = leaderDict["brokerid"] - leaderPPid = testcaseEnv.entityBrokerParentPidDict[leaderEntityId] - except: - logger.info("leader details unavailable", extra=d) - raise - - logger.info("stopping leader in entity "+leaderEntityId+" with pid "+leaderPPid, extra=d) - signalType = None - try: - signalType = testcaseEnv.testcaseArgumentsDict["signal_type"] - except: - pass - - if signalType is None or signalType.lower() == "sigterm": - stop_remote_entity(systemTestEnv, leaderEntityId, leaderPPid) - elif signalType.lower() == "sigkill": - stop_remote_entity(systemTestEnv, leaderEntityId, leaderPPid, "SIGKILL") - else: - logger.error("Unsupported signal type: " + signalType, extra=d) - raise Exception("Unsupported signal type: " + signalType) - - logger.info("sleeping for 10s for leader re-election to complete", extra=d) - time.sleep(10) - - # get broker shut down completed timestamp - shutdownBrokerDict = get_broker_shutdown_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict) - shutdownTimestamp = -1 - - try: - shutdownTimestamp = shutdownBrokerDict["timestamp"] - logger.debug("unix timestamp of shut down completed: " + str("{0:.6f}".format(shutdownTimestamp)), extra=d) - except: - logger.warn("unable to find broker shut down timestamp", extra=d) - - logger.info("looking up new leader", extra=d) - leaderDict2 = get_leader_elected_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict) - logger.debug("unix timestamp of new elected leader: " + str("{0:.6f}".format(leaderDict2["timestamp"])), extra=d) - - if shutdownTimestamp > 0: - leaderReElectionLatency = float(leaderDict2["timestamp"]) - float(shutdownTimestamp) - logger.info("leader Re-election Latency: " + str(leaderReElectionLatency) + " sec", extra=d) - - return leaderReElectionLatency - - -def stop_all_remote_running_processes(systemTestEnv, testcaseEnv): - - entityConfigs = systemTestEnv.clusterEntityConfigDictList - - # If there are any alive local threads that keep starting remote producer performance, we need to kill them; - # note we do not need to stop remote processes since they will terminate themselves eventually. 
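The producer threads started by start_producer_in_thread and the shutdown logic below coordinate through a lock-protected stop flag (stopBackgroundProducer), a running-thread counter (numProducerThreadsRunning), and a completion flag (backgroundProducerStopped). The class below is a stripped-down sketch of that handshake, not the framework's code; the names are chosen only to mirror the variables used here.

import threading
import time

class ProducerCoordinator(object):
    # Minimal illustration of the stop-flag / thread-counter handshake.
    def __init__(self):
        self.lock = threading.Lock()
        self.stop_requested = False   # plays the role of "stopBackgroundProducer"
        self.running_threads = 0      # plays the role of "numProducerThreadsRunning"
        self.all_stopped = False      # plays the role of "backgroundProducerStopped"

    def producer_loop(self):
        with self.lock:
            self.running_threads += 1
        while True:
            with self.lock:
                if self.stop_requested:
                    self.running_threads -= 1
                    if self.running_threads == 0:
                        self.all_stopped = True
                    break
            time.sleep(1)             # one "produce a batch" iteration would go here

    def stop_and_wait(self):
        with self.lock:
            self.stop_requested = True
        while True:
            with self.lock:
                if self.all_stopped:
                    return
            time.sleep(1)

stop_all_remote_running_processes plays the role of stop_and_wait: flip the flag under the lock, then poll until every producer thread has acknowledged.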
- if len(testcaseEnv.producerHostParentPidDict) != 0: - # ============================================= - # tell producer to stop - # ============================================= - logger.debug("calling testcaseEnv.lock.acquire()", extra=d) - testcaseEnv.lock.acquire() - testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - - # ============================================= - # wait for producer thread's update of - # "backgroundProducerStopped" to be "True" - # ============================================= - while 1: - logger.debug("calling testcaseEnv.lock.acquire()", extra=d) - testcaseEnv.lock.acquire() - logger.info("status of backgroundProducerStopped : [" + \ - str(testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=d) - if testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]: - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - logger.info("all producer threads completed", extra=d) - break - logger.debug("calling testcaseEnv.lock.release()", extra=d) - testcaseEnv.lock.release() - - testcaseEnv.producerHostParentPidDict.clear() - - for hostname, consumerPPid in testcaseEnv.consumerHostParentPidDict.items(): - consumerEntityId = system_test_utils.get_data_by_lookup_keyval(entityConfigs, "hostname", hostname, "entity_id") - stop_remote_entity(systemTestEnv, consumerEntityId, consumerPPid) - - for entityId, jmxParentPidList in testcaseEnv.entityJmxParentPidDict.items(): - for jmxParentPid in jmxParentPidList: - stop_remote_entity(systemTestEnv, entityId, jmxParentPid) - - for entityId, mirrorMakerParentPid in testcaseEnv.entityMirrorMakerParentPidDict.items(): - stop_remote_entity(systemTestEnv, entityId, mirrorMakerParentPid) - - for entityId, consumerParentPid in testcaseEnv.entityConsoleConsumerParentPidDict.items(): - stop_remote_entity(systemTestEnv, entityId, consumerParentPid) - - for entityId, brokerParentPid in testcaseEnv.entityBrokerParentPidDict.items(): - stop_remote_entity(systemTestEnv, entityId, brokerParentPid) - - for entityId, zkParentPid in testcaseEnv.entityZkParentPidDict.items(): - stop_remote_entity(systemTestEnv, entityId, zkParentPid) - - -def start_migration_tool(systemTestEnv, testcaseEnv, onlyThisEntityId=None): - clusterConfigList = systemTestEnv.clusterEntityConfigDictList - migrationToolConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "migration_tool") - - for migrationToolConfig in migrationToolConfigList: - - entityId = migrationToolConfig["entity_id"] - - if onlyThisEntityId is None or entityId == onlyThisEntityId: - - host = migrationToolConfig["hostname"] - jmxPort = migrationToolConfig["jmx_port"] - role = migrationToolConfig["role"] - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "java_home") - jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "jmx_port") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - logger.info("starting kafka migration tool", extra=d) - migrationToolLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "migration_tool", entityId, "default") - migrationToolLogPathName = migrationToolLogPath + "/migration_tool.log" - testcaseEnv.userDefinedEnvVarDict["migrationToolLogPathName"] = 
migrationToolLogPathName - - testcaseConfigsList = testcaseEnv.testcaseConfigsList - numProducers = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.producers") - numStreams = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.streams") - producerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer.config") - consumerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer.config") - zkClientJar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "zkclient.01.jar") - kafka07Jar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "kafka.07.jar") - whiteList = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "whitelist") - logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename") - - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaRunClassBin + " kafka.tools.KafkaMigrationTool", - "--whitelist=" + whiteList, - "--num.producers=" + numProducers, - "--num.streams=" + numStreams, - "--producer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + producerConfig, - "--consumer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + consumerConfig, - "--zkclient.01.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + zkClientJar, - "--kafka.07.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + kafka07Jar, - " &> " + migrationToolLogPath + "/migrationTool.log", - " & echo pid:$! > " + migrationToolLogPath + "/entity_" + entityId + "_pid'"] - - cmdStr = " ".join(cmdList) - logger.debug("executing command: [" + cmdStr + "]", extra=d) - system_test_utils.async_sys_call(cmdStr) - time.sleep(5) - - pidCmdStr = "ssh " + host + " 'cat " + migrationToolLogPath + "/entity_" + entityId + "_pid' 2> /dev/null" - logger.debug("executing command: [" + pidCmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) - - # keep track of the remote entity pid in a dictionary - for line in subproc.stdout.readlines(): - if line.startswith("pid"): - line = line.rstrip('\n') - logger.debug("found pid line: [" + line + "]", extra=d) - tokens = line.split(':') - testcaseEnv.entityMigrationToolParentPidDict[entityId] = tokens[1] - - -def validate_07_08_migrated_data_matched(systemTestEnv, testcaseEnv): - logger.debug("#### Inside validate_07_08_migrated_data_matched", extra=d) - - validationStatusDict = testcaseEnv.validationStatusDict - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - prodPerfCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "producer_performance") - consumerCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "console_consumer") - - for prodPerfCfg in prodPerfCfgList: - producerEntityId = prodPerfCfg["entity_id"] - topic = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", producerEntityId, "topic") - - consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "console_consumer", "entity_id") - - matchingConsumerEntityId = None - for consumerEntityId in consumerEntityIdList: - consumerTopic = system_test_utils.get_data_by_lookup_keyval( - testcaseEnv.testcaseConfigsList, "entity_id", 
consumerEntityId, "topic") - if consumerTopic in topic: - matchingConsumerEntityId = consumerEntityId - break - - if matchingConsumerEntityId is None: - break - - msgChecksumMissingInConsumerLogPathName = get_testcase_config_log_dir_pathname( - testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") \ - + "/msg_checksum_missing_in_consumer.log" - producerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "producer_performance", producerEntityId, "default") - producerLogPathName = producerLogPath + "/producer_performance.log" - - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") - consumerLogPathName = consumerLogPath + "/console_consumer.log" - - producerMsgChecksumList = get_message_checksum(producerLogPathName) - consumerMsgChecksumList = get_message_checksum(consumerLogPathName) - producerMsgChecksumSet = set(producerMsgChecksumList) - consumerMsgChecksumSet = set(consumerMsgChecksumList) - producerMsgChecksumUniqList = list(producerMsgChecksumSet) - consumerMsgChecksumUniqList = list(consumerMsgChecksumSet) - - missingMsgChecksumInConsumer = producerMsgChecksumSet - consumerMsgChecksumSet - - logger.debug("size of producerMsgChecksumList : " + str(len(producerMsgChecksumList)), extra=d) - logger.debug("size of consumerMsgChecksumList : " + str(len(consumerMsgChecksumList)), extra=d) - logger.debug("size of producerMsgChecksumSet : " + str(len(producerMsgChecksumSet)), extra=d) - logger.debug("size of consumerMsgChecksumSet : " + str(len(consumerMsgChecksumSet)), extra=d) - logger.debug("size of producerMsgChecksumUniqList : " + str(len(producerMsgChecksumUniqList)), extra=d) - logger.debug("size of consumerMsgChecksumUniqList : " + str(len(consumerMsgChecksumUniqList)), extra=d) - logger.debug("size of missingMsgChecksumInConsumer : " + str(len(missingMsgChecksumInConsumer)), extra=d) - - outfile = open(msgChecksumMissingInConsumerLogPathName, "w") - for id in missingMsgChecksumInConsumer: - outfile.write(id + "\n") - outfile.close() - - logger.info("no. of messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumList)), extra=d) - logger.info("no. of messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumList)), extra=d) - logger.info("no. of unique messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumUniqList)), extra=d) - logger.info("no. 
of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumUniqList)), extra=d) - validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(list(producerMsgChecksumSet))) - validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(list(consumerMsgChecksumSet))) - - if ( len(producerMsgChecksumList) > 0 and len(list(producerMsgChecksumSet)) == len(list(consumerMsgChecksumSet))): - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - else: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" - logger.info("See " + msgChecksumMissingInConsumerLogPathName + " for missing MessageID", extra=d) - -def validate_broker_log_segment_checksum(systemTestEnv, testcaseEnv, clusterName="source"): - logger.debug("#### Inside validate_broker_log_segment_checksum", extra=d) - - anonLogger.info("================================================") - anonLogger.info("validating merged broker log segment checksums") - anonLogger.info("================================================") - - brokerLogCksumDict = {} - testCaseBaseDir = testcaseEnv.testCaseBaseDir - tcConfigsList = testcaseEnv.testcaseConfigsList - validationStatusDict = testcaseEnv.validationStatusDict - clusterConfigList = systemTestEnv.clusterEntityConfigDictList - #brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(clusterConfigList, "role", "broker", "entity_id") - allBrokerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker") - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id") - - # loop through all brokers - for brokerEntityId in brokerEntityIdList: - logCksumDict = {} - # remoteLogSegmentPathName : /tmp/kafka_server_4_logs - # => remoteLogSegmentDir : kafka_server_4_logs - remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir") - remoteLogSegmentDir = os.path.basename(remoteLogSegmentPathName) - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") - localLogSegmentPath = logPathName + "/" + remoteLogSegmentDir - - # localLogSegmentPath : - # .../system_test/mirror_maker_testsuite/testcase_5002/logs/broker-4/kafka_server_4_logs - # |- test_1-0 - # |- 00000000000000000000.index - # |- 00000000000000000000.log - # |- 00000000000000000020.index - # |- 00000000000000000020.log - # |- . . . - # |- test_1-1 - # |- 00000000000000000000.index - # |- 00000000000000000000.log - # |- 00000000000000000020.index - # |- 00000000000000000020.log - # |- . . . - - # loop through all topicPartition directories such as : test_1-0, test_1-1, ... 
- for topicPartition in os.listdir(localLogSegmentPath): - # found a topic-partition directory - if os.path.isdir(localLogSegmentPath + "/" + topicPartition): - # md5 hasher - m = hashlib.md5() - - # logSegmentKey is like this : kafka_server_9_logs:test_1-0 (delimited by ':') - logSegmentKey = remoteLogSegmentDir + ":" + topicPartition - - # log segment files are located in : localLogSegmentPath + "/" + topicPartition - # sort the log segment files under each topic-partition and get the md5 checksum - for logFile in sorted(os.listdir(localLogSegmentPath + "/" + topicPartition)): - # only process log file: *.log - if logFile.endswith(".log"): - # read the log segment file as binary - offsetLogSegmentPathName = localLogSegmentPath + "/" + topicPartition + "/" + logFile - fin = file(offsetLogSegmentPathName, 'rb') - # keep reading 64K max at a time - while True: - data = fin.read(65536) - if not data: - fin.close() - break - # update it into the hasher - m.update(data) - - # update the md5 checksum into brokerLogCksumDict with the corresponding key - brokerLogCksumDict[logSegmentKey] = m.hexdigest() - - # print it out to the console for reference - pprint.pprint(brokerLogCksumDict) - - # brokerLogCksumDict will look like this: - # { - # 'kafka_server_1_logs:tests_1-0': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_1_logs:tests_1-1': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_1_logs:tests_2-0': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_1_logs:tests_2-1': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_2_logs:tests_1-0': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_2_logs:tests_1-1': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_2_logs:tests_2-0': 'd41d8cd98f00b204e9800998ecf8427e', - # 'kafka_server_2_logs:tests_2-1': 'd41d8cd98f00b204e9800998ecf8427e' - # } - - checksumDict = {} - # organize the checksum according to their topic-partition and checksumDict will look like this: - # { - # 'test_1-0' : ['d41d8cd98f00b204e9800998ecf8427e','d41d8cd98f00b204e9800998ecf8427e'], - # 'test_1-1' : ['d41d8cd98f00b204e9800998ecf8427e','d41d8cd98f00b204e9800998ecf8427e'], - # 'test_2-0' : ['d41d8cd98f00b204e9800998ecf8427e','d41d8cd98f00b204e9800998ecf8427e'], - # 'test_2-1' : ['d41d8cd98f00b204e9800998ecf8427e','d41d8cd98f00b204e9800998ecf8427e'] - # } - - for brokerTopicPartitionKey, md5Checksum in brokerLogCksumDict.items(): - tokens = brokerTopicPartitionKey.split(":") - brokerKey = tokens[0] - topicPartition = tokens[1] - if topicPartition in checksumDict: - # key already exist - checksumDict[topicPartition].append(md5Checksum) - else: - # new key => create a new list to store checksum - checksumDict[topicPartition] = [] - checksumDict[topicPartition].append(md5Checksum) - - failureCount = 0 - - # loop through checksumDict: the checksums should be the same inside each - # topic-partition's list. 
Otherwise, checksum mismatched is detected - for topicPartition, checksumList in checksumDict.items(): - checksumSet = frozenset(checksumList) - if len(checksumSet) > 1: - failureCount += 1 - logger.error("merged log segment checksum in " + topicPartition + " mismatched", extra=d) - elif len(checksumSet) == 1: - logger.debug("merged log segment checksum in " + topicPartition + " matched", extra=d) - else: - logger.error("unexpected error in " + topicPartition, extra=d) - - if failureCount == 0: - validationStatusDict["Validate for merged log segment checksum in cluster [" + clusterName + "]"] = "PASSED" - else: - validationStatusDict["Validate for merged log segment checksum in cluster [" + clusterName + "]"] = "FAILED" - -def start_simple_consumer(systemTestEnv, testcaseEnv, minStartingOffsetDict=None): - - clusterList = systemTestEnv.clusterEntityConfigDictList - consumerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterList, "role", "console_consumer") - for consumerConfig in consumerConfigList: - host = consumerConfig["hostname"] - entityId = consumerConfig["entity_id"] - jmxPort = consumerConfig["jmx_port"] - clusterName = consumerConfig["cluster_name"] - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterList, "entity_id", entityId, "java_home") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", entityId, "default") - - if host != "localhost": - consumerLogPath = replace_kafka_home(consumerLogPath, kafkaHome) - - # testcase configurations: - testcaseList = testcaseEnv.testcaseConfigsList - topic = system_test_utils.get_data_by_lookup_keyval(testcaseList, "entity_id", entityId, "topic") - - brokerListStr = "" - if clusterName == "source": - brokerListStr = testcaseEnv.userDefinedEnvVarDict["sourceBrokerList"] - elif clusterName == "target": - brokerListStr = testcaseEnv.userDefinedEnvVarDict["targetBrokerList"] - else: - logger.error("Invalid cluster name : " + clusterName, extra=d) - raise Exception("Invalid cluster name : " + clusterName) - - if len(brokerListStr) == 0: - logger.error("Empty broker list str", extra=d) - raise Exception("Empty broker list str") - - numPartitions = None - try: - numPartitions = testcaseEnv.testcaseArgumentsDict["num_partition"] - except: - pass - - if numPartitions is None: - logger.error("Invalid no. of partitions: " + numPartitions, extra=d) - raise Exception("Invalid no. 
of partitions: " + numPartitions) - else: - numPartitions = int(numPartitions) - - replicaIndex = 1 - startingOffset = -2 - brokerPortList = brokerListStr.split(',') - for brokerPort in brokerPortList: - - partitionId = 0 - while (partitionId < numPartitions): - logger.info("starting debug consumer for replica on [" + brokerPort + "] partition [" + str(partitionId) + "]", extra=d) - - if minStartingOffsetDict is not None: - topicPartition = topic + "-" + str(partitionId) - startingOffset = minStartingOffsetDict[topicPartition] - - outputFilePathName = consumerLogPath + "/simple_consumer_" + topic + "-" + str(partitionId) + "_r" + str(replicaIndex) + ".log" - brokerPortLabel = brokerPort.replace(":", "_") - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - kafkaRunClassBin + " kafka.tools.SimpleConsumerShell", - "--broker-list " + brokerListStr, - "--topic " + topic, - "--partition " + str(partitionId), - "--replica " + str(replicaIndex), - "--offset " + str(startingOffset), - "--no-wait-at-logend ", - " > " + outputFilePathName, - " & echo pid:$! > " + consumerLogPath + "/entity_" + entityId + "_pid'"] - - cmdStr = " ".join(cmdList) - - logger.debug("executing command: [" + cmdStr + "]", extra=d) - subproc_1 = system_test_utils.sys_call_return_subproc(cmdStr) - # dummy for-loop to wait until the process is completed - for line in subproc_1.stdout.readlines(): - pass - time.sleep(1) - - partitionId += 1 - replicaIndex += 1 - -def get_controller_attributes(systemTestEnv, testcaseEnv): - - logger.info("Querying Zookeeper for Controller info ...", extra=d) - - # keep track of controller data in this dict such as broker id & entity id - controllerDict = {} - - clusterConfigsList = systemTestEnv.clusterEntityConfigDictList - tcConfigsList = testcaseEnv.testcaseConfigsList - - zkDictList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigsList, "role", "zookeeper") - firstZkDict = zkDictList[0] - hostname = firstZkDict["hostname"] - zkEntityId = firstZkDict["entity_id"] - clientPort = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", zkEntityId, "clientPort") - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "java_home") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - cmdStrList = ["ssh " + hostname, - "\"JAVA_HOME=" + javaHome, - kafkaRunClassBin + " kafka.tools.ZooKeeperMainWrapper ", - "-server " + testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"], - "get /controller 2> /dev/null | tail -1\""] - - cmdStr = " ".join(cmdStrList) - logger.debug("executing command [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - if "brokerid" in line: - json_str = line.rstrip('\n') - json_data = json.loads(json_str) - brokerid = str(json_data["brokerid"]) - controllerDict["brokerid"] = brokerid - controllerDict["entity_id"] = system_test_utils.get_data_by_lookup_keyval( - tcConfigsList, "broker.id", brokerid, "entity_id") - else: - pass - - return controllerDict - -def getMinCommonStartingOffset(systemTestEnv, testcaseEnv, clusterName="source"): - - brokerLogStartOffsetDict = {} - minCommonStartOffsetDict = {} - - tcConfigsList = testcaseEnv.testcaseConfigsList - clusterConfigList = systemTestEnv.clusterEntityConfigDictList - allBrokerConfigList = 
system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker") - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id") - - # loop through all brokers - for brokerEntityId in sorted(brokerEntityIdList): - # remoteLogSegmentPathName : /tmp/kafka_server_4_logs - # => remoteLogSegmentDir : kafka_server_4_logs - remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir") - remoteLogSegmentDir = os.path.basename(remoteLogSegmentPathName) - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") - localLogSegmentPath = logPathName + "/" + remoteLogSegmentDir - - # loop through all topicPartition directories such as : test_1-0, test_1-1, ... - for topicPartition in sorted(os.listdir(localLogSegmentPath)): - # found a topic-partition directory - if os.path.isdir(localLogSegmentPath + "/" + topicPartition): - - # startingOffsetKey : : (eg. 1:test_1-0) - startingOffsetKey = brokerEntityId + ":" + topicPartition - - # log segment files are located in : localLogSegmentPath + "/" + topicPartition - # sort the log segment files under each topic-partition - for logFile in sorted(os.listdir(localLogSegmentPath + "/" + topicPartition)): - - # logFile is located at: - # system_test/xxxx_testsuite/testcase_xxxx/logs/broker-1/kafka_server_1_logs/test_1-0/00000000000000003800.log - if logFile.endswith(".log"): - matchObj = re.match("0*(.*)\.log", logFile) # remove the leading zeros & the file extension - startingOffset = matchObj.group(1) # this is the starting offset from the file name - if len(startingOffset) == 0: # when log filename is: 00000000000000000000.log - startingOffset = "0" - - # starting offset of a topic-partition can be retrieved from the filename of the first log segment - # => break out of this innest for-loop after processing the first log segment file - brokerLogStartOffsetDict[startingOffsetKey] = startingOffset - break - - # brokerLogStartOffsetDict is like this: - # {u'1:test_1-0': u'400', - # u'1:test_1-1': u'400', - # u'1:test_2-0': u'200', - # u'1:test_2-1': u'200', - # u'2:test_1-0': u'400', - # u'2:test_1-1': u'400', - # u'2:test_2-0': u'200', - # u'2:test_2-1': u'200', - # u'3:test_1-0': '0', - # u'3:test_1-1': '0', - # u'3:test_2-0': '0', - # u'3:test_2-1': '0'} - - # loop through brokerLogStartOffsetDict to get the min common starting offset for each topic-partition - for brokerTopicPartition in sorted(brokerLogStartOffsetDict.iterkeys()): - topicPartition = brokerTopicPartition.split(':')[1] - - if topicPartition in minCommonStartOffsetDict: - # key exists => if the new value is greater, replace the existing value with new - if minCommonStartOffsetDict[topicPartition] < brokerLogStartOffsetDict[brokerTopicPartition]: - minCommonStartOffsetDict[topicPartition] = brokerLogStartOffsetDict[brokerTopicPartition] - else: - # key doesn't exist => add it to the dictionary - minCommonStartOffsetDict[topicPartition] = brokerLogStartOffsetDict[brokerTopicPartition] - - # returning minCommonStartOffsetDict which is like this: - # {u'test_1-0': u'400', - # u'test_1-1': u'400', - # u'test_2-0': u'200', - # u'test_2-1': u'200'} - return minCommonStartOffsetDict - -def validate_simple_consumer_data_matched_across_replicas(systemTestEnv, testcaseEnv): - logger.debug("#### Inside validate_simple_consumer_data_matched_across_replicas", extra=d) - - validationStatusDict = 
testcaseEnv.validationStatusDict - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "console_consumer", "entity_id") - replicaFactor = testcaseEnv.testcaseArgumentsDict["replica_factor"] - numPartition = testcaseEnv.testcaseArgumentsDict["num_partition"] - - for consumerEntityId in consumerEntityIdList: - - # get topic string from multi consumer "entity" - topicStr = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", consumerEntityId, "topic") - - # the topic string could be multi topics separated by ',' - topicList = topicStr.split(',') - - for topic in topicList: - logger.debug("working on topic : " + topic, extra=d) - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", consumerEntityId, "default") - - # keep track of total msg count across replicas for each topic-partition - # (should be greater than 0 for passing) - totalMsgCounter = 0 - - # keep track of the mismatch msg count for each topic-partition - # (should be equal to 0 for passing) - mismatchCounter = 0 - - replicaIdxMsgIdList = [] - # replicaIdxMsgIdList : - # - This is a list of dictionaries of topic-partition (key) - # mapping to list of MessageID in that topic-partition (val) - # - The list index is mapped to (replicaId - 1) - # [ - # // list index = 0 => replicaId = idx(0) + 1 = 1 - # { - # "topic1-0" : [ "0000000001", "0000000002", "0000000003"], - # "topic1-1" : [ "0000000004", "0000000005", "0000000006"] - # }, - # // list index = 1 => replicaId = idx(1) + 1 = 2 - # { - # "topic1-0" : [ "0000000001", "0000000002", "0000000003"], - # "topic1-1" : [ "0000000004", "0000000005", "0000000006"] - # } - # ] - - # initialize replicaIdxMsgIdList - j = 0 - while j < int(replicaFactor): - newDict = {} - replicaIdxMsgIdList.append(newDict) - j += 1 - - # retrieve MessageID from all simple consumer log4j files - for logFile in sorted(os.listdir(consumerLogPath)): - - if logFile.startswith("simple_consumer_"+topic) and logFile.endswith(".log"): - logger.debug("working on file : " + logFile, extra=d) - matchObj = re.match("simple_consumer_"+topic+"-(\d*)_r(\d*)\.log" , logFile) - partitionId = int(matchObj.group(1)) - replicaIdx = int(matchObj.group(2)) - - consumerLogPathName = consumerLogPath + "/" + logFile - consumerMsgIdList = get_message_id(consumerLogPathName) - - topicPartition = topic + "-" + str(partitionId) - replicaIdxMsgIdList[replicaIdx - 1][topicPartition] = consumerMsgIdList - - logger.info("no. of messages on topic [" + topic + "] at " + logFile + " : " + str(len(consumerMsgIdList)), extra=d) - validationStatusDict["No. of messages from consumer on [" + topic + "] at " + logFile] = str(len(consumerMsgIdList)) - - # print replicaIdxMsgIdList - - # take the first dictionary of replicaIdxMsgIdList and compare with the rest - firstMsgIdDict = replicaIdxMsgIdList[0] - - # loop through all 'topic-partition' such as topic1-0, topic1-1, ... 
- for topicPartition in sorted(firstMsgIdDict.iterkeys()): - - # compare all replicas' MessageID in corresponding topic-partition - for i in range(len(replicaIdxMsgIdList)): - # skip the first dictionary - if i == 0: - totalMsgCounter += len(firstMsgIdDict[topicPartition]) - continue - - totalMsgCounter += len(replicaIdxMsgIdList[i][topicPartition]) - - # get the count of mismatch MessageID between first MessageID list and the other lists - diffCount = system_test_utils.diff_lists(firstMsgIdDict[topicPartition], replicaIdxMsgIdList[i][topicPartition]) - mismatchCounter += diffCount - logger.info("Mismatch count of topic-partition [" + topicPartition + "] in replica id [" + str(i+1) + "] : " + str(diffCount), extra=d) - - if mismatchCounter == 0 and totalMsgCounter > 0: - validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "PASSED" - else: - validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "FAILED" - - -def validate_data_matched_in_multi_topics_from_single_consumer_producer(systemTestEnv, testcaseEnv, replicationUtils): - logger.debug("#### Inside validate_data_matched_in_multi_topics_from_single_consumer_producer", extra=d) - - validationStatusDict = testcaseEnv.validationStatusDict - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - prodPerfCfgList = system_test_utils.get_dict_from_list_of_dicts(clusterEntityConfigDictList, "role", "producer_performance") - - for prodPerfCfg in prodPerfCfgList: - producerEntityId = prodPerfCfg["entity_id"] - topicStr = testcaseEnv.producerTopicsString - acks = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", producerEntityId, "request-num-acks") - - consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts(clusterEntityConfigDictList, "role", "console_consumer", "entity_id") - - matchingConsumerEntityId = None - for consumerEntityId in consumerEntityIdList: - consumerTopic = testcaseEnv.consumerTopicsString - if consumerTopic in topicStr: - matchingConsumerEntityId = consumerEntityId - break - - if matchingConsumerEntityId is None: - break - - producerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "producer_performance", producerEntityId, "default") - producerLogPathName = producerLogPath + "/producer_performance.log" - - consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") - consumerLogPathName = consumerLogPath + "/console_consumer.log" - - topicList = topicStr.split(',') - for topic in topicList: - consumerDuplicateCount = 0 - msgIdMissingInConsumerLogPathName = get_testcase_config_log_dir_pathname( - testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") \ - + "/msg_id_missing_in_consumer_" + topic + ".log" - producerMsgIdList = get_message_id(producerLogPathName, topic) - consumerMsgIdList = get_message_id(consumerLogPathName, topic) - producerMsgIdSet = set(producerMsgIdList) - consumerMsgIdSet = set(consumerMsgIdList) - - consumerDuplicateCount = len(consumerMsgIdList) -len(consumerMsgIdSet) - missingUniqConsumerMsgId = system_test_utils.subtract_list(producerMsgIdSet, consumerMsgIdSet) - - outfile = open(msgIdMissingInConsumerLogPathName, "w") - for id in missingUniqConsumerMsgId: - outfile.write(id + "\n") - outfile.close() - - logger.info("Producer entity id " + producerEntityId, extra=d) - logger.info("Consumer entity id " + matchingConsumerEntityId, extra=d) - logger.info("no. 
of unique messages on topic [" + topic + "] sent from publisher : " + str(len(producerMsgIdSet)), extra=d) - logger.info("no. of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgIdSet)), extra=d) - logger.info("no. of duplicate messages on topic [" + topic + "] received by consumer: " + str(consumerDuplicateCount), extra=d) - validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(producerMsgIdSet)) - validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(consumerMsgIdSet)) - - missingPercentage = len(missingUniqConsumerMsgId) * 100.00 / len(producerMsgIdSet) - logger.info("Data loss threshold % : " + str(replicationUtils.ackOneDataLossThresholdPercent), extra=d) - logger.warn("Data loss % on topic : " + topic + " : " + str(missingPercentage), extra=d) - - if ( len(missingUniqConsumerMsgId) == 0 and len(producerMsgIdSet) > 0 ): - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - elif (acks == "1"): - if missingPercentage <= replicationUtils.ackOneDataLossThresholdPercent: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - logger.warn("Test case (Acks = 1) passes with less than " + str(replicationUtils.ackOneDataLossThresholdPercent) \ - + "% data loss : [" + str(len(missingUniqConsumerMsgId)) + "] missing messages", extra=d) - else: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" - logger.error("Test case (Acks = 1) failed with more than " + str(replicationUtils.ackOneDataLossThresholdPercent) \ - + "% data loss : [" + str(len(missingUniqConsumerMsgId)) + "] missing messages", extra=d) - else: - validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" - logger.info("See " + msgIdMissingInConsumerLogPathName + " for missing MessageID", extra=d) - - -def validate_index_log(systemTestEnv, testcaseEnv, clusterName="source"): - logger.debug("#### Inside validate_index_log", extra=d) - - failureCount = 0 - brokerLogCksumDict = {} - testCaseBaseDir = testcaseEnv.testCaseBaseDir - tcConfigsList = testcaseEnv.testcaseConfigsList - validationStatusDict = testcaseEnv.validationStatusDict - clusterConfigList = systemTestEnv.clusterEntityConfigDictList - allBrokerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker") - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id") - - # loop through all brokers - for brokerEntityId in brokerEntityIdList: - logCksumDict = {} - # remoteLogSegmentPathName : /tmp/kafka_server_4_logs - # => remoteLogSegmentDir : kafka_server_4_logs - remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir") - remoteLogSegmentDir = os.path.basename(remoteLogSegmentPathName) - logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") - localLogSegmentPath = logPathName + "/" + remoteLogSegmentDir - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", brokerEntityId, "kafka_home") - hostname = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", brokerEntityId, "hostname") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - # localLogSegmentPath : - # .../system_test/mirror_maker_testsuite/testcase_5002/logs/broker-4/kafka_server_4_logs - # |- test_1-0 - # |- 
00000000000000000000.index - # |- 00000000000000000000.log - # |- 00000000000000000020.index - # |- 00000000000000000020.log - # |- . . . - # |- test_1-1 - # |- 00000000000000000000.index - # |- 00000000000000000000.log - # |- 00000000000000000020.index - # |- 00000000000000000020.log - # |- . . . - - # loop through all topicPartition directories such as : test_1-0, test_1-1, ... - for topicPartition in os.listdir(localLogSegmentPath): - # found a topic-partition directory - if os.path.isdir(localLogSegmentPath + "/" + topicPartition): - - # log segment files are located in : localLogSegmentPath + "/" + topicPartition - # sort the log segment files under each topic-partition and verify index - for logFile in sorted(os.listdir(localLogSegmentPath + "/" + topicPartition)): - # only process index file: *.index - if logFile.endswith(".index"): - offsetLogSegmentPathName = localLogSegmentPath + "/" + topicPartition + "/" + logFile - cmdStrList = ["ssh " + hostname, - kafkaRunClassBin + " kafka.tools.DumpLogSegments", - " --file " + offsetLogSegmentPathName, - "--verify-index-only 2>&1"] - cmdStr = " ".join(cmdStrList) - - showMismatchedIndexOffset = False - - logger.debug("executing command [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - line = line.rstrip('\n') - if showMismatchedIndexOffset: - logger.debug("#### [" + line + "]", extra=d) - elif "Mismatches in :" in line: - logger.debug("#### error found [" + line + "]", extra=d) - failureCount += 1 - showMismatchedIndexOffset = True - if subproc.wait() != 0: - logger.debug("#### error found [DumpLogSegments exited abnormally]", extra=d) - failureCount += 1 - - if failureCount == 0: - validationStatusDict["Validate index log in cluster [" + clusterName + "]"] = "PASSED" - else: - validationStatusDict["Validate index log in cluster [" + clusterName + "]"] = "FAILED" - -def get_leader_for(systemTestEnv, testcaseEnv, topic, partition): - logger.info("Querying Zookeeper for leader info for topic " + topic, extra=d) - clusterConfigsList = systemTestEnv.clusterEntityConfigDictList - tcConfigsList = testcaseEnv.testcaseConfigsList - - zkDictList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigsList, "role", "zookeeper") - firstZkDict = zkDictList[0] - hostname = firstZkDict["hostname"] - zkEntityId = firstZkDict["entity_id"] - clientPort = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", zkEntityId, "clientPort") - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "java_home") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - zkQueryStr = "get /brokers/topics/" + topic + "/partitions/" + str(partition) + "/state" - brokerid = '' - leaderEntityId = '' - - cmdStrList = ["ssh " + hostname, - "\"JAVA_HOME=" + javaHome, - kafkaRunClassBin + " kafka.tools.ZooKeeperMainWrapper ", - "-server " + testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"], - zkQueryStr + " 2> /dev/null | tail -1\""] - cmdStr = " ".join(cmdStrList) - logger.info("executing command [" + cmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - if "\"leader\"" in line: - line = line.rstrip('\n') - json_data = json.loads(line) - for key,val in json_data.items(): - if key == 'leader': - brokerid = str(val) - 
leaderEntityId = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "broker.id", brokerid, "entity_id") - break - return leaderEntityId - -def get_leader_attributes(systemTestEnv, testcaseEnv): - - logger.info("Querying Zookeeper for leader info ...", extra=d) - - # keep track of leader data in this dict such as broker id & entity id - leaderDict = {} - - clusterConfigsList = systemTestEnv.clusterEntityConfigDictList - tcConfigsList = testcaseEnv.testcaseConfigsList - - zkDictList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigsList, "role", "zookeeper") - firstZkDict = zkDictList[0] - hostname = firstZkDict["hostname"] - zkEntityId = firstZkDict["entity_id"] - clientPort = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", zkEntityId, "clientPort") - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigsList, "entity_id", zkEntityId, "java_home") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - - # this should have been updated in start_producer_in_thread - producerTopicsString = testcaseEnv.producerTopicsString - topics = producerTopicsString.split(',') - zkQueryStr = "get /brokers/topics/" + topics[0] + "/partitions/0/state" - brokerid = '' - - cmdStrList = ["ssh " + hostname, - "\"JAVA_HOME=" + javaHome, - kafkaRunClassBin + " kafka.tools.ZooKeeperMainWrapper ", - "-server " + testcaseEnv.userDefinedEnvVarDict["sourceZkConnectStr"], - zkQueryStr + " 2> /dev/null | tail -1\""] - cmdStr = " ".join(cmdStrList) - logger.info("executing command [" + cmdStr + "]", extra=d) - - subproc = system_test_utils.sys_call_return_subproc(cmdStr) - for line in subproc.stdout.readlines(): - if "\"leader\"" in line: - line = line.rstrip('\n') - json_data = json.loads(line) - for key,val in json_data.items(): - if key == 'leader': - brokerid = str(val) - - leaderDict["brokerid"] = brokerid - leaderDict["topic"] = topics[0] - leaderDict["partition"] = '0' - leaderDict["entity_id"] = system_test_utils.get_data_by_lookup_keyval( - tcConfigsList, "broker.id", brokerid, "entity_id") - leaderDict["hostname"] = system_test_utils.get_data_by_lookup_keyval( - clusterConfigsList, "entity_id", leaderDict["entity_id"], "hostname") - break - - print leaderDict - return leaderDict - -def write_consumer_properties(consumerProperties): - import tempfile - props_file_path = tempfile.gettempdir() + "/consumer.properties" - consumer_props_file=open(props_file_path,"w") - for key,value in consumerProperties.iteritems(): - consumer_props_file.write(key+"="+value+"\n") - consumer_props_file.close() - return props_file_path - diff --git a/system_test/utils/metrics.py b/system_test/utils/metrics.py deleted file mode 100644 index 3e663483202..00000000000 --- a/system_test/utils/metrics.py +++ /dev/null @@ -1,298 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/env python - -# =================================== -# file: metrics.py -# =================================== - -import inspect -import json -import logging -import os -import signal -import subprocess -import sys -import traceback - -import csv -import time -import matplotlib as mpl -mpl.use('Agg') -import matplotlib.pyplot as plt -from collections import namedtuple -import numpy - -from pyh import * -import kafka_system_test_utils -import system_test_utils - -logger = logging.getLogger("namedLogger") -thisClassName = '(metrics)' -d = {'name_of_class': thisClassName} - -attributeNameToNameInReportedFileMap = { - 'Min': 'min', - 'Max': 'max', - 'Mean': 'mean', - '50thPercentile': 'median', - 'StdDev': 'stddev', - '95thPercentile': '95%', - '99thPercentile': '99%', - '999thPercentile': '99.9%', - 'Count': 'count', - 'OneMinuteRate': '1 min rate', - 'MeanRate': 'mean rate', - 'FiveMinuteRate': '5 min rate', - 'FifteenMinuteRate': '15 min rate', - 'Value': 'value' -} - -def getCSVFileNameFromMetricsMbeanName(mbeanName): - return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv" - -def read_metrics_definition(metricsFile): - metricsFileData = open(metricsFile, "r").read() - metricsJsonData = json.loads(metricsFileData) - allDashboards = metricsJsonData['dashboards'] - allGraphs = [] - for dashboard in allDashboards: - dashboardName = dashboard['name'] - graphs = dashboard['graphs'] - for graph in graphs: - bean = graph['bean_name'] - allGraphs.append(graph) - attributes = graph['attributes'] - #print "Filtering on attributes " + attributes - return allGraphs - -def get_dashboard_definition(metricsFile, role): - metricsFileData = open(metricsFile, "r").read() - metricsJsonData = json.loads(metricsFileData) - allDashboards = metricsJsonData['dashboards'] - dashboardsForRole = [] - for dashboard in allDashboards: - if dashboard['role'] == role: - dashboardsForRole.append(dashboard) - return dashboardsForRole - -def ensure_valid_headers(headers, attributes): - if headers[0] != "# time": - raise Exception("First column should be time") - for header in headers: - logger.debug(header, extra=d) - # there should be exactly one column with a name that matches attributes - try: - attributeColumnIndex = headers.index(attributes) - return attributeColumnIndex - except ValueError as ve: - #print "#### attributes : ", attributes - #print "#### headers : ", headers - raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) + - " headers: {0}".format(",".join(headers))) - -def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile): - if not inputCsvFiles: return - - # create empty plot - fig=plt.figure() - fig.subplots_adjust(bottom=0.2) - ax=fig.add_subplot(111) - labelx = -0.3 # axes coords - ax.set_xlabel(xLabel) - ax.set_ylabel(yLabel) - ax.grid() - #ax.yaxis.set_label_coords(labelx, 0.5) - Coordinates = namedtuple("Coordinates", 'x y') - plots = [] - coordinates = [] - # read data for all files, organize by label in a dict - for fileAndLabel in zip(inputCsvFiles, labels): - inputCsvFile = 
fileAndLabel[0] - label = fileAndLabel[1] - csv_reader = list(csv.reader(open(inputCsvFile, "rb"))) - x,y = [],[] - xticks_labels = [] - try: - # read first line as the headers - headers = csv_reader.pop(0) - attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute]) - logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d) - start_time = (int)(os.path.getctime(inputCsvFile) * 1000) - int(csv_reader[0][0]) - for line in csv_reader: - if(len(line) == 0): - continue - yVal = float(line[attributeColumnIndex]) - xVal = int(line[0]) - y.append(yVal) - epoch= start_time + int(line[0]) - x.append(xVal) - xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch))) - coordinates.append(Coordinates(xVal, yVal)) - p1 = ax.plot(x,y) - plots.append(p1) - except Exception as e: - logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d) - traceback.print_exc() - # find xmin, xmax, ymin, ymax from all csv files - xmin = min(map(lambda coord: coord.x, coordinates)) - xmax = max(map(lambda coord: coord.x, coordinates)) - ymin = min(map(lambda coord: coord.y, coordinates)) - ymax = max(map(lambda coord: coord.y, coordinates)) - # set x and y axes limits - plt.xlim(xmin, xmax) - plt.ylim(ymin, ymax) - # set ticks accordingly - xticks = numpy.arange(xmin, xmax, 0.2*xmax) -# yticks = numpy.arange(ymin, ymax) - plt.xticks(xticks,xticks_labels,rotation=17) -# plt.yticks(yticks) - plt.legend(plots,labels, loc=2) - plt.title(title) - plt.savefig(outputGraphFile) - -def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig): - # go through each role and plot graphs for the role's metrics - roles = set(map(lambda config: config['role'], clusterConfig)) - for role in roles: - dashboards = get_dashboard_definition(metricsDescriptionFile, role) - entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role) - for dashboard in dashboards: - graphs = dashboard['graphs'] - # draw each graph for all entities - draw_graph_for_role(graphs, entities, role, testcaseEnv) - -def draw_graph_for_role(graphs, entities, role, testcaseEnv): - for graph in graphs: - graphName = graph['graph_name'] - yLabel = graph['y_label'] - inputCsvFiles = [] - graphLegendLabels = [] - for entity in entities: - entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics") - entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name']) - if(not os.path.exists(entityMetricCsvFile)): - logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d) - else: - inputCsvFiles.append(entityMetricCsvFile) - graphLegendLabels.append(role + "-" + entity['entity_id']) -# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id']) - try: - # plot one graph per mbean attribute - labels = graph['y_label'].split(',') - fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute, - graph['attributes'].split(',')) - attributes = graph['attributes'].split(',') - for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes): - outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg" - plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2], - "time", labelAndAttribute[0], labelAndAttribute[2], 
outputGraphFile) -# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id']) - except Exception as e: - logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d) - traceback.print_exc() - -def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig): - metricsHtmlFile = testcaseDashboardsDir + "/metrics.html" - centralDashboard = PyH('Kafka Metrics Dashboard') - centralDashboard << h1('Kafka Metrics Dashboard', cl='center') - roles = set(map(lambda config: config['role'], clusterConfig)) - for role in roles: - entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role) - dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role, - entities, testcaseDashboardsDir) - centralDashboard << a(role, href = dashboardPagePath) - centralDashboard << br() - - centralDashboard.printOut(metricsHtmlFile) - -def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir): - # build all dashboards for the input entity's based on its role. It can be one of kafka, zookeeper, producer - # consumer - dashboards = get_dashboard_definition(metricsDefinitionFile, role) - entityDashboard = PyH('Kafka Metrics Dashboard for ' + role) - entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center') - entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html" - for dashboard in dashboards: - # place the graph svg files in this dashboard - allGraphs = dashboard['graphs'] - for graph in allGraphs: - attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute, - graph['attributes'].split(',')) - for attribute in attributes: - graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg" - entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml") - entityDashboard.printOut(entityDashboardHtml) - return entityDashboardHtml - -def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv): - logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d) - jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi" - clusterConfig = systemTestEnv.clusterEntityConfigDictList - metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME - entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics") - dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role) - mbeansForRole = get_mbeans_for_role(dashboardsForRole) - - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home") - - for mbean in mbeansForRole: - outputCsvFile = entityMetricsDir + "/" + mbean + ".csv" - startMetricsCmdList = ["ssh " + jmxHost, - "'JAVA_HOME=" + javaHome, - "JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool", - "--jmx-url " + jmxUrl, - "--object-name " + mbean + " 1> ", - outputCsvFile + " & echo pid:$! 
> ", - entityMetricsDir + "/entity_pid'"] - - startMetricsCommand = " ".join(startMetricsCmdList) - logger.debug("executing command: [" + startMetricsCommand + "]", extra=d) - system_test_utils.async_sys_call(startMetricsCommand) - time.sleep(1) - - pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null" - logger.debug("executing command: [" + pidCmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) - - # keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid - # testcaseEnv.entityJmxParentPidDict: - # key: entity_id - # val: list of JMX ppid associated to that entity_id - # { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... } - for line in subproc.stdout.readlines(): - line = line.rstrip('\n') - logger.debug("line: [" + line + "]", extra=d) - if line.startswith("pid"): - logger.debug("found pid line: [" + line + "]", extra=d) - tokens = line.split(':') - thisPid = tokens[1] - if entityId not in testcaseEnv.entityJmxParentPidDict: - testcaseEnv.entityJmxParentPidDict[entityId] = [] - testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid) - #print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n" - - -def stop_metrics_collection(jmxHost, jmxPort): - logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d) - system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9") - -def get_mbeans_for_role(dashboardsForRole): - graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole)) - return set(map(lambda metric: metric['bean_name'], graphs)) diff --git a/system_test/utils/pyh.py b/system_test/utils/pyh.py deleted file mode 100644 index cff06f4c1c2..00000000000 --- a/system_test/utils/pyh.py +++ /dev/null @@ -1,161 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# @file: pyh.py -# @purpose: a HTML tag generator -# @author: Emmanuel Turlay - -__doc__ = """The pyh.py module is the core of the PyH package. PyH lets you -generate HTML tags from within your python code. -See http://code.google.com/p/pyh/ for documentation. 
-""" -__author__ = "Emmanuel Turlay " -__version__ = '$Revision: 63 $' -__date__ = '$Date: 2010-05-21 03:09:03 +0200 (Fri, 21 May 2010) $' - -from sys import _getframe, stdout, modules, version -nOpen={} - -nl = '\n' -doctype = '\n' -charset = '\n' - -tags = ['html', 'body', 'head', 'link', 'meta', 'div', 'p', 'form', 'legend', - 'input', 'select', 'span', 'b', 'i', 'option', 'img', 'script', - 'table', 'tr', 'td', 'th', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', - 'fieldset', 'a', 'title', 'body', 'head', 'title', 'script', 'br', 'table', - 'ul', 'li', 'ol', 'embed'] - -selfClose = ['input', 'img', 'link', 'br'] - -class Tag(list): - tagname = '' - - def __init__(self, *arg, **kw): - self.attributes = kw - if self.tagname : - name = self.tagname - self.isSeq = False - else: - name = 'sequence' - self.isSeq = True - self.id = kw.get('id', name) - #self.extend(arg) - for a in arg: self.addObj(a) - - def __iadd__(self, obj): - if isinstance(obj, Tag) and obj.isSeq: - for o in obj: self.addObj(o) - else: self.addObj(obj) - return self - - def addObj(self, obj): - if not isinstance(obj, Tag): obj = str(obj) - id=self.setID(obj) - setattr(self, id, obj) - self.append(obj) - - def setID(self, obj): - if isinstance(obj, Tag): - id = obj.id - n = len([t for t in self if isinstance(t, Tag) and t.id.startswith(id)]) - else: - id = 'content' - n = len([t for t in self if not isinstance(t, Tag)]) - if n: id = '%s_%03i' % (id, n) - if isinstance(obj, Tag): obj.id = id - return id - - def __add__(self, obj): - if self.tagname: return Tag(self, obj) - self.addObj(obj) - return self - - def __lshift__(self, obj): - self += obj - if isinstance(obj, Tag): return obj - - def render(self): - result = '' - if self.tagname: - result = '<%s%s%s>' % (self.tagname, self.renderAtt(), self.selfClose()*' /') - if not self.selfClose(): - for c in self: - if isinstance(c, Tag): - result += c.render() - else: result += c - if self.tagname: - result += '' % self.tagname - result += '\n' - return result - - def renderAtt(self): - result = '' - for n, v in self.attributes.iteritems(): - if n != 'txt' and n != 'open': - if n == 'cl': n = 'class' - result += ' %s="%s"' % (n, v) - return result - - def selfClose(self): - return self.tagname in selfClose - -def TagFactory(name): - class f(Tag): - tagname = name - f.__name__ = name - return f - -thisModule = modules[__name__] - -for t in tags: setattr(thisModule, t, TagFactory(t)) - -def ValidW3C(): - out = a(img(src='http://www.w3.org/Icons/valid-xhtml10', alt='Valid XHTML 1.0 Strict'), href='http://validator.w3.org/check?uri=referer') - return out - -class PyH(Tag): - tagname = 'html' - - def __init__(self, name='MyPyHPage'): - self += head() - self += body() - self.attributes = dict(xmlns='http://www.w3.org/1999/xhtml', lang='en') - self.head += title(name) - - def __iadd__(self, obj): - if isinstance(obj, head) or isinstance(obj, body): self.addObj(obj) - elif isinstance(obj, meta) or isinstance(obj, link): self.head += obj - else: - self.body += obj - id=self.setID(obj) - setattr(self, id, obj) - return self - - def addJS(self, *arg): - for f in arg: self.head += script(type='text/javascript', src=f) - - def addCSS(self, *arg): - for f in arg: self.head += link(rel='stylesheet', type='text/css', href=f) - - def printOut(self,file=''): - if file: f = open(file, 'w') - else: f = stdout - f.write(doctype) - f.write(self.render()) - f.flush() - if file: f.close() - diff --git a/system_test/utils/replication_utils.py b/system_test/utils/replication_utils.py deleted file mode 100644 
index cfd80b2d8be..00000000000 --- a/system_test/utils/replication_utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/env python - -# ================================================================= -# replication_utils.py -# - This module defines constant values specific to Kafka Replication -# and also provides helper functions for Replication system test. -# ================================================================= - -import logging -import sys - -class ReplicationUtils(object): - - thisClassName = '(ReplicationUtils)' - d = {'name_of_class': thisClassName} - - logger = logging.getLogger("namedLogger") - anonLogger = logging.getLogger("anonymousLogger") - - def __init__(self, testClassInstance): - super(ReplicationUtils, self).__init__() - self.logger.debug("#### constructor inside ReplicationUtils", extra=self.d) - - # leader attributes - self.isLeaderLogPattern = "Completed the leader state transition" - self.brokerShutDownCompletedPattern = "shut down completed" - - self.leaderAttributesDict = {} - - self.leaderAttributesDict["BROKER_SHUT_DOWN_COMPLETED_MSG"] = \ - self.brokerShutDownCompletedPattern - - self.leaderAttributesDict["REGX_BROKER_SHUT_DOWN_COMPLETED_PATTERN"] = \ - "\[(.*?)\] .* \[Kafka Server (.*?)\], " + \ - self.brokerShutDownCompletedPattern - - self.leaderAttributesDict["LEADER_ELECTION_COMPLETED_MSG"] = \ - self.isLeaderLogPattern - - self.leaderAttributesDict["REGX_LEADER_ELECTION_PATTERN"] = \ - "\[(.*?)\] .* Broker (.*?): " + \ - self.leaderAttributesDict["LEADER_ELECTION_COMPLETED_MSG"] + \ - " for topic (.*?) partition (.*?) \(.*" - - # Controller attributes - self.isControllerLogPattern = "Controller startup complete" - self.controllerAttributesDict = {} - self.controllerAttributesDict["CONTROLLER_STARTUP_COMPLETE_MSG"] = self.isControllerLogPattern - self.controllerAttributesDict["REGX_CONTROLLER_STARTUP_PATTERN"] = "\[(.*?)\] .* \[Controller (.*?)\]: " + \ - self.controllerAttributesDict["CONTROLLER_STARTUP_COMPLETE_MSG"] - - # Data Loss Percentage Threshold in Ack = 1 cases - self.ackOneDataLossThresholdPercent = 5.0 - diff --git a/system_test/utils/setup_utils.py b/system_test/utils/setup_utils.py deleted file mode 100644 index 0e8b7f97287..00000000000 --- a/system_test/utils/setup_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/env python - -# ================================================================= -# setup_utils.py -# - This module provides some basic helper functions. -# ================================================================= - -import logging -import kafka_system_test_utils -import sys - -class SetupUtils(object): - - # dict to pass user-defined attributes to logger argument: "extra" - # to use: just update "thisClassName" to the appropriate value - thisClassName = '(ReplicaBasicTest)' - d = {'name_of_class': thisClassName} - - logger = logging.getLogger("namedLogger") - anonLogger = logging.getLogger("anonymousLogger") - - def __init__(self): - d = {'name_of_class': self.__class__.__name__} - self.logger.debug("#### constructor inside SetupUtils", extra=self.d) - - def log_message(self, message): - print - self.anonLogger.info("======================================================") - self.anonLogger.info(message) - self.anonLogger.info("======================================================") - diff --git a/system_test/utils/system_test_utils.py b/system_test/utils/system_test_utils.py deleted file mode 100644 index e8529cd31f9..00000000000 --- a/system_test/utils/system_test_utils.py +++ /dev/null @@ -1,638 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-#!/usr/bin/env python - -# =================================== -# system_test_utils.py -# =================================== - -import copy -import difflib -import inspect -import json -import logging -import os -import re -import signal -import socket -import subprocess -import sys -import time - -logger = logging.getLogger("namedLogger") -aLogger = logging.getLogger("anonymousLogger") -thisClassName = '(system_test_utils)' -d = {'name_of_class': thisClassName} - - -def get_current_unix_timestamp(): - ts = time.time() - return "{0:.6f}".format(ts) - - -def get_local_hostname(): - return socket.gethostname() - - -def sys_call(cmdStr): - output = "" - #logger.info("executing command [" + cmdStr + "]", extra=d) - p = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - for line in p.stdout.readlines(): - output += line - return output - - -def remote_async_sys_call(host, cmd): - cmdStr = "ssh " + host + " \"" + cmd + "\"" - logger.info("executing command [" + cmdStr + "]", extra=d) - async_sys_call(cmdStr) - - -def remote_sys_call(host, cmd): - cmdStr = "ssh " + host + " \"" + cmd + "\"" - logger.info("executing command [" + cmdStr + "]", extra=d) - sys_call(cmdStr) - - -def get_dir_paths_with_prefix(fullPath, dirNamePrefix): - dirsList = [] - for dirName in os.listdir(fullPath): - if not os.path.isfile(dirName) and dirName.startswith(dirNamePrefix): - dirsList.append(os.path.abspath(fullPath + "/" + dirName)) - return dirsList - - -def get_testcase_prop_json_pathname(testcasePathName): - testcaseDirName = os.path.basename(testcasePathName) - return testcasePathName + "/" + testcaseDirName + "_properties.json" - -def get_json_list_data(infile): - json_file_str = open(infile, "r").read() - json_data = json.loads(json_file_str) - data_list = [] - - for key,settings in json_data.items(): - if type(settings) == list: - for setting in settings: - if type(setting) == dict: - kv_dict = {} - for k,v in setting.items(): - kv_dict[k] = v - data_list.append(kv_dict) - - return data_list - - -def get_dict_from_list_of_dicts(listOfDicts, lookupKey, lookupVal): - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'} - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'} - # - # Usage: - # - # 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role") - # returns: - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'} - # - # 2. 
get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role") - # returns: - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'} - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'} - - retList = [] - if ( lookupVal is None or lookupKey is None ): - for dict in listOfDicts: - for k,v in dict.items(): - if ( k == fieldToRetrieve ): # match with fieldToRetrieve ONLY - retList.append( dict ) - else: - for dict in listOfDicts: - for k,v in dict.items(): - if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal - retList.append( dict ) - - return retList - - -def get_data_from_list_of_dicts(listOfDicts, lookupKey, lookupVal, fieldToRetrieve): - # Sample List of Dicts: - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'} - # {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'} - # - # Usage: - # 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role") - # => returns ['zookeeper'] - # 2. get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role") - # => returns ['zookeeper', 'broker'] - - retList = [] - if ( lookupVal is None or lookupKey is None ): - for dict in listOfDicts: - for k,v in dict.items(): - if ( k == fieldToRetrieve ): # match with fieldToRetrieve ONLY - try: - retList.append( dict[fieldToRetrieve] ) - except: - logger.debug("field not found: " + fieldToRetrieve, extra=d) - else: - for dict in listOfDicts: - for k,v in dict.items(): - if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal - try: - retList.append( dict[fieldToRetrieve] ) - except: - logger.debug("field not found: " + fieldToRetrieve, extra=d) - return retList - -def get_data_by_lookup_keyval(listOfDict, lookupKey, lookupVal, fieldToRetrieve): - returnValue = "" - returnValuesList = get_data_from_list_of_dicts(listOfDict, lookupKey, lookupVal, fieldToRetrieve) - if len(returnValuesList) > 0: - returnValue = returnValuesList[0] - - return returnValue - -def get_json_dict_data(infile): - json_file_str = open(infile, "r").read() - json_data = json.loads(json_file_str) - data_dict = {} - - for key,val in json_data.items(): - if ( type(val) != list ): - data_dict[key] = val - - return data_dict - -def get_remote_child_processes(hostname, pid): - pidStack = [] - - cmdList = ['''ssh ''' + hostname, - ''''pid=''' + pid + '''; prev_pid=""; echo $pid;''', - '''while [[ "x$pid" != "x" ]];''', - '''do prev_pid=$pid;''', - ''' for child in $(ps -o pid,ppid ax | awk "{ if ( \$2 == $pid ) { print \$1 }}");''', - ''' do echo $child; pid=$child;''', - ''' done;''', - ''' if [ $prev_pid == $pid ]; then''', - ''' break;''', - ''' fi;''', - '''done' 2> /dev/null'''] - - cmdStr = " ".join(cmdList) - logger.debug("executing command [" + cmdStr, extra=d) - - subproc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE) - for line in subproc.stdout.readlines(): - procId = line.rstrip('\n') - pidStack.append(procId) - return pidStack - -def get_child_processes(pid): - pidStack = [] - currentPid = pid - parentPid = "" - pidStack.append(pid) - - while ( len(currentPid) > 0 ): - psCommand = subprocess.Popen("ps -o pid --ppid %s --noheaders" % currentPid, shell=True, stdout=subprocess.PIPE) - psOutput = psCommand.stdout.read() - outputLine = psOutput.rstrip('\n') - childPid = outputLine.lstrip() - - if ( len(childPid) 
> 0 ): - pidStack.append(childPid) - currentPid = childPid - else: - break - return pidStack - -def sigterm_remote_process(hostname, pidStack): - - while ( len(pidStack) > 0 ): - pid = pidStack.pop() - cmdStr = "ssh " + hostname + " 'kill -15 " + pid + "'" - - try: - logger.debug("executing command [" + cmdStr + "]", extra=d) - sys_call_return_subproc(cmdStr) - except: - print "WARN - pid:",pid,"not found" - raise - -def sigkill_remote_process(hostname, pidStack): - - while ( len(pidStack) > 0 ): - pid = pidStack.pop() - cmdStr = "ssh " + hostname + " 'kill -9 " + pid + "'" - - try: - logger.debug("executing command [" + cmdStr + "]", extra=d) - sys_call_return_subproc(cmdStr) - except: - print "WARN - pid:",pid,"not found" - raise - -def simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTimeInSeconds): - pausedPidStack = [] - - # pause the processes - while len(pidStack) > 0: - pid = pidStack.pop() - pausedPidStack.append(pid) - cmdStr = "ssh " + hostname + " 'kill -SIGSTOP " + pid + "'" - - try: - logger.debug("executing command [" + cmdStr + "]", extra=d) - sys_call_return_subproc(cmdStr) - except: - print "WARN - pid:",pid,"not found" - raise - - time.sleep(int(pauseTimeInSeconds)) - - # resume execution of the processes - while len(pausedPidStack) > 0: - pid = pausedPidStack.pop() - cmdStr = "ssh " + hostname + " 'kill -SIGCONT " + pid + "'" - - try: - logger.debug("executing command [" + cmdStr + "]", extra=d) - sys_call_return_subproc(cmdStr) - except: - print "WARN - pid:",pid,"not found" - raise - -def terminate_process(pidStack): - while ( len(pidStack) > 0 ): - pid = pidStack.pop() - try: - os.kill(int(pid), signal.SIGTERM) - except: - print "WARN - pid:",pid,"not found" - raise - - -def convert_keyval_to_cmd_args(configFilePathname): - cmdArg = "" - inlines = open(configFilePathname, "r").readlines() - for inline in inlines: - line = inline.rstrip() - tokens = line.split('=', 1) - - if (len(tokens) == 2): - cmdArg = cmdArg + " --" + tokens[0] + " " + tokens[1] - elif (len(tokens) == 1): - cmdArg = cmdArg + " --" + tokens[0] - else: - print "ERROR: unexpected arguments list", line - return cmdArg - - -def async_sys_call(cmd_str): - subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - -def sys_call_return_subproc(cmd_str): - p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - return p - - -def remote_host_file_exists(hostname, pathname): - cmdStr = "ssh " + hostname + " 'ls " + pathname + "'" - logger.debug("executing command: [" + cmdStr + "]", extra=d) - subproc = sys_call_return_subproc(cmdStr) - - for line in subproc.stdout.readlines(): - if "No such file or directory" in line: - return False - return True - - -def remote_host_directory_exists(hostname, path): - cmdStr = "ssh " + hostname + " 'ls -d " + path + "'" - logger.debug("executing command: [" + cmdStr + "]", extra=d) - subproc = sys_call_return_subproc(cmdStr) - - for line in subproc.stdout.readlines(): - if "No such file or directory" in line: - return False - return True - - -def remote_host_processes_stopped(hostname): - cmdStr = "ssh " + hostname + \ - " \"ps auxw | grep -v grep | grep -v Bootstrap | grep -i 'java\|run\-\|producer\|consumer\|jmxtool\|kafka' | wc -l\" 2> /dev/null" - - logger.info("executing command: [" + cmdStr + "]", extra=d) - subproc = sys_call_return_subproc(cmdStr) - - for line in subproc.stdout.readlines(): - line = line.rstrip('\n') - logger.info("no. 
of running processes found : [" + line + "]", extra=d) - if line == '0': - return True - return False - - -def setup_remote_hosts(systemTestEnv): - # sanity check on remote hosts to make sure: - # - all directories (eg. java_home) specified in cluster_config.json exists in all hosts - # - no conflicting running processes in remote hosts - - aLogger.info("=================================================") - aLogger.info("setting up remote hosts ...") - aLogger.info("=================================================") - - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList - - localKafkaHome = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/..") - - # when configuring "default" java_home, use JAVA_HOME environment variable, if exists - # otherwise, use the directory with the java binary - localJavaHome = os.environ.get('JAVA_HOME') - if localJavaHome is not None: - localJavaBin = localJavaHome + '/bin/java' - else: - subproc = sys_call_return_subproc("which java") - for line in subproc.stdout.readlines(): - if line.startswith("which: no "): - logger.error("No Java binary found in local host", extra=d) - return False - else: - line = line.rstrip('\n') - localJavaBin = line - matchObj = re.match("(.*)\/bin\/java$", line) - localJavaHome = matchObj.group(1) - - listIndex = -1 - for clusterEntityConfigDict in clusterEntityConfigDictList: - listIndex += 1 - - hostname = clusterEntityConfigDict["hostname"] - kafkaHome = clusterEntityConfigDict["kafka_home"] - javaHome = clusterEntityConfigDict["java_home"] - - if hostname == "localhost" and javaHome == "default": - clusterEntityConfigDictList[listIndex]["java_home"] = localJavaHome - - if hostname == "localhost" and kafkaHome == "default": - clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome - if hostname == "localhost" and kafkaHome == "system_test/migration_tool_testsuite/0.7": - clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome + "/system_test/migration_tool_testsuite/0.7" - - kafkaHome = clusterEntityConfigDict["kafka_home"] - javaHome = clusterEntityConfigDict["java_home"] - - logger.debug("checking java binary [" + localJavaBin + "] in host [" + hostname + "]", extra=d) - if not remote_host_directory_exists(hostname, javaHome): - logger.error("Directory not found: [" + javaHome + "] in host [" + hostname + "]", extra=d) - return False - - logger.debug("checking directory [" + kafkaHome + "] in host [" + hostname + "]", extra=d) - if not remote_host_directory_exists(hostname, kafkaHome): - logger.info("Directory not found: [" + kafkaHome + "] in host [" + hostname + "]", extra=d) - if hostname == "localhost": - return False - else: - localKafkaSourcePath = systemTestEnv.SYSTEM_TEST_BASE_DIR + "/.." 
- logger.debug("copying local copy of [" + localKafkaSourcePath + "] to " + hostname + ":" + kafkaHome, extra=d) - copy_source_to_remote_hosts(hostname, localKafkaSourcePath, kafkaHome) - - return True - -def copy_source_to_remote_hosts(hostname, sourceDir, destDir): - - cmdStr = "rsync -avz --delete-before " + sourceDir + "/ " + hostname + ":" + destDir - logger.info("executing command [" + cmdStr + "]", extra=d) - subproc = sys_call_return_subproc(cmdStr) - - for line in subproc.stdout.readlines(): - dummyVar = 1 - - -def remove_kafka_home_dir_at_remote_hosts(hostname, kafkaHome): - - if remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"): - cmdStr = "ssh " + hostname + " 'chmod -R 777 " + kafkaHome + "'" - logger.info("executing command [" + cmdStr + "]", extra=d) - sys_call(cmdStr) - - cmdStr = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'" - logger.info("executing command [" + cmdStr + "]", extra=d) - #sys_call(cmdStr) - else: - logger.warn("possible destructive command [" + cmdStr + "]", extra=d) - logger.warn("check config file: system_test/cluster_config.properties", extra=d) - logger.warn("aborting test...", extra=d) - sys.exit(1) - -def get_md5_for_file(filePathName, blockSize=8192): - md5 = hashlib.md5() - f = open(filePathName, 'rb') - - while True: - data = f.read(blockSize) - if not data: - break - md5.update(data) - return md5.digest() - -def load_cluster_config(clusterConfigPathName, clusterEntityConfigDictList): - # empty the list - clusterEntityConfigDictList[:] = [] - - # retrieve each entity's data from cluster config json file - # as "dict" and enter them into a "list" - jsonFileContent = open(clusterConfigPathName, "r").read() - jsonData = json.loads(jsonFileContent) - for key, cfgList in jsonData.items(): - if key == "cluster_config": - for cfg in cfgList: - clusterEntityConfigDictList.append(cfg) - -def setup_remote_hosts_with_testcase_level_cluster_config(systemTestEnv, testCasePathName): - # ======================================================================= - # starting a new testcase, check for local cluster_config.json - # ======================================================================= - # 1. if there is a xxxx_testsuite/testcase_xxxx/cluster_config.json - # => load it into systemTestEnv.clusterEntityConfigDictList - # 2. if there is NO testcase_xxxx/cluster_config.json but has a xxxx_testsuite/cluster_config.json - # => retore systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite - # 3. 
if there is NO testcase_xxxx/cluster_config.json NOR xxxx_testsuite/cluster_config.json - # => restore system_test/cluster_config.json - - testCaseLevelClusterConfigPathName = testCasePathName + "/cluster_config.json" - - if os.path.isfile(testCaseLevelClusterConfigPathName): - # if there is a cluster_config.json in this directory, load it and use it for this testsuite - logger.info("found a new cluster_config : " + testCaseLevelClusterConfigPathName, extra=d) - - # empty the current cluster config list - systemTestEnv.clusterEntityConfigDictList[:] = [] - - # load the cluster config for this testcase level - load_cluster_config(testCaseLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList) - - # back up this testcase level cluster config - systemTestEnv.clusterEntityConfigDictListLastFoundInTestCase = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList) - - elif len(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) > 0: - # if there is NO testcase_xxxx/cluster_config.json, but has a xxxx_testsuite/cluster_config.json - # => restore the config in xxxx_testsuite/cluster_config.json - - # empty the current cluster config list - systemTestEnv.clusterEntityConfigDictList[:] = [] - - # restore the system_test/cluster_config.json - systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) - - else: - # if there is NONE, restore the config in system_test/cluster_config.json - - # empty the current cluster config list - systemTestEnv.clusterEntityConfigDictList[:] = [] - - # restore the system_test/cluster_config.json - systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel) - - # set up remote hosts - if not setup_remote_hosts(systemTestEnv): - logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d) - print - sys.exit(1) - print - -def setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName): - # ======================================================================= - # starting a new testsuite, check for local cluster_config.json: - # ======================================================================= - # 1. if there is a xxxx_testsuite/cluster_config.son - # => load it into systemTestEnv.clusterEntityConfigDictList - # 2. 
if there is NO xxxx_testsuite/cluster_config.son - # => restore system_test/cluster_config.json - - testSuiteLevelClusterConfigPathName = testModulePathName + "/cluster_config.json" - - if os.path.isfile(testSuiteLevelClusterConfigPathName): - # if there is a cluster_config.json in this directory, load it and use it for this testsuite - logger.info("found a new cluster_config : " + testSuiteLevelClusterConfigPathName, extra=d) - - # empty the current cluster config list - systemTestEnv.clusterEntityConfigDictList[:] = [] - - # load the cluster config for this testsuite level - load_cluster_config(testSuiteLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList) - - # back up this testsuite level cluster config - systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList) - - else: - # if there is NONE, restore the config in system_test/cluster_config.json - - # empty the last testsuite level cluster config list - systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite[:] = [] - - # empty the current cluster config list - systemTestEnv.clusterEntityConfigDictList[:] = [] - - # restore the system_test/cluster_config.json - systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel) - - # set up remote hosts - if not setup_remote_hosts(systemTestEnv): - logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d) - print - sys.exit(1) - print - -# ================================================= -# lists_diff_count -# - find the no. of different items in both lists -# - both lists need not be sorted -# - input lists won't be changed -# ================================================= -def lists_diff_count(a, b): - c = list(b) - d = [] - for item in a: - try: - c.remove(item) - except: - d.append(item) - - if len(d) > 0: - print "#### Mismatch MessageID" - print d - - return len(c) + len(d) - -# ================================================= -# subtract_list -# - subtract items in listToSubtract from mainList -# and return the resulting list -# - both lists need not be sorted -# - input lists won't be changed -# ================================================= -def subtract_list(mainList, listToSubtract): - remainingList = list(mainList) - for item in listToSubtract: - try: - remainingList.remove(item) - except: - pass - return remainingList - -# ================================================= -# diff_lists -# - find the diff of 2 lists and return the -# total no. of mismatch from both lists -# - diff of both lists includes: -# - no. 
of items mismatch -# - ordering of the items -# -# sample lists: -# a = ['8','4','3','2','1'] -# b = ['8','3','4','2','1'] -# -# difflib will return the following: -# 8 -# + 3 -# 4 -# - 3 -# 2 -# 1 -# -# diff_lists(a,b) returns 2 and prints the following: -# #### only in seq 2 : + 3 -# #### only in seq 1 : - 3 -# ================================================= -def diff_lists(a, b): - mismatchCount = 0 - d = difflib.Differ() - diff = d.compare(a,b) - - for item in diff: - result = item[0:1].strip() - if len(result) > 0: - mismatchCount += 1 - if '-' in result: - logger.debug("#### only in seq 1 : " + item, extra=d) - elif '+' in result: - logger.debug("#### only in seq 2 : " + item, extra=d) - - return mismatchCount - diff --git a/system_test/utils/testcase_env.py b/system_test/utils/testcase_env.py deleted file mode 100644 index 1d2fb5762f5..00000000000 --- a/system_test/utils/testcase_env.py +++ /dev/null @@ -1,173 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/usr/bin/env python - -# =================================== -# testcase_env.py -# =================================== - -import json -import os -import sys -import thread - -import system_test_utils - -class TestcaseEnv(): - def __init__(self, systemTestEnv, classInstance): - self.systemTestEnv = systemTestEnv - - # ================================ - # Generic testcase environment - # ================================ - - # dictionary of entity_id to ppid for Zookeeper entities - # key: entity_id - # val: ppid of Zookeeper associated to that entity_id - # { 0: 12345, 1: 12389, ... } - self.entityZkParentPidDict = {} - - # dictionary of entity_id to ppid for broker entities - # key: entity_id - # val: ppid of broker associated to that entity_id - # { 0: 12345, 1: 12389, ... } - self.entityBrokerParentPidDict = {} - - # dictionary of entity_id to ppid for mirror-maker entities - # key: entity_id - # val: ppid of broker associated to that entity_id - # { 0: 12345, 1: 12389, ... } - self.entityMirrorMakerParentPidDict = {} - - # dictionary of entity_id to ppid for console-consumer entities - # key: entity_id - # val: ppid of console consumer associated to that entity_id - # { 0: 12345, 1: 12389, ... } - self.entityConsoleConsumerParentPidDict = {} - - # dictionary of entity_id to ppid for migration tool entities - # key: entity_id - # val: ppid of broker associated to that entity_id - # { 0: 12345, 1: 12389, ... } - self.entityMigrationToolParentPidDict = {} - - # dictionary of entity_id to list of JMX ppid - # key: entity_id - # val: list of JMX ppid associated to that entity_id - # { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... 
} - self.entityJmxParentPidDict = {} - - # dictionary of hostname-topic-ppid for consumer - # key: hostname - # val: dict of topic-ppid - # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... } - self.consumerHostParentPidDict = {} - - # dictionary of hostname-topic-ppid for producer - # key: hostname - # val: dict of topic-ppid - # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... } - self.producerHostParentPidDict = {} - - # list of testcase configs - self.testcaseConfigsList = [] - - # dictionary to keep track of testcase arguments such as replica_factor, num_partition - self.testcaseArgumentsDict = {} - - - # gather the test case related info and add to an SystemTestEnv object - self.testcaseResultsDict = {} - self.testcaseResultsDict["_test_class_name"] = classInstance.__class__.__name__ - self.testcaseResultsDict["_test_case_name"] = "" - self.validationStatusDict = {} - self.testcaseResultsDict["validation_status"] = self.validationStatusDict - self.systemTestEnv.systemTestResultsList.append(self.testcaseResultsDict) - - # FIXME: in a distributed environement, kafkaBaseDir could be different in individual host - # => TBD - self.kafkaBaseDir = "" - - self.systemTestBaseDir = systemTestEnv.SYSTEM_TEST_BASE_DIR - - # to be initialized in the Test Module - self.testSuiteBaseDir = "" - self.testCaseBaseDir = "" - self.testCaseLogsDir = "" - self.testCaseDashboardsDir = "" - self.testcasePropJsonPathName = "" - self.testcaseNonEntityDataDict = {} - - # ================================ - # dictionary to keep track of - # user-defined environment variables - # ================================ - # LEADER_ELECTION_COMPLETED_MSG = "completed the leader state transition" - # REGX_LEADER_ELECTION_PATTERN = "\[(.*?)\] .* Broker (.*?) " + \ - # LEADER_ELECTION_COMPLETED_MSG + \ - # " for topic (.*?) partition (.*?) 
\(.*" - # zkConnectStr = "" - # consumerLogPathName = "" - # consumerConfigPathName = "" - # producerLogPathName = "" - # producerConfigPathName = "" - self.userDefinedEnvVarDict = {} - - # Lock object for producer threads synchronization - self.lock = thread.allocate_lock() - - self.numProducerThreadsRunning = 0 - - # to be used when validating data match - these variables will be - # updated by kafka_system_test_utils.start_producer_in_thread - self.producerTopicsString = "" - self.consumerTopicsString = "" - - def initWithKnownTestCasePathName(self, testCasePathName): - testcaseDirName = os.path.basename(testCasePathName) - self.testcaseResultsDict["_test_case_name"] = testcaseDirName - self.testCaseBaseDir = testCasePathName - self.testCaseLogsDir = self.testCaseBaseDir + "/logs" - self.testCaseDashboardsDir = self.testCaseBaseDir + "/dashboards" - - # find testcase properties json file - self.testcasePropJsonPathName = system_test_utils.get_testcase_prop_json_pathname(testCasePathName) - - # get the dictionary that contains the testcase arguments and description - self.testcaseNonEntityDataDict = system_test_utils.get_json_dict_data(self.testcasePropJsonPathName) - - def printTestCaseDescription(self, testcaseDirName): - testcaseDescription = "" - for k,v in self.testcaseNonEntityDataDict.items(): - if ( k == "description" ): - testcaseDescription = v - - print "\n" - print "=======================================================================================" - print "Test Case Name :", testcaseDirName - print "=======================================================================================" - print "Description :" - for step in sorted(testcaseDescription.iterkeys()): - print " ", step, ":", testcaseDescription[step] - print "=======================================================================================" - print "Test Case Args :" - for k,v in self.testcaseArgumentsDict.items(): - print " ", k, " : ", v - self.testcaseResultsDict["arg : " + k] = v - print "=======================================================================================" - -