MINOR: Next release will be 1.0.0

Author: Ismael Juma <ismael@juma.me.uk>

Reviewers: Guozhang Wang <wangguoz@gmail.com>

Closes #3580 from ijuma/bump-to-1.0.0-SNAPSHOT
Ismael Juma 2017-07-26 13:01:41 -07:00 committed by Guozhang Wang
parent f8498ec9e2
commit 5effe72390
7 changed files with 21 additions and 31 deletions

View File

@@ -71,8 +71,8 @@ object ApiVersion {
"0.11.0-IV2" -> KAFKA_0_11_0_IV2,
"0.11.0" -> KAFKA_0_11_0_IV2,
// Introduced LeaderAndIsrRequest V1, UpdateMetadataRequest V4 and FetchRequest V6 via KIP-112
"0.11.1-IV0" -> KAFKA_0_11_1_IV0,
"0.11.1" -> KAFKA_0_11_1_IV0
"1.0-IV0" -> KAFKA_1_0_IV0,
"1.0" -> KAFKA_1_0_IV0
)
private val versionPattern = "\\.".r
@@ -175,8 +175,8 @@ case object KAFKA_0_11_0_IV2 extends ApiVersion {
val id: Int = 12
}
-case object KAFKA_0_11_1_IV0 extends ApiVersion {
-val version: String = "0.11.1-IV0"
+case object KAFKA_1_0_IV0 extends ApiVersion {
+val version: String = "1.0-IV0"
val messageFormatVersion: Byte = RecordBatch.MAGIC_VALUE_V2
val id: Int = 13
}
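
For orientation, the two hunks above rename the newest inter-broker protocol entry: both the release shorthand "1.0" and the inter-version name "1.0-IV0" now resolve to the same constant with id 13, which is how values of inter.broker.protocol.version are parsed and ordered. A standalone sketch of that lookup-and-compare pattern (simplified, hypothetical names; not the real kafka.api.ApiVersion):

```scala
// Standalone sketch of the lookup-and-compare pattern above; MiniApiVersion and its
// ordering are illustrative stand-ins, not the real kafka.api.ApiVersion hierarchy.
object ApiVersionSketch {
  final case class MiniApiVersion(version: String, id: Int) extends Ordered[MiniApiVersion] {
    def compare(that: MiniApiVersion): Int = id.compareTo(that.id)
  }

  val KAFKA_0_11_0_IV2 = MiniApiVersion("0.11.0-IV2", 12) // id 12, as shown in the hunk above
  val KAFKA_1_0_IV0    = MiniApiVersion("1.0-IV0", 13)    // id 13, as shown in the hunk above

  // Both the release shorthand and the IV name map to the same constant,
  // mirroring the "1.0" / "1.0-IV0" entries added in this commit.
  private val versionMap = Map(
    "0.11.0-IV2" -> KAFKA_0_11_0_IV2,
    "0.11.0"     -> KAFKA_0_11_0_IV2,
    "1.0-IV0"    -> KAFKA_1_0_IV0,
    "1.0"        -> KAFKA_1_0_IV0
  )

  def apply(versionString: String): MiniApiVersion =
    versionMap.getOrElse(versionString,
      throw new IllegalArgumentException(s"Unknown version string: $versionString"))

  def main(args: Array[String]): Unit = {
    // A broker configured with inter.broker.protocol.version=1.0 sorts above 0.11.0-IV2.
    println(ApiVersionSketch("1.0") >= KAFKA_0_11_0_IV2) // true
  }
}
```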

View File

@@ -394,7 +394,7 @@ class ControllerBrokerRequestBatch(controller: KafkaController) extends Logging
def sendRequestsToBrokers(controllerEpoch: Int) {
try {
val leaderAndIsrRequestVersion: Short =
-if (controller.config.interBrokerProtocolVersion >= KAFKA_0_11_1_IV0) 1
+if (controller.config.interBrokerProtocolVersion >= KAFKA_1_0_IV0) 1
else 0
leaderAndIsrRequestMap.foreach { case (broker, leaderAndIsrPartitionStates) =>
@@ -425,7 +425,7 @@ class ControllerBrokerRequestBatch(controller: KafkaController) extends Logging
val partitionStates = Map(updateMetadataRequestPartitionInfoMap.toArray:_*)
val updateMetadataRequestVersion: Short =
-if (controller.config.interBrokerProtocolVersion >= KAFKA_0_11_1_IV0) 4
+if (controller.config.interBrokerProtocolVersion >= KAFKA_1_0_IV0) 4
else if (controller.config.interBrokerProtocolVersion >= KAFKA_0_10_2_IV0) 3
else if (controller.config.interBrokerProtocolVersion >= KAFKA_0_10_0_IV1) 2
else if (controller.config.interBrokerProtocolVersion >= KAFKA_0_9_0) 1
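
These two hunks are where the rename takes effect on the wire: the controller compares the configured inter-broker protocol version against KAFKA_1_0_IV0 to decide whether to send LeaderAndIsrRequest v1 and UpdateMetadataRequest v4 (the KIP-112 versions listed in the upgrade notes below). A rough, self-contained sketch of that selection cascade, with placeholder ids standing in for the ApiVersion constants:

```scala
// Rough sketch of the controller's version-selection cascade. The ids below are
// placeholders for the ApiVersion constants referenced in the hunks above; only
// KAFKA_1_0_IV0 = 13 is taken from the ApiVersion diff in this commit.
object RequestVersionSketch {
  val KAFKA_0_9_0      = 3   // placeholder ordering id
  val KAFKA_0_10_0_IV1 = 5   // placeholder ordering id
  val KAFKA_0_10_2_IV0 = 9   // placeholder ordering id
  val KAFKA_1_0_IV0    = 13  // id shown in the ApiVersion hunk above

  def leaderAndIsrRequestVersion(interBrokerProtocolVersion: Int): Short =
    if (interBrokerProtocolVersion >= KAFKA_1_0_IV0) 1 else 0

  def updateMetadataRequestVersion(interBrokerProtocolVersion: Int): Short =
    if (interBrokerProtocolVersion >= KAFKA_1_0_IV0) 4
    else if (interBrokerProtocolVersion >= KAFKA_0_10_2_IV0) 3
    else if (interBrokerProtocolVersion >= KAFKA_0_10_0_IV1) 2
    else if (interBrokerProtocolVersion >= KAFKA_0_9_0) 1
    else 0

  def main(args: Array[String]): Unit = {
    // A cluster already bumped to inter.broker.protocol.version=1.0:
    println(leaderAndIsrRequestVersion(KAFKA_1_0_IV0))   // 1: carries the is_new field
    println(updateMetadataRequestVersion(KAFKA_1_0_IV0)) // 4: carries offline_replicas
  }
}
```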

View File

@@ -300,7 +300,7 @@ class ReplicaManager(val config: KafkaConfig,
// A follower can lag behind leader for up to config.replicaLagTimeMaxMs x 1.5 before it is removed from ISR
scheduler.schedule("isr-expiration", maybeShrinkIsr _, period = config.replicaLagTimeMaxMs / 2, unit = TimeUnit.MILLISECONDS)
scheduler.schedule("isr-change-propagation", maybePropagateIsrChanges _, period = 2500L, unit = TimeUnit.MILLISECONDS)
-val haltBrokerOnFailure = config.interBrokerProtocolVersion < KAFKA_0_11_1_IV0
+val haltBrokerOnFailure = config.interBrokerProtocolVersion < KAFKA_1_0_IV0
logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler", haltBrokerOnFailure)
logDirFailureHandler.start()
}
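
The flag above couples disk-failure handling to the protocol version: only brokers whose inter-broker protocol version is below 1.0 still halt outright when a log directory fails, as described in the upgrade notes further down. A minimal sketch of that decision, assuming a hypothetical handler (not the real kafka.server.ReplicaManager or LogDirFailureHandler):

```scala
// Illustrative sketch only: how the haltBrokerOnFailure flag derived above is meant
// to be consumed. The handler and the placeholder id are hypothetical.
object LogDirFailureSketch {
  val KAFKA_1_0_IV0 = 13 // placeholder ordering id for the 1.0 inter-broker protocol

  def haltBrokerOnFailure(interBrokerProtocolVersion: Int): Boolean =
    interBrokerProtocolVersion < KAFKA_1_0_IV0

  def onLogDirFailure(dir: String, interBrokerProtocolVersion: Int): Unit =
    if (haltBrokerOnFailure(interBrokerProtocolVersion))
      println(s"Halting broker: log dir $dir failed under a pre-1.0 protocol version")
    else
      println(s"Marking $dir offline; broker keeps serving replicas on healthy log dirs")

  def main(args: Array[String]): Unit =
    onLogDirFailure("/var/kafka-logs-1", KAFKA_1_0_IV0)
}
```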

View File

@@ -17,9 +17,9 @@
<script><!--#include virtual="js/templateData.js" --></script>
<h4><a id="upgrade_11_1_0" href="#upgrade_11_1_0">Upgrading from 0.8.x, 0.9.x, 0.10.0.x, 0.10.1.x, 0.10.2.x or 0.11.0.0 to 0.11.1.0</a></h4>
<p>Kafka 0.11.1.0 introduces wire protocol changes. By following the recommended rolling upgrade plan below,
you guarantee no downtime during the upgrade. However, please review the <a href="#upgrade_1110_notable">notable changes in 0.11.1.0</a> before upgrading.
<h4><a id="upgrade_1_0_0" href="#upgrade_1_0_0">Upgrading from 0.8.x, 0.9.x, 0.10.0.x, 0.10.1.x, 0.10.2.x or 0.11.0.x to 1.0.0</a></h4>
<p>Kafka 1.0.0 introduces wire protocol changes. By following the recommended rolling upgrade plan below,
you guarantee no downtime during the upgrade. However, please review the <a href="#upgrade_100_notable">notable changes in 1.0.0</a> before upgrading.
</p>
<p><b>For a rolling upgrade:</b></p>
@@ -33,7 +33,7 @@
</ul>
</li>
<li> Upgrade the brokers one at a time: shut down the broker, update the code, and restart it. </li>
-<li> Once the entire cluster is upgraded, bump the protocol version by editing <code>inter.broker.protocol.version</code> and setting it to 0.11.1.
+<li> Once the entire cluster is upgraded, bump the protocol version by editing <code>inter.broker.protocol.version</code> and setting it to 1.0.0.
<li> Restart the brokers one by one for the new protocol version to take effect. </li>
</ol>
@@ -46,8 +46,12 @@
Similarly for the message format version.</li>
</ol>
<h5><a id="upgrade_1110_notable" href="#upgrade_1100_notable">Notable changes in 0.11.1.0</a></h5>
<h5><a id="upgrade_100_notable" href="#upgrade_100_notable">Notable changes in 1.0.0</a></h5>
<ul>
<li>Topic deletion is now enabled by default, since the functionality is now stable. Users who wish to
to retain the previous behavior should set the broker config <code>delete.topic.enable</code> to <code>false</code>. Keep in mind that topic deletion removes data and the operation is not reversible (i.e. there is no "undelete" operation)</li>
<li>For topics that support timestamp search if no offset can be found for a partition, that partition is now included in the search result with a null offset value. Previously, the partition was not included in the map.
This change was made to make the search behavior consistent with the case of topics not supporting timestamp search.
<li>If the <code>inter.broker.protocol.version</code> is 0.11.1 or later, a broker will now stay online to serve replicas
on live log directories even if there are offline log directories. A log directory may become offline due to IOException
caused by hardware failure. Users need to monitor the per-broker metric <code>offlineLogDirectoryCount</code> to check
@@ -56,7 +60,7 @@
if the version of client's FetchRequest or ProducerRequest does not support KafkaStorageException. </li>
</ul>
<h5><a id="upgrade_1110_new_protocols" href="#upgrade_1110_new_protocols">New Protocol Versions</a></h5>
<h5><a id="upgrade_100_new_protocols" href="#upgrade_100_new_protocols">New Protocol Versions</a></h5>
<ul>
<li> <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-112%3A+Handle+disk+failure+for+JBOD">KIP-112</a>: LeaderAndIsrRequest v1 introduces a partition-level <code>is_new</code> field. </li>
<li> <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-112%3A+Handle+disk+failure+for+JBOD">KIP-112</a>: UpdateMetadataRequest v4 introduces a partition-level <code>offline_replicas</code> field. </li>
@@ -65,20 +69,6 @@
<li> <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-112%3A+Handle+disk+failure+for+JBOD">KIP-112</a>: FetchResponse v6 introduces error code for KafkaStorageException. </li>
</ul>
<h4><a id="upgrade_1_0_0" href="#upgrade_1_0_0"</h4> Upgrading from 0.8.x, 0.9.x, 0.10.0.x, 0.10.1.x, 0.10.2 or 0.11.0.0 to 1.0.0</a></h4>
<p>1.0.0 is fully compatible with 0.11.0.0. The upgrade can be done one broker at a time by simply bringing it down, updating the code, and restarting it.
To upgrade from earlier versions, please review the <a href="#upgrade_11_0_0">0.11.0.0 upgrade instructions</a>.
</p>
<h5><a id="upgrade_100_notable" href="#upgrade_100_notable">Notable changes in 1.0.0</a></h5>
<ul>
<li>Topic deletion is now enabled by default, since the functionality is now stable. Users who wish to
to retain the previous behavior should set the broker config <code>delete.topic.enable</code> to <code>false</code>. Keep in mind that topic deletion removes data and the operation is not reversible (i.e. there is no "undelete" operation)</li>
<li>For topics that support timestamp search if no offset can be found for a partition, that partition is now included in the search result with a null offset value. Previously, the partition was not included in the map.
This change was made to make the search behavior consistent with the case of topics not supporting timestamp search.
</ul>
<h4><a id="upgrade_11_0_0" href="#upgrade_11_0_0">Upgrading from 0.8.x, 0.9.x, 0.10.0.x, 0.10.1.x or 0.10.2.x to 0.11.0.0</a></h4>
<p>Kafka 0.11.0.0 introduces a new message format version as well as wire protocol changes. By following the recommended rolling upgrade plan below,
you guarantee no downtime during the upgrade. However, please review the <a href="#upgrade_1100_notable">notable changes in 0.11.0.0</a> before upgrading.
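
One of the notable changes folded into the 1.0.0 section above affects client code directly: offsetsForTimes now returns an entry with a null value for partitions where no offset is found, instead of omitting the partition from the result map. A hedged consumer-side sketch (broker address, topic, group id and timestamp are made up for illustration):

```scala
// Sketch of null-safe handling of offsetsForTimes results after this change.
import java.util.Properties
import scala.collection.JavaConverters._

import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

object OffsetsForTimesSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:9092") // assumed local 1.0.0 broker
    props.put("group.id", "offsets-for-times-sketch") // illustrative group id
    props.put("key.deserializer", classOf[StringDeserializer].getName)
    props.put("value.deserializer", classOf[StringDeserializer].getName)

    val consumer = new KafkaConsumer[String, String](props)
    try {
      // Ask for the earliest offset at or after an arbitrary timestamp.
      val query = Map(new TopicPartition("example-topic", 0) ->
        java.lang.Long.valueOf(1500000000000L)).asJava
      val offsets = consumer.offsetsForTimes(query)
      offsets.asScala.foreach { case (tp, offsetAndTimestamp) =>
        // Partitions with no matching offset now appear with a null value.
        if (offsetAndTimestamp == null)
          println(s"$tp: no offset at or after the requested timestamp")
        else
          println(s"$tp: offset ${offsetAndTimestamp.offset()} at ${offsetAndTimestamp.timestamp()}")
      }
    } finally consumer.close()
  }
}
```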

View File

@@ -16,7 +16,7 @@
group=org.apache.kafka
# NOTE: When you change this version number, you should also make sure to update
# the version numbers in tests/kafkatest/__init__.py and kafka-merge-pr.py.
-version=0.11.1.0-SNAPSHOT
+version=1.0.0-SNAPSHOT
scalaVersion=2.11.11
task=build
org.gradle.jvmargs=-XX:MaxPermSize=512m -Xmx1024m -Xss2m

View File

@@ -72,7 +72,7 @@ RELEASE_BRANCH_PREFIX = "0."
DEV_BRANCH_NAME = "trunk"
-DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "0.11.1.0")
+DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "1.0.0")
def get_json(url):
try:

View File

@@ -21,5 +21,5 @@
#
# Instead, in development branches, the version should have a suffix of the form ".devN"
#
-# For example, when Kafka is at version 0.9.0.0-SNAPSHOT, this should be something like "0.9.0.0.dev0"
-__version__ = '0.11.1.0.dev0'
+# For example, when Kafka is at version 1.0.0-SNAPSHOT, this should be something like "1.0.0.dev0"
+__version__ = '1.0.0.dev0'