mirror of https://github.com/apache/kafka.git
KAFKA-18050 Upgrade the checkstyle version to 10.20.2 (#17999)
Reviewers: Chia-Ping Tsai <chia7712@gmail.com>
parent 4362ab7090
commit 2b43c49f51
@@ -39,7 +39,9 @@
     <module name="EqualsHashCode"/>
     <module name="SimplifyBooleanExpression"/>
     <module name="OneStatementPerLine"/>
-    <module name="UnnecessaryParentheses" />
+    <module name="UnnecessaryParentheses">
+      <property name="tokens" value="IDENT, NUM_DOUBLE, LAMBDA, TEXT_BLOCK_LITERAL_BEGIN, UNARY_MINUS, UNARY_PLUS, INC, DEC, POST_INC, POST_DEC" />
+    </module>
     <module name="SimplifyBooleanReturn"/>

     <!-- style -->
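For orientation, here is a minimal illustration of the kind of code the SimplifyBooleanReturn check and the token-restricted UnnecessaryParentheses check react to. The class and method names are hypothetical and not taken from the Kafka sources; it assumes checkstyle's stock behavior for these checks with the token list configured above.

    // Illustration only; not part of the Kafka code base.
    public class CheckstyleViolationExamples {

        // SimplifyBooleanReturn: the if/else can be collapsed to "return s.isEmpty();"
        static boolean isEmptyVerbose(String s) {
            if (s.isEmpty()) {
                return true;
            } else {
                return false;
            }
        }

        // UnnecessaryParentheses (IDENT token) would typically flag the
        // parentheses around the bare identifier "y".
        static int copyOf(int y) {
            int x = (y);   // preferred: int x = y;
            return x;
        }
    }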
@@ -44,7 +44,7 @@ public final class OffsetsForLeaderEpochUtils {

    private static final Logger LOG = LoggerFactory.getLogger(OffsetsForLeaderEpochUtils.class);

-    private OffsetsForLeaderEpochUtils(){}
+    private OffsetsForLeaderEpochUtils() {}

    static AbstractRequest.Builder<OffsetsForLeaderEpochRequest> prepareRequest(
            Map<TopicPartition, SubscriptionState.FetchPosition> requestData) {
@@ -177,7 +177,7 @@ public class ListConsumerGroupOffsetsHandlerTest {
        Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMapTwo =
            Collections.singletonMap(t2p2, new OffsetAndMetadata(10L));
        Map<String, Map<TopicPartition, OffsetAndMetadata>> expectedResult =
-            new HashMap<String, Map<TopicPartition, OffsetAndMetadata>>() {{
+            new HashMap<>() {{
                put(groupZero, offsetAndMetadataMapZero);
                put(groupOne, offsetAndMetadataMapOne);
                put(groupTwo, offsetAndMetadataMapTwo);
@@ -304,7 +304,7 @@ public class ListConsumerGroupOffsetsHandlerTest {
        responseDataTwo.put(t2p2, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE));

        Map<String, Map<TopicPartition, PartitionData>> responseData =
-            new HashMap<String, Map<TopicPartition, PartitionData>>() {{
+            new HashMap<>() {{
                put(groupZero, responseDataZero);
                put(groupOne, responseDataOne);
                put(groupTwo, responseDataTwo);
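The shorter form in the two hunks above relies on Java 9+ diamond inference for anonymous subclasses. A small stand-alone sketch, with hypothetical names and not taken from the Kafka sources:

    import java.util.HashMap;
    import java.util.Map;

    public class DiamondAnonymousExample {
        public static void main(String[] args) {
            // Before Java 9 the type arguments had to be spelled out:
            //   new HashMap<String, Integer>() {{ put("a", 1); }};
            Map<String, Integer> counts = new HashMap<>() {{  // diamond with an anonymous subclass (Java 9+)
                put("a", 1);
                put("b", 2);
            }};
            System.out.println(counts);
        }
    }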
@@ -1196,7 +1196,7 @@ public class AbstractHerderTest {
        keys.putAll(configDef.configKeys());
    }

-    protected void addValue(List<ConfigValue> values, String name, String value, String...errors) {
+    protected void addValue(List<ConfigValue> values, String name, String value, String... errors) {
        values.add(new ConfigValue(name, value, new ArrayList<>(), Arrays.asList(errors)));
    }

@@ -1211,7 +1211,7 @@ public class AbstractHerderTest {
        assertNull(info.configKey());
    }

-    protected void assertInfoValue(ConfigInfos infos, String name, String value, String...errors) {
+    protected void assertInfoValue(ConfigInfos infos, String name, String value, String... errors) {
        ConfigValueInfo info = findInfo(infos, name).configValue();
        assertEquals(name, info.name());
        assertEquals(value, info.value());
@@ -36,8 +36,7 @@ public class HasHeaderKey<R extends ConnectRecord<R>> implements Predicate<R>, V
    public static final String OVERVIEW_DOC = "A predicate which is true for records with at least one header with the configured name.";
    public static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define(NAME_CONFIG, ConfigDef.Type.STRING, ConfigDef.NO_DEFAULT_VALUE,
-                    new ConfigDef.NonEmptyString(), ConfigDef.Importance.MEDIUM,
-                    "The header name.");
+                    new ConfigDef.NonEmptyString(), ConfigDef.Importance.MEDIUM, "The header name.");
    private String name;

    @Override
@@ -420,7 +420,7 @@ public class ConfigCommandTest {
        assertEquals("[[1, 2], [3, 4]]", addedProps.getProperty("nested"));
    }

-    public void testExpectedEntityTypeNames(List<String> expectedTypes, List<String> expectedNames, List<String> connectOpts, String...args) {
+    public void testExpectedEntityTypeNames(List<String> expectedTypes, List<String> expectedNames, List<String> connectOpts, String... args) {
        ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList(connectOpts.get(0), connectOpts.get(1), "--describe"), Arrays.asList(args)));
        createOpts.checkArgs();
        assertEquals(createOpts.entityTypes().toSeq(), seq(expectedTypes));
@@ -1434,7 +1434,7 @@ public class ConfigCommandTest {
    }

    @SafeVarargs
-    public static <K, V> Map<K, V> concat(Map<K, V>...maps) {
+    public static <K, V> Map<K, V> concat(Map<K, V>... maps) {
        Map<K, V> res = new HashMap<>();
        Stream.of(maps)
            .map(Map::entrySet)
@@ -62,10 +62,10 @@ public class UserScramCredentialsCommandTest {
        }
    }

-    private ConfigCommandResult runConfigCommandViaBroker(String...args) {
+    private ConfigCommandResult runConfigCommandViaBroker(String... args) {
        AtomicReference<OptionalInt> exitStatus = new AtomicReference<>(OptionalInt.empty());
        Exit.setExitProcedure((status, __) -> {
-            exitStatus.set(OptionalInt.of((Integer) status));
+            exitStatus.set(OptionalInt.of(status));
            throw new RuntimeException();
        });

@@ -58,10 +58,7 @@ versions += [
  // but currently, tests are failing in >=3.1.2. Therefore, we are temporarily using version 3.1.1.
  // The failing tests should be fixed under KAFKA-18089, allowing us to upgrade to >=3.1.2.
  caffeine: "3.1.1",
-  // when updating checkstyle, check whether the exclusion of
-  // CVE-2023-2976 and CVE-2020-8908 can be dropped from
-  // gradle/resources/dependencycheck-suppressions.xml
-  checkstyle: project.hasProperty('checkstyleVersion') ? checkstyleVersion : "8.36.2",
+  checkstyle: project.hasProperty('checkstyleVersion') ? checkstyleVersion : "10.20.2",
  commonsCli: "1.4",
  commonsIo: "2.14.0", // ZooKeeper dependency. Do not use, this is going away.
  commonsValidator: "1.9.0",
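As the ternary above suggests, the pinned version can still be overridden per invocation with a Gradle project property. A hedged usage sketch, assuming the standard Checkstyle plugin tasks are wired up in the build:

    ./gradlew checkstyleMain checkstyleTest -PcheckstyleVersion=10.20.2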
@@ -23,17 +23,6 @@
    ]]></notes>
    <cve>CVE-2023-35116</cve>
  </suppress>
-  <suppress>
-    <notes><![CDATA[
-    This older version of Guava is only included in checkstyle.
-    CVE-2023-2976 and CVE-2020-8908 are irrelevant for checkstyle,
-    as it is not executed with elevated privileges.
-    This suppression will no longer be needed when checkstyle
-    is updated to 10.5.0 or later.
-    ]]></notes>
-    <cve>CVE-2020-8908</cve>
-    <cve>CVE-2023-2976</cve>
-  </suppress>
  <suppress>
    <notes><![CDATA[
    Kafka does not use CgiServlet
@@ -2155,23 +2155,17 @@ public class ReplicationControlManagerTest {
                new ReassignablePartition().setPartitionIndex(0).
                    setReplicas(asList(1, 2, 3, 4, 0)))))));
        assertEquals(new AlterPartitionReassignmentsResponseData().
-            setErrorMessage(null).setResponses(asList(
+            setErrorMessage(null).
+            setResponses(asList(
                new ReassignableTopicResponse().setName("foo").setPartitions(asList(
-                    new ReassignablePartitionResponse().setPartitionIndex(0).
-                        setErrorMessage(null),
-                    new ReassignablePartitionResponse().setPartitionIndex(1).
-                        setErrorMessage(null),
-                    new ReassignablePartitionResponse().setPartitionIndex(2).
-                        setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
-                        setErrorMessage("The manual partition assignment includes broker 5, " +
-                            "but no such broker is registered."),
-                    new ReassignablePartitionResponse().setPartitionIndex(3).
-                        setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
-                        setErrorMessage("The manual partition assignment includes an empty " +
-                            "replica list."))),
+                    new ReassignablePartitionResponse().setPartitionIndex(0).setErrorMessage(null),
+                    new ReassignablePartitionResponse().setPartitionIndex(1).setErrorMessage(null),
+                    new ReassignablePartitionResponse().setPartitionIndex(2).setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
+                        setErrorMessage("The manual partition assignment includes broker 5, but no such broker is registered."),
+                    new ReassignablePartitionResponse().setPartitionIndex(3).setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
+                        setErrorMessage("The manual partition assignment includes an empty replica list."))),
                new ReassignableTopicResponse().setName("bar").setPartitions(singletonList(
-                    new ReassignablePartitionResponse().setPartitionIndex(0).
-                        setErrorMessage(null))))),
+                    new ReassignablePartitionResponse().setPartitionIndex(0).setErrorMessage(null))))),
            alterResult.response());
        ctx.replay(alterResult.records());
        assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).setIsr(new int[] {1, 2, 4}).
@@ -99,7 +99,8 @@ public class HighAvailabilityTaskAssignorIntegrationTest {
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
-        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY})
+        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
+    })
    public void shouldScaleOutWithWarmupTasksAndInMemoryStores(final String rackAwareStrategy, final TestInfo testInfo) throws InterruptedException {
        // NB: this test takes at least a minute to run, because it needs a probing rebalance, and the minimum
        // value is one minute
@@ -110,7 +111,8 @@ public class HighAvailabilityTaskAssignorIntegrationTest {
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
-        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY})
+        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
+    })
    public void shouldScaleOutWithWarmupTasksAndPersistentStores(final String rackAwareStrategy, final TestInfo testInfo) throws InterruptedException {
        // NB: this test takes at least a minute to run, because it needs a probing rebalance, and the minimum
        // value is one minute
@@ -985,7 +985,8 @@ public class QueryableStateIntegrationTest {
                CLUSTER.bootstrapServers(),
                StringSerializer.class,
                StringSerializer.class,
-                new Properties()),
+                new Properties()
+            ),
            mockTime);

        final KStream<String, String> s1 = builder.stream(streamOne);
@@ -43,6 +43,6 @@ import org.junit.platform.suite.api.Suite;
    StreamThreadStateStoreProviderTest.class,
    WrappingStoreProviderTest.class,
    QueryableStateIntegrationTest.class,
-})
+})
public class StoreQuerySuite {
}
@@ -26,7 +26,7 @@ import org.slf4j.Logger;

public final class StreamStreamJoinUtil {

-    private StreamStreamJoinUtil(){
+    private StreamStreamJoinUtil() {
    }

    public static <KIn, VIn, KOut, VOut> boolean skipRecord(
@@ -235,7 +235,7 @@ public class RocksDBStoreTest extends AbstractKeyValueStoreTest {
    }

    public static class RocksDBConfigSetterWithUserProvidedStatistics implements RocksDBConfigSetter {
-        public RocksDBConfigSetterWithUserProvidedStatistics(){}
+        public RocksDBConfigSetterWithUserProvidedStatistics() {}

        public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
            lastStatistics = new Statistics();
@@ -306,7 +306,7 @@ public class RocksDBStoreTest extends AbstractKeyValueStoreTest {


    public static class RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig implements RocksDBConfigSetter {
-        public RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig(){}
+        public RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig() {}

        public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
            options.setTableFormatConfig(new BlockBasedTableConfig());
@@ -335,7 +335,7 @@ public class RocksDBStoreTest extends AbstractKeyValueStoreTest {
    }

    public static class RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig implements RocksDBConfigSetter {
-        public RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig(){}
+        public RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig() {}

        public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
            options.setTableFormatConfig(new PlainTableConfig());
@@ -133,6 +133,7 @@ public class TestUtils {

        private WrapperRecorder recorder;

+        @SuppressWarnings("unchecked")
        @Override
        public void configure(final Map<String, ?> configs) {
            if (configs.containsKey(PROCESSOR_WRAPPER_COUNTER_CONFIG)) {
@@ -148,7 +148,7 @@ public class ToolsUtils {
     * @param <T> Element type.
     */
    @SuppressWarnings("unchecked")
-    public static <T> Set<T> minus(Set<T> set, T...toRemove) {
+    public static <T> Set<T> minus(Set<T> set, T... toRemove) {
        Set<T> res = new HashSet<>(set);
        for (T t : toRemove)
            res.remove(t);
@@ -34,7 +34,7 @@ import com.fasterxml.jackson.databind.node.NullNode;
    @JsonSubTypes.Type(value = TaskRunning.class, name = TaskStateType.Constants.RUNNING_VALUE),
    @JsonSubTypes.Type(value = TaskStopping.class, name = TaskStateType.Constants.STOPPING_VALUE),
    @JsonSubTypes.Type(value = TaskDone.class, name = TaskStateType.Constants.DONE_VALUE)
-})
+})
public abstract class TaskState extends Message {
    private final TaskSpec spec;
