serializer) {
this.serializer = serializer;
}
/**
- * WindowedStreamsPartitioner determines the partition number for a message with the given windowed key and value
+ * WindowedStreamPartitioner determines the partition number for a message with the given windowed key and value
* and the current number of partitions. The partition number is determined by the original key of the windowed key
* using the same logic as DefaultPartitioner so that the topic is partitioned by the original key.
*
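
Editor's note: the javadoc above describes the windowed partitioner's strategy of hashing only the original, un-windowed key. A minimal sketch of that idea follows; it is not the verbatim Kafka implementation, and it assumes WindowedSerializer (org.apache.kafka.streams.kstream.internals) exposes a serializeBaseKey method for the underlying key.

import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.WindowedSerializer;
import org.apache.kafka.streams.processor.StreamPartitioner;

public class ExampleWindowedPartitioner<K, V> implements StreamPartitioner<Windowed<K>, V> {
    private final WindowedSerializer<K> serializer;

    public ExampleWindowedPartitioner(WindowedSerializer<K> serializer) {
        this.serializer = serializer;
    }

    @Override
    public Integer partition(Windowed<K> windowedKey, V value, int numPartitions) {
        // serialize only the original key (serializeBaseKey is assumed here), then place it
        // with the same murmur2 hashing DefaultPartitioner uses, so every window of a given
        // key lands on the same partition as the un-windowed input messages
        byte[] keyBytes = serializer.serializeBaseKey(null, windowedKey);
        return (Utils.murmur2(keyBytes) & 0x7fffffff) % numPartitions;
    }
}
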
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/StreamsPartitioner.java b/streams/src/main/java/org/apache/kafka/streams/processor/StreamPartitioner.java
similarity index 86%
rename from streams/src/main/java/org/apache/kafka/streams/processor/StreamsPartitioner.java
rename to streams/src/main/java/org/apache/kafka/streams/processor/StreamPartitioner.java
index f8d199d91f1..f14d9d943f5 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/StreamsPartitioner.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/StreamPartitioner.java
@@ -33,19 +33,19 @@ package org.apache.kafka.streams.processor;
* An upstream topology producing messages to that topic can use a custom stream partitioner to precisely and consistently
* determine to which partition each message should be written.
*
- * To do this, create a StreamsPartitioner implementation, and when you build your topology specify that custom partitioner
- * when {@link TopologyBuilder#addSink(String, String, org.apache.kafka.common.serialization.Serializer, org.apache.kafka.common.serialization.Serializer, StreamsPartitioner, String...) adding a sink}
+ * To do this, create a StreamPartitioner implementation, and when you build your topology specify that custom partitioner
+ * when {@link TopologyBuilder#addSink(String, String, org.apache.kafka.common.serialization.Serializer, org.apache.kafka.common.serialization.Serializer, StreamPartitioner, String...) adding a sink}
* for that topic.
*
- * All StreamsPartitioner implementations should be stateless and a pure function so they can be shared across topic and sink nodes.
+ * All StreamPartitioner implementations should be stateless pure functions so they can be shared across topic and sink nodes.
*
* @param <K> the type of keys
* @param <V> the type of values
* @see TopologyBuilder#addSink(String, String, org.apache.kafka.common.serialization.Serializer,
- * org.apache.kafka.common.serialization.Serializer, StreamsPartitioner, String...)
- * @see TopologyBuilder#addSink(String, String, StreamsPartitioner, String...)
+ * org.apache.kafka.common.serialization.Serializer, StreamPartitioner, String...)
+ * @see TopologyBuilder#addSink(String, String, StreamPartitioner, String...)
*/
-public interface StreamsPartitioner<K, V> {
+public interface StreamPartitioner<K, V> {
/**
* Determine the partition number for a message with the given key and value and the current number of partitions.
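
Editor's note: the renamed interface is a single-method, stateless contract, so a custom partitioner is easy to sketch. The example below is illustrative only; PageView and its getRegion() accessor are hypothetical types.

public class RegionPartitioner implements StreamPartitioner<String, PageView> {
    @Override
    public Integer partition(String key, PageView value, int numPartitions) {
        // a pure function of its inputs with no state, so one instance can safely be
        // shared across sink nodes; mask the sign bit rather than using Math.abs to
        // avoid a negative result when the hash code is Integer.MIN_VALUE
        return (value.getRegion().hashCode() & 0x7fffffff) % numPartitions;
    }
}
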
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/TopologyBuilder.java b/streams/src/main/java/org/apache/kafka/streams/processor/TopologyBuilder.java
index f4e68213765..a6b54b7998b 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/TopologyBuilder.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/TopologyBuilder.java
@@ -135,9 +135,9 @@ public class TopologyBuilder {
public final String topic;
private Serializer keySerializer;
private Serializer valSerializer;
- private final StreamsPartitioner partitioner;
+ private final StreamPartitioner partitioner;
- private SinkNodeFactory(String name, String[] parents, String topic, Serializer keySerializer, Serializer valSerializer, StreamsPartitioner partitioner) {
+ private SinkNodeFactory(String name, String[] parents, String topic, Serializer keySerializer, Serializer valSerializer, StreamPartitioner partitioner) {
super(name);
this.parents = parents.clone();
this.topic = topic;
@@ -245,9 +245,9 @@ public class TopologyBuilder {
* @param parentNames the name of one or more source or processor nodes whose output messages this sink should consume
* and write to its topic
* @return this builder instance so methods can be chained together; never null
- * @see #addSink(String, String, StreamsPartitioner, String...)
+ * @see #addSink(String, String, StreamPartitioner, String...)
* @see #addSink(String, String, Serializer, Serializer, String...)
- * @see #addSink(String, String, Serializer, Serializer, StreamsPartitioner, String...)
+ * @see #addSink(String, String, Serializer, Serializer, StreamPartitioner, String...)
*/
public final TopologyBuilder addSink(String name, String topic, String... parentNames) {
return addSink(name, topic, (Serializer) null, (Serializer) null, parentNames);
@@ -260,7 +260,7 @@ public class TopologyBuilder {
* {@link org.apache.kafka.streams.StreamsConfig#VALUE_SERIALIZER_CLASS_CONFIG default value serializer} specified in the
* {@link org.apache.kafka.streams.StreamsConfig stream configuration}.
*
- * The sink will also use the specified {@link StreamsPartitioner} to determine how messages are distributed among
+ * The sink will also use the specified {@link StreamPartitioner} to determine how messages are distributed among
* the named Kafka topic's partitions. Such control is often useful with topologies that use
* {@link #addStateStore(StateStoreSupplier, String...) state stores}
* in its processors. In most other cases, however, a partitioner need not be specified and Kafka will automatically distribute
@@ -274,9 +274,9 @@ public class TopologyBuilder {
* @return this builder instance so methods can be chained together; never null
* @see #addSink(String, String, String...)
* @see #addSink(String, String, Serializer, Serializer, String...)
- * @see #addSink(String, String, Serializer, Serializer, StreamsPartitioner, String...)
+ * @see #addSink(String, String, Serializer, Serializer, StreamPartitioner, String...)
*/
- public final TopologyBuilder addSink(String name, String topic, StreamsPartitioner partitioner, String... parentNames) {
+ public final TopologyBuilder addSink(String name, String topic, StreamPartitioner partitioner, String... parentNames) {
return addSink(name, topic, (Serializer) null, (Serializer) null, partitioner, parentNames);
}
@@ -284,7 +284,7 @@ public class TopologyBuilder {
* Add a new sink that forwards messages from upstream parent processor and/or source nodes to the named Kafka topic.
* The sink will use the specified key and value serializers.
*
- * The sink will also use the specified {@link StreamsPartitioner} to determine how messages are distributed among
+ * The sink will also use the specified {@link StreamPartitioner} to determine how messages are distributed among
* the named Kafka topic's partitions. Such control is often useful with topologies that use
* {@link #addStateStore(StateStoreSupplier, String...) state stores}
* in its processors. In most other cases, however, a partitioner need not be specified and Kafka will automatically distribute
@@ -302,11 +302,11 @@ public class TopologyBuilder {
* and write to its topic
* @return this builder instance so methods can be chained together; never null
* @see #addSink(String, String, String...)
- * @see #addSink(String, String, StreamsPartitioner, String...)
- * @see #addSink(String, String, Serializer, Serializer, StreamsPartitioner, String...)
+ * @see #addSink(String, String, StreamPartitioner, String...)
+ * @see #addSink(String, String, Serializer, Serializer, StreamPartitioner, String...)
*/
public final TopologyBuilder addSink(String name, String topic, Serializer keySerializer, Serializer valSerializer, String... parentNames) {
- return addSink(name, topic, keySerializer, valSerializer, (StreamsPartitioner) null, parentNames);
+ return addSink(name, topic, keySerializer, valSerializer, (StreamPartitioner) null, parentNames);
}
/**
@@ -326,10 +326,10 @@ public class TopologyBuilder {
* and write to its topic
* @return this builder instance so methods can be chained together; never null
* @see #addSink(String, String, String...)
- * @see #addSink(String, String, StreamsPartitioner, String...)
+ * @see #addSink(String, String, StreamPartitioner, String...)
* @see #addSink(String, String, Serializer, Serializer, String...)
*/
- public final TopologyBuilder addSink(String name, String topic, Serializer keySerializer, Serializer valSerializer, StreamsPartitioner partitioner, String... parentNames) {
+ public final TopologyBuilder addSink(String name, String topic, Serializer keySerializer, Serializer valSerializer, StreamPartitioner partitioner, String... parentNames) {
if (nodeFactories.containsKey(name))
throw new TopologyException("Processor " + name + " is already added.");
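
Editor's note: wiring such a partitioner into a topology goes through the addSink overloads touched above. A hedged usage sketch, with made-up topic and node names, a hypothetical MyProcessorSupplier, and the RegionPartitioner from the earlier example:

TopologyBuilder builder = new TopologyBuilder();
builder.addSource("pageviews-source", "pageviews")
       .addProcessor("enrich", new MyProcessorSupplier(), "pageviews-source") // hypothetical supplier
       // this overload uses the default key/value serializers from StreamsConfig;
       // the custom StreamPartitioner controls which partition each message goes to
       .addSink("pageviews-sink", "pageviews-by-region", new RegionPartitioner(), "enrich");
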
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractTask.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractTask.java
index ef4c3c74355..68680ab5d17 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractTask.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractTask.java
@@ -57,7 +57,8 @@ public abstract class AbstractTask {
// create the processor state manager
try {
- File stateFile = new File(config.getString(StreamsConfig.STATE_DIR_CONFIG), id.toString());
+ File jobStateDir = StreamThread.makeStateDir(jobId, config.getString(StreamsConfig.STATE_DIR_CONFIG));
+ File stateFile = new File(jobStateDir.getCanonicalPath(), id.toString());
// if partitions is null, this is a standby task
this.stateMgr = new ProcessorStateManager(jobId, id.partition, partitions, stateFile, restoreConsumer, isStandby);
} catch (IOException e) {
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollector.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollector.java
index 25c663d1f19..fe0472eb9dc 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollector.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollector.java
@@ -24,7 +24,7 @@ import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Serializer;
-import org.apache.kafka.streams.processor.StreamsPartitioner;
+import org.apache.kafka.streams.processor.StreamPartitioner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -72,7 +72,7 @@ public class RecordCollector {
}
public void send(ProducerRecord record, Serializer keySerializer, Serializer valueSerializer,
- StreamsPartitioner partitioner) {
+ StreamPartitioner partitioner) {
byte[] keyBytes = keySerializer.serialize(record.topic(), record.key());
byte[] valBytes = valueSerializer.serialize(record.topic(), record.value());
Integer partition = null;
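
Editor's note: the rest of send(), cut off by the hunk above, is where the partitioner actually matters. A hedged sketch of that continuation, assuming the collector's byte[]-keyed producer field; the real RecordCollector also tracks offsets via a producer callback:

List<PartitionInfo> partitions = this.producer.partitionsFor(record.topic());
if (partitioner != null && partitions != null && partitions.size() > 0)
    // delegate the choice to the StreamPartitioner, bounded by the topic's partition count
    partition = partitioner.partition(record.key(), record.value(), partitions.size());
// a null partition lets the producer's own partitioner decide
this.producer.send(new ProducerRecord<>(record.topic(), partition, keyBytes, valBytes));
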
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java
index 88b3f56d4f0..7ab59ee6ca3 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java
@@ -20,18 +20,18 @@ package org.apache.kafka.streams.processor.internals;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.processor.ProcessorContext;
-import org.apache.kafka.streams.processor.StreamsPartitioner;
+import org.apache.kafka.streams.processor.StreamPartitioner;
public class SinkNode extends ProcessorNode {
private final String topic;
private Serializer keySerializer;
private Serializer valSerializer;
- private final StreamsPartitioner partitioner;
+ private final StreamPartitioner partitioner;
private ProcessorContext context;
- public SinkNode(String name, String topic, Serializer keySerializer, Serializer valSerializer, StreamsPartitioner partitioner) {
+ public SinkNode(String name, String topic, Serializer keySerializer, Serializer valSerializer, StreamPartitioner partitioner) {
super(name);
this.topic = topic;
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
index e5d09221bcf..f118f60bfaf 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
@@ -104,6 +104,18 @@ public class StreamThread extends Thread {
private final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> standbyRecords;
private boolean processStandbyRecords = false;
+ static File makeStateDir(String jobId, String baseDirName) {
+ File baseDir = new File(baseDirName);
+ if (!baseDir.exists())
+ baseDir.mkdir();
+
+ File stateDir = new File(baseDir, jobId);
+ if (!stateDir.exists())
+ stateDir.mkdir();
+
+ return stateDir;
+ }
+
final ConsumerRebalanceListener rebalanceListener = new ConsumerRebalanceListener() {
@Override
public void onPartitionsAssigned(Collection<TopicPartition> assignment) {
@@ -167,8 +179,7 @@ public class StreamThread extends Thread {
this.standbyRecords = new HashMap<>();
// read in task specific config values
- this.stateDir = new File(this.config.getString(StreamsConfig.STATE_DIR_CONFIG));
- this.stateDir.mkdir();
+ this.stateDir = makeStateDir(this.jobId, this.config.getString(StreamsConfig.STATE_DIR_CONFIG));
this.pollTimeMs = config.getLong(StreamsConfig.POLL_MS_CONFIG);
this.commitTimeMs = config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG);
this.cleanTimeMs = config.getLong(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG);
@@ -452,14 +463,15 @@ public class StreamThread extends Thread {
if (stateDirs != null) {
for (File dir : stateDirs) {
try {
- TaskId id = TaskId.parse(dir.getName());
+ String dirName = dir.getName();
+ TaskId id = TaskId.parse(dirName.substring(dirName.lastIndexOf("-") + 1));
// try to acquire the exclusive lock on the state directory
FileLock directoryLock = null;
try {
directoryLock = ProcessorStateManager.lockStateDirectory(dir);
if (directoryLock != null) {
- log.info("Deleting obsolete state directory {} after delayed {} ms.", dir.getAbsolutePath(), cleanTimeMs);
+ log.info("Deleting obsolete state directory {} for task {} after delayed {} ms.", dir.getAbsolutePath(), id, cleanTimeMs);
Utils.delete(dir);
}
} catch (IOException e) {
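
Editor's note: the net effect of makeStateDir() together with the AbstractTask change above is that task state now lives under a per-job subdirectory. A small illustration, assuming state.dir=/tmp/kafka-streams, job.id=my-job, and task id 0_1 (makeStateDir is a package-private helper, so this call is only meaningful from the same package, as in AbstractTask):

File jobStateDir = StreamThread.makeStateDir("my-job", "/tmp/kafka-streams"); // -> /tmp/kafka-streams/my-job
File taskStateDir = new File(jobStateDir, new TaskId(0, 1).toString());       // -> /tmp/kafka-streams/my-job/0_1
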
diff --git a/streams/src/main/java/org/apache/kafka/streams/examples/WallclockTimestampExtractor.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/WallclockTimestampExtractor.java
similarity index 95%
rename from streams/src/main/java/org/apache/kafka/streams/examples/WallclockTimestampExtractor.java
rename to streams/src/main/java/org/apache/kafka/streams/processor/internals/WallclockTimestampExtractor.java
index 26281d69d02..60b3b96dd89 100644
--- a/streams/src/main/java/org/apache/kafka/streams/examples/WallclockTimestampExtractor.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/WallclockTimestampExtractor.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.kafka.streams.examples;
+package org.apache.kafka.streams.processor.internals;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;
diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java
index 777fae5e6d7..b2af9048af5 100644
--- a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java
@@ -22,7 +22,7 @@ import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
-import org.apache.kafka.streams.examples.WallclockTimestampExtractor;
+import org.apache.kafka.streams.processor.internals.WallclockTimestampExtractor;
import org.apache.kafka.streams.processor.internals.StreamThread;
import org.junit.Before;
import org.junit.Test;
diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamsPartitionerTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java
similarity index 95%
rename from streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamsPartitionerTest.java
rename to streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java
index 18494fd933b..1b8cbb8a449 100644
--- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamsPartitionerTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java
@@ -33,7 +33,7 @@ import java.util.Random;
import static org.junit.Assert.assertEquals;
-public class WindowedStreamsPartitionerTest {
+public class WindowedStreamPartitionerTest {
private String topicName = "topic";
@@ -59,7 +59,7 @@ public class WindowedStreamsPartitionerTest {
DefaultPartitioner defaultPartitioner = new DefaultPartitioner();
WindowedSerializer<Integer> windowedSerializer = new WindowedSerializer<>(keySerializer);
- WindowedStreamsPartitioner<Integer, String> streamPartitioner = new WindowedStreamsPartitioner<>(windowedSerializer);
+ WindowedStreamPartitioner<Integer, String> streamPartitioner = new WindowedStreamPartitioner<>(windowedSerializer);
for (int k = 0; k < 10; k++) {
Integer key = rand.nextInt();
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java
index 60bd3090571..cb6ea056beb 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java
@@ -32,7 +32,7 @@ import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.ProcessorSupplier;
-import org.apache.kafka.streams.processor.StreamsPartitioner;
+import org.apache.kafka.streams.processor.StreamPartitioner;
import org.apache.kafka.streams.processor.TimestampExtractor;
import org.apache.kafka.streams.processor.TopologyBuilder;
import org.apache.kafka.streams.state.KeyValueIterator;
@@ -194,8 +194,8 @@ public class ProcessorTopologyTest {
assertNull(driver.readOutput(topic));
}
- protected <K, V> StreamsPartitioner<K, V> constantPartitioner(final Integer partition) {
- return new StreamsPartitioner<K, V>() {
+ protected <K, V> StreamPartitioner<K, V> constantPartitioner(final Integer partition) {
+ return new StreamPartitioner<K, V>() {
@Override
public Integer partition(K key, V value, int numPartitions) {
return partition;
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StandbyTaskTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StandbyTaskTest.java
index fd604b675f7..ffcf9ae15de 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StandbyTaskTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StandbyTaskTest.java
@@ -93,8 +93,8 @@ public class StandbyTaskTest {
setProperty(StreamsConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
setProperty(StreamsConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
setProperty(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, "org.apache.kafka.test.MockTimestampExtractor");
+ setProperty(StreamsConfig.JOB_ID_CONFIG, jobId);
setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:2171");
- setProperty(StreamsConfig.JOB_ID_CONFIG, "standby-task-test");
setProperty(StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG, "3");
setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
}
@@ -200,7 +200,7 @@ public class StandbyTaskTest {
task.close();
- File taskDir = new File(baseDir, taskId.toString());
+ File taskDir = new File(StreamThread.makeStateDir(jobId, baseDir.getCanonicalPath()), taskId.toString());
OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
Map<TopicPartition, Long> offsets = checkpoint.read();
@@ -298,7 +298,7 @@ public class StandbyTaskTest {
task.close();
- File taskDir = new File(baseDir, taskId.toString());
+ File taskDir = new File(StreamThread.makeStateDir(jobId, baseDir.getCanonicalPath()), taskId.toString());
OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
Map<TopicPartition, Long> offsets = checkpoint.read();
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java
index 2d531bcc770..039cb968191 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java
@@ -59,8 +59,9 @@ import java.util.UUID;
public class StreamThreadTest {
- private String clientId = "clientId";
- private UUID processId = UUID.randomUUID();
+ private final String clientId = "clientId";
+ private final String jobId = "stream-thread-test";
+ private final UUID processId = UUID.randomUUID();
private TopicPartition t1p1 = new TopicPartition("topic1", 1);
private TopicPartition t1p2 = new TopicPartition("topic1", 2);
@@ -117,8 +118,8 @@ public class StreamThreadTest {
setProperty(StreamsConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
setProperty(StreamsConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
setProperty(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, "org.apache.kafka.test.MockTimestampExtractor");
+ setProperty(StreamsConfig.JOB_ID_CONFIG, jobId);
setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:2171");
- setProperty(StreamsConfig.JOB_ID_CONFIG, "stream-thread-test");
setProperty(StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG, "3");
}
};
@@ -128,13 +129,14 @@ public class StreamThreadTest {
public boolean committed = false;
public TestStreamTask(TaskId id,
+ String jobId,
Collection<TopicPartition> partitions,
ProcessorTopology topology,
Consumer<byte[], byte[]> consumer,
Producer<byte[], byte[]> producer,
Consumer<byte[], byte[]> restoreConsumer,
StreamsConfig config) {
- super(id, "jobId", partitions, topology, consumer, producer, restoreConsumer, config, null);
+ super(id, jobId, partitions, topology, consumer, producer, restoreConsumer, config, null);
}
@Override
@@ -161,11 +163,11 @@ public class StreamThreadTest {
builder.addSource("source3", "topic3");
builder.addProcessor("processor", new MockProcessorSupplier(), "source2", "source3");
- StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, "test", clientId, processId, new Metrics(), new SystemTime()) {
+ StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, jobId, clientId, processId, new Metrics(), new SystemTime()) {
@Override
protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
ProcessorTopology topology = builder.build(id.topicGroupId);
- return new TestStreamTask(id, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
+ return new TestStreamTask(id, jobId, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
}
};
@@ -264,10 +266,12 @@ public class StreamThreadTest {
StreamsConfig config = new StreamsConfig(props);
- File stateDir1 = new File(baseDir, task1.toString());
- File stateDir2 = new File(baseDir, task2.toString());
- File stateDir3 = new File(baseDir, task3.toString());
- File extraDir = new File(baseDir, "X");
+ File jobDir = new File(baseDir, jobId);
+ jobDir.mkdir();
+ File stateDir1 = new File(jobDir, task1.toString());
+ File stateDir2 = new File(jobDir, task2.toString());
+ File stateDir3 = new File(jobDir, task3.toString());
+ File extraDir = new File(jobDir, "X");
stateDir1.mkdir();
stateDir2.mkdir();
stateDir3.mkdir();
@@ -281,7 +285,7 @@ public class StreamThreadTest {
TopologyBuilder builder = new TopologyBuilder();
builder.addSource("source1", "topic1");
- StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, "test", clientId, processId, new Metrics(), mockTime) {
+ StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, jobId, clientId, processId, new Metrics(), mockTime) {
@Override
public void maybeClean() {
super.maybeClean();
@@ -290,7 +294,7 @@ public class StreamThreadTest {
@Override
protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
ProcessorTopology topology = builder.build(id.topicGroupId);
- return new TestStreamTask(id, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
+ return new TestStreamTask(id, jobId, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
}
};
@@ -403,7 +407,7 @@ public class StreamThreadTest {
TopologyBuilder builder = new TopologyBuilder();
builder.addSource("source1", "topic1");
- StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, "test", clientId, processId, new Metrics(), mockTime) {
+ StreamThread thread = new StreamThread(builder, config, producer, consumer, mockRestoreConsumer, jobId, clientId, processId, new Metrics(), mockTime) {
@Override
public void maybeCommit() {
super.maybeCommit();
@@ -412,7 +416,7 @@ public class StreamThreadTest {
@Override
protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
ProcessorTopology topology = builder.build(id.topicGroupId);
- return new TestStreamTask(id, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
+ return new TestStreamTask(id, jobId, partitionsForTask, topology, consumer, producer, mockRestoreConsumer, config);
}
};
diff --git a/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java b/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java
index 36e487b617e..1e9c3bae839 100644
--- a/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java
+++ b/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java
@@ -29,7 +29,7 @@ import org.apache.kafka.streams.StreamsMetrics;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateRestoreCallback;
import org.apache.kafka.streams.processor.StateStore;
-import org.apache.kafka.streams.processor.StreamsPartitioner;
+import org.apache.kafka.streams.processor.StreamPartitioner;
import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.RecordCollector;
import org.apache.kafka.test.MockProcessorContext;
@@ -249,7 +249,7 @@ public class KeyValueStoreTestDriver {
}
@Override
public void send(ProducerRecord record, Serializer keySerializer, Serializer valueSerializer,
- StreamsPartitioner partitioner) {
+ StreamPartitioner partitioner) {
recordFlushed(record.key(), record.value());
}
};
diff --git a/streams/src/test/java/org/apache/kafka/test/KStreamTestDriver.java b/streams/src/test/java/org/apache/kafka/test/KStreamTestDriver.java
index 8f8e00f7d4a..2dc567ea349 100644
--- a/streams/src/test/java/org/apache/kafka/test/KStreamTestDriver.java
+++ b/streams/src/test/java/org/apache/kafka/test/KStreamTestDriver.java
@@ -24,7 +24,7 @@ import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateStore;
import org.apache.kafka.streams.processor.StateStoreSupplier;
-import org.apache.kafka.streams.processor.StreamsPartitioner;
+import org.apache.kafka.streams.processor.StreamPartitioner;
import org.apache.kafka.streams.processor.internals.ProcessorNode;
import org.apache.kafka.streams.processor.internals.ProcessorTopology;
import org.apache.kafka.streams.processor.internals.RecordCollector;
@@ -130,7 +130,7 @@ public class KStreamTestDriver {
@Override
public void send(ProducerRecord record, Serializer keySerializer, Serializer valueSerializer,
- StreamsPartitioner partitioner) {
+ StreamPartitioner partitioner) {
// The serialization is skipped.
process(record.topic(), record.key(), record.value());
}