mirror of https://github.com/apache/kafka.git
MINOR: Delete temporary directories after using them in RaftManagerTest (#20550)
Follow-up to [#11193](https://github.com/apache/kafka/pull/11193). This change adds cleanup of the temporary log and metadata directories created by RaftManagerTest so they are removed after each test run. Without this cleanup, the directories remain until the entire test suite completes, leaving extra files in the system temporary directory.

Testing:
- Ran `./gradlew core:test --tests kafka.raft.RaftManagerTest` and confirmed all tests pass.

Reviewers: TengYao Chi <kitingiao@gmail.com>, Chia-Ping Tsai <chia7712@gmail.com>
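Each test now follows the same try/finally pattern: create the temporary directory, exercise the `RaftManager`, and delete the directory in the `finally` block so it is removed even if an assertion fails. A minimal sketch of that pattern, assuming only a Kafka clients dependency (the object name and temp-dir prefix are illustrative; `Utils.delete` is the helper the diff below imports):

```scala
import java.nio.file.Files
import org.apache.kafka.common.utils.Utils

// Illustrative sketch of the cleanup pattern used in RaftManagerTest.
object TempDirCleanupSketch {
  def main(args: Array[String]): Unit = {
    // Stand-in for TestUtils.tempDir(): create a directory under java.io.tmpdir.
    val logDir = Files.createTempDirectory("raft-manager-test").toFile
    try {
      // ... build the RaftManager against logDir and run assertions here ...
      println(s"using temporary directory ${logDir.getAbsolutePath}")
    } finally {
      // Remove the directory and its contents even when the body throws.
      Utils.delete(logDir)
    }
  }
}
```

The controller and broker tests in the diff below apply the same pattern to both `logDir` and `metadataDir`, using `foreach` for the deletion since those values are wrapped in a `Seq` and an `Option` respectively.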
This commit is contained in:
parent 5ed4a48829
commit b72db2b2c7
@@ -30,6 +30,7 @@ import org.apache.kafka.common.Uuid
 import org.apache.kafka.common.metrics.Metrics
 import org.apache.kafka.common.network.ListenerName
 import org.apache.kafka.common.utils.Time
+import org.apache.kafka.common.utils.Utils
 import org.apache.kafka.network.SocketServerConfigs
 import org.apache.kafka.raft.{Endpoints, MetadataLogConfig, QuorumConfig}
 import org.apache.kafka.server.ProcessRole
@@ -126,17 +127,21 @@ class RaftManagerTest {
 
     val logDir = TestUtils.tempDir()
     val nodeId = 1
-    val raftManager = createRaftManager(
-      new TopicPartition("__raft_id_test", 0),
-      createConfig(
-        processRolesSet,
-        nodeId,
-        Seq(logDir.toPath),
-        None
+    try {
+      val raftManager = createRaftManager(
+        new TopicPartition("__raft_id_test", 0),
+        createConfig(
+          processRolesSet,
+          nodeId,
+          Seq(logDir.toPath),
+          None
+        )
       )
-    )
-    assertEquals(nodeId, raftManager.client.nodeId.getAsInt)
-    raftManager.shutdown()
+      assertEquals(nodeId, raftManager.client.nodeId.getAsInt)
+      raftManager.shutdown()
+    } finally {
+      Utils.delete(logDir)
+    }
   }
 
   @ParameterizedTest
@@ -155,22 +160,27 @@ class RaftManagerTest {
     }
 
     val nodeId = 1
-    val raftManager = createRaftManager(
-      new TopicPartition("__raft_id_test", 0),
-      createConfig(
-        Set(ProcessRole.ControllerRole),
-        nodeId,
-        logDir,
-        metadataDir
+    try {
+      val raftManager = createRaftManager(
+        new TopicPartition("__raft_id_test", 0),
+        createConfig(
+          Set(ProcessRole.ControllerRole),
+          nodeId,
+          logDir,
+          metadataDir
+        )
       )
-    )
 
-    val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME)
-    assertTrue(fileLocked(lockPath))
+      val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME)
+      assertTrue(fileLocked(lockPath))
 
-    raftManager.shutdown()
+      raftManager.shutdown()
 
-    assertFalse(fileLocked(lockPath))
+      assertFalse(fileLocked(lockPath))
+    } finally {
+      logDir.foreach(p => Utils.delete(p.toFile))
+      metadataDir.foreach(p => Utils.delete(p.toFile))
+    }
   }
 
   @Test
@@ -179,22 +189,27 @@ class RaftManagerTest {
     val metadataDir = Some(TestUtils.tempDir().toPath)
 
     val nodeId = 1
-    val raftManager = createRaftManager(
-      new TopicPartition("__raft_id_test", 0),
-      createConfig(
-        Set(ProcessRole.BrokerRole),
-        nodeId,
-        logDir,
-        metadataDir
+    try {
+      val raftManager = createRaftManager(
+        new TopicPartition("__raft_id_test", 0),
+        createConfig(
+          Set(ProcessRole.BrokerRole),
+          nodeId,
+          logDir,
+          metadataDir
+        )
       )
-    )
 
-    val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME)
-    assertTrue(fileLocked(lockPath))
+      val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME)
+      assertTrue(fileLocked(lockPath))
 
-    raftManager.shutdown()
+      raftManager.shutdown()
 
-    assertFalse(fileLocked(lockPath))
+      assertFalse(fileLocked(lockPath))
+    } finally {
+      logDir.foreach(p => Utils.delete(p.toFile))
+      metadataDir.foreach(p => Utils.delete(p.toFile))
+    }
   }
 
   def createMetadataLog(config: KafkaConfig): Unit = {