KAFKA-3330; Truncate log cleaner offset checkpoint if the log is truncated

becketqin Can you take a look?

Author: Dong Lin <lindong28@gmail.com>

Reviewers: Ismael Juma <ismael@juma.me.uk>, Jun Rao <junrao@gmail.com>

Closes #1009 from lindong28/KAFKA-3330
Authored by Dong Lin on 2016-03-17 15:21:20 -07:00; committed by Jun Rao
parent 61281f5c53
commit 579d473ce9
3 changed files with 25 additions and 2 deletions
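
Why the checkpoint needs to follow the truncation: the log cleaner records, per compacted partition, the offset up to which it has already cleaned (the "cleaner offset checkpoint", a per-data-directory file keyed by TopicAndPartition). If the log is later truncated below that offset, for example when a follower truncates to match its leader, the checkpoint points past the new log end and no longer describes a valid cleaning position. The fix below lowers the checkpoint to the truncation point whenever it exceeds it. A minimal, self-contained sketch of that rule, with a plain Map standing in for the checkpoint file (the names here are illustrative, not Kafka's classes):

// Illustrative model only: the real checkpoint is a file on disk keyed by
// TopicAndPartition; here a Map[String, Long] stands in for it.
object CheckpointTruncationSketch {

  def maybeTruncate(checkpoint: Map[String, Long], partition: String, truncateOffset: Long): Map[String, Long] =
    // Only rewrite the entry if it now points past the truncated log end.
    if (checkpoint.getOrElse(partition, 0L) > truncateOffset)
      checkpoint + (partition -> truncateOffset)
    else
      checkpoint

  def main(args: Array[String]): Unit = {
    val before = Map("topic-0" -> 500L)                 // cleaner has cleaned up to offset 500
    val after  = maybeTruncate(before, "topic-0", 300L) // log is truncated back to offset 300
    println(after)                                      // Map(topic-0 -> 300)
  }
}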


@@ -133,6 +133,13 @@ class LogCleaner(val config: CleanerConfig,
     cleanerManager.updateCheckpoints(dataDir, update=None)
   }
 
+  /**
+   * Truncate cleaner offset checkpoint for the given partition if its checkpointed offset is larger than the given offset
+   */
+  def maybeTruncateCheckpoint(dataDir: File, topicAndPartition: TopicAndPartition, offset: Long) {
+    cleanerManager.maybeTruncateCheckpoint(dataDir, topicAndPartition, offset)
+  }
+
   /**
    * Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition.
    * This call blocks until the cleaning of the partition is aborted and paused.


@@ -210,6 +210,18 @@ private[log] class LogCleanerManager(val logDirs: Array[File], val logs: Pool[To
     }
   }
 
+  def maybeTruncateCheckpoint(dataDir: File, topicAndPartition: TopicAndPartition, offset: Long) {
+    inLock(lock) {
+      if (logs.get(topicAndPartition).config.compact) {
+        val checkpoint = checkpoints(dataDir)
+        val existing = checkpoint.read()
+
+        if (existing.getOrElse(topicAndPartition, 0L) > offset)
+          checkpoint.write(existing + (topicAndPartition -> offset))
+      }
+    }
+  }
+
   /**
    * Save out the endOffset and remove the given log from the in-progress set, if not aborted.
    */
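
Two details of the hunk above are easy to miss: the method only acts on logs with compaction enabled (the logs.get(topicAndPartition).config.compact guard), and it only ever lowers a checkpointed offset, never raises one. Using the same simplified Map model as the earlier sketch (again not Kafka's actual API), those properties look like this:

object MaybeTruncateProperties {
  def maybeTruncate(checkpoint: Map[String, Long], partition: String, offset: Long): Map[String, Long] =
    if (checkpoint.getOrElse(partition, 0L) > offset) checkpoint + (partition -> offset) else checkpoint

  def main(args: Array[String]): Unit = {
    val cp = Map("topic-0" -> 200L)
    assert(maybeTruncate(cp, "topic-0", 150L) == Map("topic-0" -> 150L)) // lowered to the truncation point
    assert(maybeTruncate(cp, "topic-0", 250L) == cp)                     // never raised above what was checkpointed
    assert(maybeTruncate(cp, "topic-1", 100L) == cp)                     // other partitions are left untouched
  }
}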


@@ -286,8 +286,10 @@ class LogManager(val logDirs: Array[File],
         if (needToStopCleaner && cleaner != null)
           cleaner.abortAndPauseCleaning(topicAndPartition)
         log.truncateTo(truncateOffset)
-        if (needToStopCleaner && cleaner != null)
+        if (needToStopCleaner && cleaner != null) {
+          cleaner.maybeTruncateCheckpoint(log.dir.getParentFile, topicAndPartition, log.activeSegment.baseOffset)
           cleaner.resumeCleaning(topicAndPartition)
+        }
       }
     }
     checkpointRecoveryPointOffsets()
@@ -305,8 +307,10 @@ class LogManager(val logDirs: Array[File],
       if (cleaner != null)
         cleaner.abortAndPauseCleaning(topicAndPartition)
       log.truncateFullyAndStartAt(newOffset)
-      if (cleaner != null)
+      if (cleaner != null) {
+        cleaner.maybeTruncateCheckpoint(log.dir.getParentFile, topicAndPartition, log.activeSegment.baseOffset)
         cleaner.resumeCleaning(topicAndPartition)
+      }
     }
     checkpointRecoveryPointOffsets()
   }
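
Both LogManager call sites follow the same ordering: pause cleaning for the partition, truncate the log, lower the cleaner checkpoint to the new active segment's base offset, and only then resume cleaning, so the cleaner never runs against a stale checkpoint. A condensed sketch of that ordering (the trait and method names are invented for the example, not Kafka's API):

object TruncateOrderingSketch {
  trait CleanerLike {
    def abortAndPauseCleaning(partition: String): Unit
    def maybeTruncateCheckpoint(partition: String, offset: Long): Unit
    def resumeCleaning(partition: String): Unit
  }
  trait LogLike {
    def truncateTo(offset: Long): Unit
    def activeSegmentBaseOffset: Long
  }

  def truncateWithCleaner(cleaner: Option[CleanerLike], log: LogLike,
                          partition: String, target: Long): Unit = {
    cleaner.foreach(_.abortAndPauseCleaning(partition))   // cleaner must not run while the log shrinks
    log.truncateTo(target)
    cleaner.foreach { c =>
      // Fix the checkpoint before cleaning resumes, so the cleaner never reads a stale offset.
      c.maybeTruncateCheckpoint(partition, log.activeSegmentBaseOffset)
      c.resumeCleaning(partition)
    }
  }
}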