MINOR: Update Scalafmt to latest version (#12475)

Reviewers: Divij Vaidya <diviv@amazon.com>, Chris Egerton <fearthecellos@gmail.com>
Author: Matthew de Detrich, 2022-09-12 16:05:15 +02:00 (committed by GitHub)
Parent: b449b8032e
Commit: e138772ba5
13 changed files with 41 additions and 37 deletions

View File

@@ -30,7 +30,7 @@ buildscript {
 }
 plugins {
-id 'com.diffplug.spotless' version '5.12.5'
+id 'com.diffplug.spotless' version '6.10.0'
 id 'com.github.ben-manes.versions' version '0.42.0'
 id 'idea'
 id 'java-library'
@@ -47,7 +47,7 @@ plugins {
 spotless {
 scala {
 target 'streams/**/*.scala'
-scalafmt("$versions.scalafmt").configFile('checkstyle/.scalafmt.conf')
+scalafmt("$versions.scalafmt").configFile('checkstyle/.scalafmt.conf').scalaMajorVersion(versions.baseScala)
 licenseHeaderFile 'checkstyle/java.header', 'package'
 }
 }

View File

@@ -12,7 +12,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-docstrings = JavaDoc
+version = 3.5.9
+runner.dialect = scala213
+docstrings.style = Asterisk
+docstrings.wrap = false
 maxColumn = 120
 continuationIndent.defnSite = 2
 assumeStandardLibraryStripMargin = true
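
As a quick illustration (not part of the commit), here is a small Scala snippet written the way the reformatted files below end up looking under this configuration: Scaladoc in Asterisk style kept within the 120-column maxColumn, and selectors in a brace import, including renames, listed alphabetically. The object and method names are made up for the example.

import java.time.{Duration => JDuration, Instant}

/**
 * Hypothetical helper, shown only to illustrate the Asterisk docstring style and the 120-column limit set above.
 */
object FormattingExample {
  // The rename `Duration => JDuration` keeps its place in the sorted selector list ("Duration" < "Instant").
  def elapsed(start: Instant, end: Instant): JDuration = JDuration.between(start, end)
}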

View File

@@ -111,7 +111,10 @@ versions += [
 reload4j: "1.2.19",
 rocksDB: "6.29.4.1",
 scalaCollectionCompat: "2.6.0",
-scalafmt: "2.7.5",
+// When updating the scalafmt version please also update the version field in checkstyle/.scalafmt.conf. scalafmt now
+// has the version field as mandatory in its configuration, see
+// https://github.com/scalameta/scalafmt/releases/tag/v3.1.0.
+scalafmt: "3.5.9",
 scalaJava8Compat : "1.0.2",
 scoverage: "1.4.11",
 slf4j: "1.7.36",

View File

@@ -24,9 +24,8 @@ import java.lang.{Iterable => JIterable}
 import org.apache.kafka.streams.processor.ProcessorContext
 /**
- * Implicit classes that offer conversions of Scala function literals to
- * SAM (Single Abstract Method) objects in Java. These make the Scala APIs much
- * more expressive, with less boilerplate and more succinct.
+ * Implicit classes that offer conversions of Scala function literals to SAM (Single Abstract Method) objects in Java.
+ * These make the Scala APIs much more expressive, with less boilerplate and more succinct.
  */
 private[scala] object FunctionsCompatConversions {

View File

@@ -19,22 +19,21 @@ package org.apache.kafka.streams.scala
 import org.apache.kafka.common.serialization.Serde
 import org.apache.kafka.streams.KeyValue
 import org.apache.kafka.streams.kstream.{
-KStream => KStreamJ,
-KGroupedStream => KGroupedStreamJ,
-TimeWindowedKStream => TimeWindowedKStreamJ,
-SessionWindowedKStream => SessionWindowedKStreamJ,
 CogroupedKStream => CogroupedKStreamJ,
-TimeWindowedCogroupedKStream => TimeWindowedCogroupedKStreamJ,
-SessionWindowedCogroupedKStream => SessionWindowedCogroupedKStreamJ,
+KGroupedStream => KGroupedStreamJ,
+KGroupedTable => KGroupedTableJ,
+KStream => KStreamJ,
 KTable => KTableJ,
-KGroupedTable => KGroupedTableJ
+SessionWindowedCogroupedKStream => SessionWindowedCogroupedKStreamJ,
+SessionWindowedKStream => SessionWindowedKStreamJ,
+TimeWindowedCogroupedKStream => TimeWindowedCogroupedKStreamJ,
+TimeWindowedKStream => TimeWindowedKStreamJ
 }
 import org.apache.kafka.streams.processor.StateStore
 import org.apache.kafka.streams.scala.kstream._
 /**
- * Implicit conversions between the Scala wrapper objects and the underlying Java
- * objects.
+ * Implicit conversions between the Scala wrapper objects and the underlying Java objects.
  */
 object ImplicitConversions {

View File

@@ -18,11 +18,11 @@ package org.apache.kafka.streams.scala
 package kstream
 import org.apache.kafka.streams.kstream.{
+CogroupedKStream => CogroupedKStreamJ,
 SessionWindows,
 SlidingWindows,
 Window,
-Windows,
-CogroupedKStream => CogroupedKStreamJ
+Windows
 }
 import org.apache.kafka.streams.scala.FunctionsCompatConversions.{AggregatorFromFunction, InitializerFromFunction}

View File

@@ -20,12 +20,12 @@ package kstream
 import org.apache.kafka.streams.kstream.internals.KTableImpl
 import org.apache.kafka.streams.scala.serialization.Serdes
 import org.apache.kafka.streams.kstream.{
+KGroupedStream => KGroupedStreamJ,
+KTable => KTableJ,
 SessionWindows,
 SlidingWindows,
 Window,
-Windows,
-KGroupedStream => KGroupedStreamJ,
-KTable => KTableJ
+Windows
 }
 import org.apache.kafka.streams.scala.FunctionsCompatConversions.{
 AggregatorFromFunction,

View File

@@ -21,11 +21,11 @@ import org.apache.kafka.streams.KeyValue
 import org.apache.kafka.streams.kstream.{
 GlobalKTable,
 JoinWindows,
+KStream => KStreamJ,
 Printed,
 TransformerSupplier,
 ValueTransformerSupplier,
-ValueTransformerWithKeySupplier,
-KStream => KStreamJ
+ValueTransformerWithKeySupplier
 }
 import org.apache.kafka.streams.processor.TopicNameExtractor
 import org.apache.kafka.streams.processor.api.{FixedKeyProcessorSupplier, ProcessorSupplier}
@@ -334,7 +334,7 @@ class KStream[K, V](val inner: KStreamJ[K, V]) {
  * @see `org.apache.kafka.streams.kstream.KStream#branch`
  * @deprecated since 2.8. Use `split` instead.
  */
-//noinspection ScalaUnnecessaryParentheses
+// noinspection ScalaUnnecessaryParentheses
 @deprecated("use `split()` instead", "2.8")
 def branch(predicates: ((K, V) => Boolean)*): Array[KStream[K, V]] =
 inner.branch(predicates.map(_.asPredicate): _*).map(kstream => new KStream(kstream))

View File

@@ -18,7 +18,7 @@ package org.apache.kafka.streams.scala
 package kstream
 import org.apache.kafka.common.utils.Bytes
-import org.apache.kafka.streams.kstream.{TableJoined, ValueJoiner, ValueTransformerWithKeySupplier, KTable => KTableJ}
+import org.apache.kafka.streams.kstream.{KTable => KTableJ, TableJoined, ValueJoiner, ValueTransformerWithKeySupplier}
 import org.apache.kafka.streams.scala.FunctionsCompatConversions.{
 FunctionFromFunction,
 KeyValueMapperFromFunction,

View File

@@ -20,7 +20,7 @@ import java.nio.ByteBuffer
 import java.util
 import java.util.UUID
-import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer, Serdes => JSerdes}
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serdes => JSerdes, Serializer}
 import org.apache.kafka.streams.kstream.WindowedSerdes
 object Serdes extends LowPrioritySerdes {
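
Note how the selectors above end up ordered: the rename Serdes => JSerdes is sorted by its original name (Serdes), not by the alias, which is why it lands between Serde and Serializer. A tiny, hypothetical example of the same rule (names invented for illustration):

import java.util.{List => JList, Optional, UUID}

object ImportSortExample {
  // `List => JList` is ordered by "List", so it precedes "Optional" and "UUID" even though the alias starts with "J".
  def wrap(id: UUID): Optional[UUID] = Optional.of(id)
  def singleton(id: UUID): JList[UUID] = java.util.Collections.singletonList(id)
}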

View File

@@ -25,16 +25,16 @@ import org.apache.kafka.streams.kstream.{
 Aggregator,
 Initializer,
 JoinWindows,
-KeyValueMapper,
-Reducer,
-Transformer,
-ValueJoiner,
-ValueMapper,
 KGroupedStream => KGroupedStreamJ,
 KStream => KStreamJ,
 KTable => KTableJ,
+KeyValueMapper,
 Materialized => MaterializedJ,
-StreamJoined => StreamJoinedJ
+Reducer,
+StreamJoined => StreamJoinedJ,
+Transformer,
+ValueJoiner,
+ValueMapper
 }
 import org.apache.kafka.streams.processor.{api, ProcessorContext}
 import org.apache.kafka.streams.processor.api.{Processor, ProcessorSupplier}
@@ -42,7 +42,7 @@ import org.apache.kafka.streams.scala.ImplicitConversions._
 import org.apache.kafka.streams.scala.serialization.{Serdes => NewSerdes}
 import org.apache.kafka.streams.scala.serialization.Serdes._
 import org.apache.kafka.streams.scala.kstream._
-import org.apache.kafka.streams.{KeyValue, StreamsConfig, TopologyDescription, StreamsBuilder => StreamsBuilderJ}
+import org.apache.kafka.streams.{KeyValue, StreamsBuilder => StreamsBuilderJ, StreamsConfig, TopologyDescription}
 import org.junit.jupiter.api.Assertions._
 import org.junit.jupiter.api._

View File

@@ -191,7 +191,7 @@ class KStreamTest extends TestDriver {
 testDriver.close()
 }
-//noinspection ScalaDeprecation
+// noinspection ScalaDeprecation
 @Test
 def testJoinCorrectlyRecords(): Unit = {
 val builder = new StreamsBuilder()

View File

@@ -19,11 +19,11 @@ package org.apache.kafka.streams.scala.kstream
 import org.apache.kafka.streams.kstream.Suppressed.BufferConfig
 import org.apache.kafka.streams.kstream.{
 Named,
-SlidingWindows,
 SessionWindows,
+SlidingWindows,
+Suppressed => JSuppressed,
 TimeWindows,
-Windowed,
-Suppressed => JSuppressed
+Windowed
 }
 import org.apache.kafka.streams.scala.ImplicitConversions._
 import org.apache.kafka.streams.scala.serialization.Serdes._