KAFKA-4679: Remove unstable markers from Connect APIs

ewencp, ignore this PR if you have already started working on this ticket.

Author: Balint Molnar <balintmolnar91@gmail.com>

Reviewers: Ewen Cheslack-Postava <ewen@confluent.io>

Closes #2423 from baluchicken/KAFKA-4679
This commit is contained in:
Balint Molnar 2017-01-27 19:36:31 -08:00 committed by Ewen Cheslack-Postava
parent 6cb76b95af
commit 1434b61d5d
20 changed files with 0 additions and 44 deletions

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.data.Schema;
/**
@ -28,7 +27,6 @@ import org.apache.kafka.connect.data.Schema;
* notion of offset, it is not included here because they differ in type.
* </p>
*/
@InterfaceStability.Unstable
public abstract class ConnectRecord<R extends ConnectRecord<R>> {
private final String topic;
private final Integer kafkaPartition;

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigValue;
@ -42,7 +41,6 @@ import java.util.Map;
* Tasks.
* </p>
*/
@InterfaceStability.Unstable
public abstract class Connector {
protected ConnectorContext context;

View File

@ -17,12 +17,9 @@
package org.apache.kafka.connect.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* ConnectorContext allows Connectors to proactively interact with the Kafka Connect runtime.
*/
@InterfaceStability.Unstable
public interface ConnectorContext {
/**
* Requests that the runtime reconfigure the Tasks for this source. This should be used to

View File

@ -17,8 +17,6 @@
package org.apache.kafka.connect.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
@ -34,7 +32,6 @@ import java.util.Map;
* {@link org.apache.kafka.connect.sink.SinkTask}.
* </p>
*/
@InterfaceStability.Unstable
public interface Task {
/**
* Get the version of this task. Usually this should be the same as the corresponding {@link Connector} class's version.

View File

@ -18,12 +18,10 @@
package org.apache.kafka.connect.errors;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* ConnectException is the top-level exception type generated by Kafka Connect and connector implementations.
*/
@InterfaceStability.Unstable
public class ConnectException extends KafkaException {
public ConnectException(String s) {

View File

@ -17,13 +17,11 @@
package org.apache.kafka.connect.sink;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.connector.Connector;
/**
* SinkConnectors implement the Connector interface to send Kafka data to another system.
*/
@InterfaceStability.Unstable
public abstract class SinkConnector extends Connector {
/**

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.sink;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.data.Schema;
@ -30,7 +29,6 @@ import org.apache.kafka.connect.data.Schema;
* It also includes the {@link TimestampType}, which may be {@link TimestampType#NO_TIMESTAMP_TYPE}, and the relevant
* timestamp, which may be {@code null}.
*/
@InterfaceStability.Unstable
public class SinkRecord extends ConnectRecord<SinkRecord> {
private final long kafkaOffset;
private final TimestampType timestampType;

View File

@ -18,7 +18,6 @@ package org.apache.kafka.connect.sink;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.connector.Task;
import java.util.Collection;
@ -52,7 +51,6 @@ import java.util.Map;
* </ol>
*
*/
@InterfaceStability.Unstable
public abstract class SinkTask implements Task {
/**

View File

@ -18,7 +18,6 @@
package org.apache.kafka.connect.sink;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
import java.util.Set;
@ -26,7 +25,6 @@ import java.util.Set;
/**
* Context passed to SinkTasks, allowing them to access utilities in the Kafka Connect runtime.
*/
@InterfaceStability.Unstable
public interface SinkTaskContext {
/**
* Reset the consumer offsets for the given topic partitions. SinkTasks should use this if they manage offsets

View File

@ -16,14 +16,12 @@
**/
package org.apache.kafka.connect.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.connector.Connector;
/**
* SourceConnectors implement the connector interface to pull data from another system and send
* it to Kafka.
*/
@InterfaceStability.Unstable
public abstract class SourceConnector extends Connector {
}

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.data.Schema;
@ -41,7 +40,6 @@ import java.util.Map;
* "table_name"} and the sourceOffset as a Long containing the timestamp of the row.
* </p>
*/
@InterfaceStability.Unstable
public class SourceRecord extends ConnectRecord<SourceRecord> {
private final Map<String, ?> sourcePartition;
private final Map<String, ?> sourceOffset;

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.connector.Task;
import java.util.List;
@ -26,7 +25,6 @@ import java.util.Map;
/**
* SourceTask is a Task that pulls records from another system for storage in Kafka.
*/
@InterfaceStability.Unstable
public abstract class SourceTask implements Task {
protected SourceTaskContext context;

View File

@ -16,14 +16,12 @@
**/
package org.apache.kafka.connect.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.storage.OffsetStorageReader;
/**
* SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying
* runtime.
*/
@InterfaceStability.Unstable
public interface SourceTaskContext {
/**
* Get the OffsetStorageReader for this SourceTask.

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.storage;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
@ -28,7 +27,6 @@ import java.util.Map;
* and byte[]. Internally, this likely includes an intermediate step to the format used by the serialization
* layer (e.g. JsonNode, GenericRecord, Message).
*/
@InterfaceStability.Unstable
public interface Converter {
/**

View File

@ -17,8 +17,6 @@
package org.apache.kafka.connect.storage;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
@ -33,7 +31,6 @@ import java.util.Map;
* {@link org.apache.kafka.connect.data.Schema} other than Array, Map, and Struct.
* </p>
*/
@InterfaceStability.Unstable
public interface OffsetStorageReader {
/**
* Get the offset for the specified partition. If the data isn't already available locally, this

View File

@ -17,8 +17,6 @@
package org.apache.kafka.connect.util;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.ArrayList;
import java.util.List;
@ -26,7 +24,6 @@ import java.util.List;
* Utilities that connector implementations might find useful. Contains common building blocks
* for writing connectors.
*/
@InterfaceStability.Unstable
public class ConnectorUtils {
/**
* Given a list of elements and a target number of groups, generates list of groups of

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.cli;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.runtime.Connect;
@ -47,7 +46,6 @@ import java.util.Map;
* instances.
* </p>
*/
@InterfaceStability.Unstable
public class ConnectDistributed {
private static final Logger log = LoggerFactory.getLogger(ConnectDistributed.class);

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.cli;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.runtime.Connect;
@ -51,7 +50,6 @@ import java.util.Map;
* fault tolerant by overriding the settings to use file storage for both.
* </p>
*/
@InterfaceStability.Unstable
public class ConnectStandalone {
private static final Logger log = LoggerFactory.getLogger(ConnectStandalone.class);

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.runtime;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.connect.runtime.rest.RestServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -29,7 +28,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
* This class ties together all the components of a Kafka Connect process (herder, worker,
* storage, command interface), managing their lifecycle.
*/
@InterfaceStability.Unstable
public class Connect {
private static final Logger log = LoggerFactory.getLogger(Connect.class);

View File

@ -17,7 +17,6 @@
package org.apache.kafka.connect.runtime;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
@ -28,7 +27,6 @@ import java.util.Map;
/**
* Common base class providing configuration for Kafka Connect workers, whether standalone or distributed.
*/
@InterfaceStability.Unstable
public class WorkerConfig extends AbstractConfig {
public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";