Mark Copycat classes as unstable and reduce visibility of some classes where possible.

This commit is contained in:
Ewen Cheslack-Postava 2015-08-12 20:34:30 -07:00
parent d55d31ec98
commit 0fa7a365dc
29 changed files with 57 additions and 11 deletions

View File

@@ -109,6 +109,7 @@
   </subpackage>
   <subpackage name="copycat">
+    <allow pkg="org.apache.kafka.common" />
     <allow pkg="org.apache.kafka.copycat.data" />
     <allow pkg="org.apache.kafka.copycat.errors" />
@@ -124,7 +125,6 @@
   <subpackage name="runtime">
     <allow pkg="org.apache.kafka.copycat" />
-    <allow pkg="org.apache.kafka.common" />
     <allow pkg="org.apache.kafka.clients" />
     <!-- for tests -->
     <allow pkg="org.easymock" />

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.connector;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.errors.CopycatException;
 import java.util.List;
@@ -39,6 +40,7 @@ import java.util.Properties;
  * Tasks.
  * </p>
  */
+@InterfaceStability.Unstable
 public abstract class Connector {
     protected ConnectorContext context;

View File

@@ -17,9 +17,12 @@
 package org.apache.kafka.copycat.connector;
+import org.apache.kafka.common.annotation.InterfaceStability;
 /**
  * ConnectorContext allows Connectors to proactively interact with the Copycat runtime.
  */
+@InterfaceStability.Unstable
 public interface ConnectorContext {
     /**
      * Requests that the runtime reconfigure the Tasks for this source. This should be used to

View File

@@ -17,6 +17,8 @@
 package org.apache.kafka.copycat.connector;
+import org.apache.kafka.common.annotation.InterfaceStability;
 /**
  * <p>
  * Base class for records containing data to be copied to/from Kafka. This corresponds closely to
@@ -25,6 +27,7 @@ package org.apache.kafka.copycat.connector;
  * notion of offset, it is not included here because they differ in type.
  * </p>
  */
+@InterfaceStability.Unstable
 public abstract class CopycatRecord {
     private final String topic;
     private final Integer kafkaPartition;

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.connector;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.errors.CopycatException;
 import java.util.Properties;
@@ -34,6 +35,7 @@ import java.util.Properties;
 * {@link org.apache.kafka.copycat.sink.SinkTask}.
 * </p>
 */
+@InterfaceStability.Unstable
 public interface Task {
     /**
      * Start the Task

View File

@@ -17,9 +17,12 @@
 package org.apache.kafka.copycat.errors;
+import org.apache.kafka.common.annotation.InterfaceStability;
 /**
  * CopycatException is the top-level exception type generated by Copycat and connectors.
  */
+@InterfaceStability.Unstable
 public class CopycatException extends Exception {
     public CopycatException(String s) {

View File

@@ -17,6 +17,9 @@
 package org.apache.kafka.copycat.errors;
+import org.apache.kafka.common.annotation.InterfaceStability;
+@InterfaceStability.Unstable
 public class CopycatRuntimeException extends RuntimeException {
     public CopycatRuntimeException(String s) {
         super(s);

View File

@@ -17,11 +17,13 @@
 package org.apache.kafka.copycat.sink;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.Connector;
 /**
  * SinkConnectors implement the Connector interface to send Kafka data to another system.
  */
+@InterfaceStability.Unstable
 public abstract class SinkConnector extends Connector {
     /**

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.sink;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.CopycatRecord;
 /**
@@ -24,6 +25,7 @@ import org.apache.kafka.copycat.connector.CopycatRecord;
  * the record in the Kafka topic-partition in addition to the standard fields. This information
  * should be used by the SinkTask to coordinate kafkaOffset commits.
  */
+@InterfaceStability.Unstable
 public class SinkRecord extends CopycatRecord {
     private final long kafkaOffset;

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.sink;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.Task;
 import org.apache.kafka.copycat.errors.CopycatException;
@@ -28,6 +29,7 @@ import java.util.Map;
  * addition to the basic {@link #put} interface, SinkTasks must also implement {@link #flush}
  * to support offset commits.
  */
+@InterfaceStability.Unstable
 public abstract class SinkTask implements Task {
     /**

View File

@@ -18,6 +18,7 @@
 package org.apache.kafka.copycat.sink;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import java.util.HashMap;
 import java.util.Map;
@@ -25,6 +26,7 @@ import java.util.Map;
 /**
  * Context passed to SinkTasks, allowing them to access utilities in the copycat runtime.
  */
+@InterfaceStability.Unstable
 public abstract class SinkTaskContext {
     private Map<TopicPartition, Long> offsets;

View File

@@ -16,12 +16,14 @@
 **/
 package org.apache.kafka.copycat.source;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.Connector;
 /**
  * SourceConnectors implement the connector interface to pull data from another system and send
  * it to Kafka.
  */
+@InterfaceStability.Unstable
 public abstract class SourceConnector extends Connector {
 }

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.source;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.CopycatRecord;
 /**
@@ -37,6 +38,7 @@ import org.apache.kafka.copycat.connector.CopycatRecord;
  * "table_name"} and the sourceOffset as a Long containing the timestamp of the row.
  * </p>
  */
+@InterfaceStability.Unstable
 public class SourceRecord extends CopycatRecord {
     private final Object sourcePartition;
     private final Object sourceOffset;

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.source;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.connector.Task;
 import java.util.List;
@@ -24,6 +25,7 @@ import java.util.List;
 /**
  * SourceTask is a Task that pulls records from another system for storage in Kafka.
  */
+@InterfaceStability.Unstable
 public abstract class SourceTask implements Task {
     protected SourceTaskContext context;

View File

@@ -16,12 +16,14 @@
 **/
 package org.apache.kafka.copycat.source;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.copycat.storage.OffsetStorageReader;
 /**
  * SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying
  * runtime.
  */
+@InterfaceStability.Unstable
 public class SourceTaskContext {
     private final OffsetStorageReader reader;

View File

@@ -17,6 +17,8 @@
 package org.apache.kafka.copycat.storage;
+import org.apache.kafka.common.annotation.InterfaceStability;
 /**
  * The Converter interface provides support for translating between Copycat's runtime data format
  * and the "native" runtime format used by the serialization layer. This is used to translate
@@ -24,6 +26,7 @@ package org.apache.kafka.copycat.storage;
  * component -- the producer or consumer serializer or deserializer for records or a Copycat
  * serializer or deserializer for offsets.
  */
+@InterfaceStability.Unstable
 public interface Converter<T> {
     /**

View File

@@ -17,6 +17,8 @@
 package org.apache.kafka.copycat.storage;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import java.util.Collection;
 import java.util.Map;
@@ -25,6 +27,7 @@ import java.util.Map;
  * connectors to determine offsets to start consuming data from. This is most commonly used during
  * initialization of a task, but can also be used during runtime, e.g. when reconfiguring a task.
  */
+@InterfaceStability.Unstable
 public interface OffsetStorageReader {
     /**
      * Get the offset for the specified partition. If the data isn't already available locally, this

View File

@@ -17,6 +17,8 @@
 package org.apache.kafka.copycat.util;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import java.util.ArrayList;
 import java.util.List;
@@ -24,6 +26,7 @@ import java.util.List;
  * Utilities that connector implementations might find useful. Contains common building blocks
  * for writing connectors.
  */
+@InterfaceStability.Unstable
 public class ConnectorUtils {
     /**
      * Given a list of elements and a target number of groups, generates list of groups of

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.cli;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.common.config.ConfigException;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.copycat.runtime.Coordinator;
@@ -40,6 +41,7 @@ import java.util.Properties;
  * fault tolerant by overriding the settings to use file storage for both.
  * </p>
  */
+@InterfaceStability.Unstable
 public class Copycat {
     private static final Logger log = LoggerFactory.getLogger(Copycat.class);

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.cli;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.common.config.AbstractConfig;
 import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.config.ConfigDef.Importance;
@@ -24,6 +25,7 @@ import org.apache.kafka.common.config.ConfigDef.Type;
 import java.util.Properties;
+@InterfaceStability.Unstable
 public class CopycatConfig extends AbstractConfig {
     public static final String WORKER_PROPERTIES_CONFIG = "worker-config";
@@ -53,11 +55,8 @@ public class CopycatConfig extends AbstractConfig {
                 Importance.HIGH, DELETE_CONNECTORS_CONFIG_DOC);
     }
-    private Properties originalProperties;
     public CopycatConfig(Properties props) {
         super(config, props);
-        this.originalProperties = props;
     }
     /**

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.cli;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.common.config.AbstractConfig;
 import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.config.ConfigDef.Importance;
@@ -28,6 +29,7 @@ import java.util.Set;
 /**
  * Configuration for standalone workers.
  */
+@InterfaceStability.Unstable
 public class WorkerConfig extends AbstractConfig {
     public static final String CLUSTER_CONFIG = "cluster";

View File

@@ -17,6 +17,7 @@
 package org.apache.kafka.copycat.runtime;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,6 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
  * This class ties together all the components of a Copycat process (coordinator, worker,
  * storage, command interface), managing their lifecycle.
  */
+@InterfaceStability.Unstable
 public class Copycat {
     private static final Logger log = LoggerFactory.getLogger(Copycat.class);

View File

@@ -19,6 +19,6 @@ package org.apache.kafka.copycat.runtime;
 import org.apache.kafka.copycat.sink.SinkTaskContext;
-public class SinkTaskContextImpl extends SinkTaskContext {
+class SinkTaskContextImpl extends SinkTaskContext {
 }

View File

@@ -44,7 +44,7 @@ import java.util.concurrent.TimeUnit;
  * The current implementation uses a single thread to process commits and
  * </p>
  */
-public class SourceTaskOffsetCommitter {
+class SourceTaskOffsetCommitter {
     private static final Logger log = LoggerFactory.getLogger(SourceTaskOffsetCommitter.class);
     private Time time;

View File

@@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit;
 /**
  * WorkerTask that uses a SinkTask to export data from Kafka.
  */
-public class WorkerSinkTask<K, V> implements WorkerTask {
+class WorkerSinkTask<K, V> implements WorkerTask {
     private static final Logger log = LoggerFactory.getLogger(WorkerSinkTask.class);
     private final ConnectorTaskId id;

View File

@@ -46,7 +46,7 @@ import java.util.concurrent.TimeoutException;
 /**
  * WorkerTask that uses a SourceTask to ingest data into Kafka.
  */
-public class WorkerSourceTask<K, V> implements WorkerTask {
+class WorkerSourceTask<K, V> implements WorkerTask {
     private static final Logger log = LoggerFactory.getLogger(WorkerSourceTask.class);
     private ConnectorTaskId id;

View File

@@ -26,7 +26,7 @@ import java.util.Properties;
  * used by {@link Worker} to manage the tasks. Implementations combine a user-specified Task with
  * Kafka to create a data flow.
  */
-public interface WorkerTask {
+interface WorkerTask {
     /**
      * Start the Task
      * @param props initial configuration

View File

@@ -24,7 +24,7 @@ import java.util.Properties;
  * Interface used by StandaloneController to store configuration data for jobs. To be fault
  * tolerant, all data required to resume jobs is stored here.
  */
-public interface ConfigStorage {
+interface ConfigStorage {
     /**
      * Configure this storage engine.

View File

@@ -23,7 +23,7 @@ import org.apache.kafka.copycat.connector.ConnectorContext;
  * ConnectorContext for use with the StandaloneCoordinator, which maintains all connectors and tasks
  * in a single process.
  */
-public class StandaloneConnectorContext implements ConnectorContext {
+class StandaloneConnectorContext implements ConnectorContext {
     private StandaloneCoordinator coordinator;
     private String connectorName;