Mark Copycat classes as unstable and reduce visibility of some classes where possible.

This commit is contained in:
Ewen Cheslack-Postava 2015-08-12 20:34:30 -07:00
parent d55d31ec98
commit 0fa7a365dc
29 changed files with 57 additions and 11 deletions

View File

@ -109,6 +109,7 @@
</subpackage>
<subpackage name="copycat">
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.copycat.data" />
<allow pkg="org.apache.kafka.copycat.errors" />
@ -124,7 +125,6 @@
<subpackage name="runtime">
<allow pkg="org.apache.kafka.copycat" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.clients" />
<!-- for tests -->
<allow pkg="org.easymock" />

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.errors.CopycatException;
import java.util.List;
@ -39,6 +40,7 @@ import java.util.Properties;
* Tasks.
* </p>
*/
@InterfaceStability.Unstable
public abstract class Connector {
protected ConnectorContext context;

View File

@ -17,9 +17,12 @@
package org.apache.kafka.copycat.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* ConnectorContext allows Connectors to proactively interact with the Copycat runtime.
*/
@InterfaceStability.Unstable
public interface ConnectorContext {
/**
* Requests that the runtime reconfigure the Tasks for this source. This should be used to

View File

@ -17,6 +17,8 @@
package org.apache.kafka.copycat.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* <p>
* Base class for records containing data to be copied to/from Kafka. This corresponds closely to
@ -25,6 +27,7 @@ package org.apache.kafka.copycat.connector;
* notion of offset, it is not included here because they differ in type.
* </p>
*/
@InterfaceStability.Unstable
public abstract class CopycatRecord {
private final String topic;
private final Integer kafkaPartition;

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.connector;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.errors.CopycatException;
import java.util.Properties;
@ -34,6 +35,7 @@ import java.util.Properties;
* {@link org.apache.kafka.copycat.sink.SinkTask}.
* </p>
*/
@InterfaceStability.Unstable
public interface Task {
/**
* Start the Task

View File

@ -17,9 +17,12 @@
package org.apache.kafka.copycat.errors;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* CopycatException is the top-level exception type generated by Copycat and connectors.
*/
@InterfaceStability.Unstable
public class CopycatException extends Exception {
public CopycatException(String s) {

View File

@ -17,6 +17,9 @@
package org.apache.kafka.copycat.errors;
import org.apache.kafka.common.annotation.InterfaceStability;
@InterfaceStability.Unstable
public class CopycatRuntimeException extends RuntimeException {
public CopycatRuntimeException(String s) {
super(s);

View File

@ -17,11 +17,13 @@
package org.apache.kafka.copycat.sink;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.Connector;
/**
* SinkConnectors implement the Connector interface to send Kafka data to another system.
*/
@InterfaceStability.Unstable
public abstract class SinkConnector extends Connector {
/**

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.sink;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.CopycatRecord;
/**
@ -24,6 +25,7 @@ import org.apache.kafka.copycat.connector.CopycatRecord;
* the record in the Kafka topic-partition in addition to the standard fields. This information
* should be used by the SinkTask to coordinate kafkaOffset commits.
*/
@InterfaceStability.Unstable
public class SinkRecord extends CopycatRecord {
private final long kafkaOffset;

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.sink;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.Task;
import org.apache.kafka.copycat.errors.CopycatException;
@ -28,6 +29,7 @@ import java.util.Map;
* addition to the basic {@link #put} interface, SinkTasks must also implement {@link #flush}
* to support offset commits.
*/
@InterfaceStability.Unstable
public abstract class SinkTask implements Task {
/**

View File

@ -18,6 +18,7 @@
package org.apache.kafka.copycat.sink;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.HashMap;
import java.util.Map;
@ -25,6 +26,7 @@ import java.util.Map;
/**
 * Context passed to SinkTasks, allowing them to access utilities in the Copycat runtime.
*/
@InterfaceStability.Unstable
public abstract class SinkTaskContext {
private Map<TopicPartition, Long> offsets;

View File

@ -16,12 +16,14 @@
**/
package org.apache.kafka.copycat.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.Connector;
/**
 * SourceConnectors implement the Connector interface to pull data from another system and send
* it to Kafka.
*/
@InterfaceStability.Unstable
public abstract class SourceConnector extends Connector {
}

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.CopycatRecord;
/**
@ -37,6 +38,7 @@ import org.apache.kafka.copycat.connector.CopycatRecord;
* "table_name"} and the sourceOffset as a Long containing the timestamp of the row.
* </p>
*/
@InterfaceStability.Unstable
public class SourceRecord extends CopycatRecord {
private final Object sourcePartition;
private final Object sourceOffset;

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.connector.Task;
import java.util.List;
@ -24,6 +25,7 @@ import java.util.List;
/**
* SourceTask is a Task that pulls records from another system for storage in Kafka.
*/
@InterfaceStability.Unstable
public abstract class SourceTask implements Task {
protected SourceTaskContext context;

View File

@ -16,12 +16,14 @@
**/
package org.apache.kafka.copycat.source;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.copycat.storage.OffsetStorageReader;
/**
* SourceTaskContext is provided to SourceTasks to allow them to interact with the underlying
* runtime.
*/
@InterfaceStability.Unstable
public class SourceTaskContext {
private final OffsetStorageReader reader;

View File

@ -17,6 +17,8 @@
package org.apache.kafka.copycat.storage;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* The Converter interface provides support for translating between Copycat's runtime data format
* and the "native" runtime format used by the serialization layer. This is used to translate
@ -24,6 +26,7 @@ package org.apache.kafka.copycat.storage;
* component -- the producer or consumer serializer or deserializer for records or a Copycat
* serializer or deserializer for offsets.
*/
@InterfaceStability.Unstable
public interface Converter<T> {
/**

View File

@ -17,6 +17,8 @@
package org.apache.kafka.copycat.storage;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
@ -25,6 +27,7 @@ import java.util.Map;
* connectors to determine offsets to start consuming data from. This is most commonly used during
* initialization of a task, but can also be used during runtime, e.g. when reconfiguring a task.
*/
@InterfaceStability.Unstable
public interface OffsetStorageReader {
/**
* Get the offset for the specified partition. If the data isn't already available locally, this

View File

@ -17,6 +17,8 @@
package org.apache.kafka.copycat.util;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.ArrayList;
import java.util.List;
@ -24,6 +26,7 @@ import java.util.List;
* Utilities that connector implementations might find useful. Contains common building blocks
* for writing connectors.
*/
@InterfaceStability.Unstable
public class ConnectorUtils {
/**
* Given a list of elements and a target number of groups, generates list of groups of

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.cli;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.copycat.runtime.Coordinator;
@ -40,6 +41,7 @@ import java.util.Properties;
* fault tolerant by overriding the settings to use file storage for both.
* </p>
*/
@InterfaceStability.Unstable
public class Copycat {
private static final Logger log = LoggerFactory.getLogger(Copycat.class);

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.cli;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
@ -24,6 +25,7 @@ import org.apache.kafka.common.config.ConfigDef.Type;
import java.util.Properties;
@InterfaceStability.Unstable
public class CopycatConfig extends AbstractConfig {
public static final String WORKER_PROPERTIES_CONFIG = "worker-config";
@ -53,11 +55,8 @@ public class CopycatConfig extends AbstractConfig {
Importance.HIGH, DELETE_CONNECTORS_CONFIG_DOC);
}
private Properties originalProperties;
public CopycatConfig(Properties props) {
super(config, props);
this.originalProperties = props;
}
/**

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.cli;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
@ -28,6 +29,7 @@ import java.util.Set;
/**
* Configuration for standalone workers.
*/
@InterfaceStability.Unstable
public class WorkerConfig extends AbstractConfig {
public static final String CLUSTER_CONFIG = "cluster";

View File

@ -17,6 +17,7 @@
package org.apache.kafka.copycat.runtime;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -27,6 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
* This class ties together all the components of a Copycat process (coordinator, worker,
* storage, command interface), managing their lifecycle.
*/
@InterfaceStability.Unstable
public class Copycat {
private static final Logger log = LoggerFactory.getLogger(Copycat.class);

View File

@ -19,6 +19,6 @@ package org.apache.kafka.copycat.runtime;
import org.apache.kafka.copycat.sink.SinkTaskContext;
public class SinkTaskContextImpl extends SinkTaskContext {
class SinkTaskContextImpl extends SinkTaskContext {
}

View File

@ -44,7 +44,7 @@ import java.util.concurrent.TimeUnit;
 * The current implementation uses a single thread to process commits.
* </p>
*/
public class SourceTaskOffsetCommitter {
class SourceTaskOffsetCommitter {
private static final Logger log = LoggerFactory.getLogger(SourceTaskOffsetCommitter.class);
private Time time;

View File

@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit;
/**
* WorkerTask that uses a SinkTask to export data from Kafka.
*/
public class WorkerSinkTask<K, V> implements WorkerTask {
class WorkerSinkTask<K, V> implements WorkerTask {
private static final Logger log = LoggerFactory.getLogger(WorkerSinkTask.class);
private final ConnectorTaskId id;

View File

@ -46,7 +46,7 @@ import java.util.concurrent.TimeoutException;
/**
* WorkerTask that uses a SourceTask to ingest data into Kafka.
*/
public class WorkerSourceTask<K, V> implements WorkerTask {
class WorkerSourceTask<K, V> implements WorkerTask {
private static final Logger log = LoggerFactory.getLogger(WorkerSourceTask.class);
private ConnectorTaskId id;

View File

@ -26,7 +26,7 @@ import java.util.Properties;
* used by {@link Worker} to manage the tasks. Implementations combine a user-specified Task with
* Kafka to create a data flow.
*/
public interface WorkerTask {
interface WorkerTask {
/**
* Start the Task
* @param props initial configuration

View File

@ -24,7 +24,7 @@ import java.util.Properties;
* Interface used by StandaloneController to store configuration data for jobs. To be fault
* tolerant, all data required to resume jobs is stored here.
*/
public interface ConfigStorage {
interface ConfigStorage {
/**
* Configure this storage engine.

View File

@ -23,7 +23,7 @@ import org.apache.kafka.copycat.connector.ConnectorContext;
* ConnectorContext for use with the StandaloneCoordinator, which maintains all connectors and tasks
* in a single process.
*/
public class StandaloneConnectorContext implements ConnectorContext {
class StandaloneConnectorContext implements ConnectorContext {
private StandaloneCoordinator coordinator;
private String connectorName;