Modifier and Type | Method and Description |
---|---|
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer08.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer010.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer09.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected abstract Map<KafkaTopicPartition,Long> | FlinkKafkaConsumerBase.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
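
These protected hooks translate a timestamp into per-partition starting offsets; they back the consumer's timestamp-based startup mode. A minimal usage sketch, assuming a Flink release in which FlinkKafkaConsumerBase exposes setStartFromTimestamp (topic, group, and timestamp are placeholders):

```java
import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
props.setProperty("group.id", "example-group");

FlinkKafkaConsumer<String> consumer =
        new FlinkKafkaConsumer<>("myTopic", new SimpleStringSchema(), props);

// Resolve starting positions from an epoch-millisecond timestamp; internally the
// consumer maps the timestamp to one offset per assigned partition via
// fetchOffsetsWithTimestamp(partitions, timestamp) before fetching begins.
consumer.setStartFromTimestamp(1_500_000_000_000L);
```
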
Modifier and Type | Method and Description |
---|---|
protected AbstractFetcher<T,?> | FlinkKafkaConsumer08.createFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) |
protected AbstractFetcher<T,?> | FlinkKafkaConsumer.createFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) |
protected AbstractFetcher<T,?> | FlinkKafkaConsumer010.createFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) |
protected AbstractFetcher<T,?> | FlinkKafkaConsumer09.createFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) |
protected abstract AbstractFetcher<T,?> | FlinkKafkaConsumerBase.createFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> subscribedPartitionsToStartOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup kafkaMetricGroup, boolean useMetrics) Creates the fetcher that connects to the Kafka brokers, pulls data, deserializes it, and emits it into the data streams. |
protected KafkaTableSourceBase | Kafka08TableSourceSinkFactory.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) |
protected KafkaTableSourceBase | KafkaTableSourceSinkFactory.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) |
protected KafkaTableSourceBase | Kafka011TableSourceSinkFactory.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) |
protected KafkaTableSourceBase | Kafka010TableSourceSinkFactory.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) |
protected KafkaTableSourceBase | Kafka09TableSourceSinkFactory.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) |
protected abstract KafkaTableSourceBase | KafkaTableSourceSinkFactoryBase.createKafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Map<String,String> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Constructs the version-specific Kafka table source. |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer08.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer010.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected Map<KafkaTopicPartition,Long> | FlinkKafkaConsumer09.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
protected abstract Map<KafkaTopicPartition,Long> | FlinkKafkaConsumerBase.fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) |
FlinkKafkaConsumerBase<T> | FlinkKafkaConsumerBase.setStartFromSpecificOffsets(Map<KafkaTopicPartition,Long> specificStartupOffsets) Configures the consumer to start reading partitions from specific offsets, set independently for each partition. |
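
A minimal usage sketch for setStartFromSpecificOffsets; the topic name, partition numbers, and offsets below are placeholders:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");

// Start partition 0 at offset 23 and partition 1 at offset 31; partitions not
// listed in the map fall back to the consumer's default startup behavior.
Map<KafkaTopicPartition, Long> specificStartOffsets = new HashMap<>();
specificStartOffsets.put(new KafkaTopicPartition("myTopic", 0), 23L);
specificStartOffsets.put(new KafkaTopicPartition("myTopic", 1), 31L);

FlinkKafkaConsumer<String> consumer =
        new FlinkKafkaConsumer<>("myTopic", new SimpleStringSchema(), props);
consumer.setStartFromSpecificOffsets(specificStartOffsets);
```
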
Constructor and Description |
---|
Kafka010TableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a Kafka 0.10 StreamTableSource. |
Kafka011TableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a Kafka 0.11 StreamTableSource. |
Kafka08TableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a Kafka 0.8 StreamTableSource. |
Kafka09TableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a Kafka 0.9 StreamTableSource. |
KafkaTableSource(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a generic Kafka StreamTableSource. |
KafkaTableSourceBase(TableSchema schema, Optional<String> proctimeAttribute, List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors, Optional<Map<String,String>> fieldMapping, String topic, Properties properties, DeserializationSchema<Row> deserializationSchema, StartupMode startupMode, Map<KafkaTopicPartition,Long> specificStartupOffsets) Creates a generic Kafka StreamTableSource. |
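
A construction sketch for one of these table sources; the schema fields, topic name, and the deserializationSchema variable are illustrative placeholders, and the builder-style TableSchema API is assumed to be available in the targeted Flink version:

```java
import java.util.Collections;
import java.util.Optional;
import java.util.Properties;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.connectors.kafka.Kafka010TableSource;
import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
import org.apache.flink.table.api.TableSchema;

TableSchema schema = TableSchema.builder()
        .field("user", Types.STRING)
        .field("clicks", Types.LONG)
        .build();

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");

// `deserializationSchema` stands for any DeserializationSchema<Row> matching
// the schema above (e.g. a JSON row deserializer); its construction is omitted.
Kafka010TableSource source = new Kafka010TableSource(
        schema,
        Optional.empty(),          // no proctime attribute
        Collections.emptyList(),   // no rowtime attributes
        Optional.empty(),          // no field mapping
        "clicks",                  // topic
        props,
        deserializationSchema,
        StartupMode.GROUP_OFFSETS,
        Collections.emptyMap());   // no specific startup offsets
```
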
Modifier and Type | Method and Description |
---|---|
protected List<KafkaTopicPartition> | KafkaPartitionDiscoverer.getAllPartitionsForTopics(List<String> topics) |
protected List<KafkaTopicPartition> | Kafka09PartitionDiscoverer.getAllPartitionsForTopics(List<String> topics) |

Modifier and Type | Method and Description |
---|---|
org.apache.kafka.common.TopicPartition | KafkaFetcher.createKafkaPartitionHandle(KafkaTopicPartition partition) |
org.apache.kafka.common.TopicPartition | Kafka09Fetcher.createKafkaPartitionHandle(KafkaTopicPartition partition) |

Modifier and Type | Method and Description |
---|---|
protected void | KafkaFetcher.doCommitInternalOffsetsToKafka(Map<KafkaTopicPartition,Long> offsets, KafkaCommitCallback commitCallback) |
protected void | Kafka09Fetcher.doCommitInternalOffsetsToKafka(Map<KafkaTopicPartition,Long> offsets, KafkaCommitCallback commitCallback) |

Constructor and Description |
---|
Kafka010Fetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics, FlinkConnectorRateLimiter rateLimiter) |
Kafka09Fetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics, FlinkConnectorRateLimiter rateLimiter) |
KafkaFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, String taskNameWithSubtasks, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long pollTimeout, MetricGroup subtaskMetricGroup, MetricGroup consumerMetricGroup, boolean useMetrics) |

Modifier and Type | Method and Description |
---|---|
KafkaTopicPartition | KafkaTopicPartitionState.getKafkaTopicPartition() Gets Flink's descriptor for the Kafka partition. |
KafkaTopicPartition | KafkaTopicPartitionLeader.getTopicPartition() |

Modifier and Type | Method and Description |
---|---|
List<KafkaTopicPartition> | AbstractPartitionDiscoverer.discoverPartitions() Executes a partition discovery attempt for this subtask. |
static List<KafkaTopicPartition> | KafkaTopicPartition.dropLeaderData(List<KafkaTopicPartitionLeader> partitionInfos) |
List<KafkaTopicPartition> | Kafka08PartitionDiscoverer.getAllPartitionsForTopics(List<String> topics) |
protected abstract List<KafkaTopicPartition> | AbstractPartitionDiscoverer.getAllPartitionsForTopics(List<String> topics) Fetches the list of all partitions for the given list of topics from Kafka. |
HashMap<KafkaTopicPartition,Long> | AbstractFetcher.snapshotCurrentState() Takes a snapshot of the partition offsets. |

Modifier and Type | Method and Description |
---|---|
static int | KafkaTopicPartitionAssigner.assign(KafkaTopicPartition partition, int numParallelSubtasks) Returns the index of the target subtask that a specific Kafka partition should be assigned to. |
int | KafkaTopicPartition.Comparator.compare(KafkaTopicPartition p1, KafkaTopicPartition p2) |
protected kafka.common.TopicAndPartition | Kafka08Fetcher.createKafkaPartitionHandle(KafkaTopicPartition partition) |
protected abstract KPH | AbstractFetcher.createKafkaPartitionHandle(KafkaTopicPartition partition) Creates the Kafka-version-specific representation of the given topic partition. |
Long | ZookeeperOffsetHandler.getCommittedOffset(KafkaTopicPartition partition) |
boolean | AbstractPartitionDiscoverer.setAndCheckDiscoveredPartition(KafkaTopicPartition partition) Sets a partition as discovered. |
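
For example, a quick check of which subtask a given partition maps to (the parallelism value is illustrative):

```java
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionAssigner;

// With 4 parallel subtasks, compute the subtask index that would consume
// partition 7 of "myTopic" under the connector's assignment scheme.
KafkaTopicPartition partition = new KafkaTopicPartition("myTopic", 7);
int subtask = KafkaTopicPartitionAssigner.assign(partition, 4);
System.out.println("Partition 7 is read by subtask " + subtask);
```
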
Modifier and Type | Method and Description |
---|---|
void | AbstractFetcher.addDiscoveredPartitions(List<KafkaTopicPartition> newPartitions) Adds a list of newly discovered partitions to the fetcher for consuming. |
void | AbstractFetcher.commitInternalOffsetsToKafka(Map<KafkaTopicPartition,Long> offsets, KafkaCommitCallback commitCallback) Commits the given partition offsets to the Kafka brokers (or to ZooKeeper for older Kafka versions). |
protected void | Kafka08Fetcher.doCommitInternalOffsetsToKafka(Map<KafkaTopicPartition,Long> offsets, KafkaCommitCallback commitCallback) |
protected abstract void | AbstractFetcher.doCommitInternalOffsetsToKafka(Map<KafkaTopicPartition,Long> offsets, KafkaCommitCallback commitCallback) |
void | ZookeeperOffsetHandler.prepareAndCommitOffsets(Map<KafkaTopicPartition,Long> internalOffsets) Commits offsets for Kafka partitions to ZooKeeper. |
static String | KafkaTopicPartition.toString(List<KafkaTopicPartition> partitions) |
static String | KafkaTopicPartition.toString(Map<KafkaTopicPartition,Long> map) |

Constructor and Description |
---|
KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, org.apache.kafka.common.Node leader) |
KafkaTopicPartitionState(KafkaTopicPartition partition, KPH kafkaPartitionHandle) |
KafkaTopicPartitionStateWithPeriodicWatermarks(KafkaTopicPartition partition, KPH kafkaPartitionHandle, AssignerWithPeriodicWatermarks<T> timestampsAndWatermarks) |
KafkaTopicPartitionStateWithPunctuatedWatermarks(KafkaTopicPartition partition, KPH kafkaPartitionHandle, AssignerWithPunctuatedWatermarks<T> timestampsAndWatermarks) |

Constructor and Description |
---|
AbstractFetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> seedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, ProcessingTimeService processingTimeProvider, long autoWatermarkInterval, ClassLoader userCodeClassLoader, MetricGroup consumerMetricGroup, boolean useMetrics) |
Kafka08Fetcher(SourceFunction.SourceContext<T> sourceContext, Map<KafkaTopicPartition,Long> seedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, KafkaDeserializationSchema<T> deserializer, Properties kafkaProperties, long autoCommitInterval, MetricGroup consumerMetricGroup, boolean useMetrics) |