Modifier and Type | Method and Description |
---|---|
static Object |
PythonBridgeUtils.getPickledBytesFromRow(Row row,
DataType[] dataTypes) |
Modifier and Type | Method and Description |
---|---|
static TypeInformation<Row> |
Types.ROW_NAMED(String[] fieldNames,
TypeInformation<?>... types)
Returns type information for
Row with fields of the given
types and with given names. |
static TypeInformation<Row> |
Types.ROW(TypeInformation<?>... types)
Returns type information for
Row with fields of the given
types. |
Modifier and Type | Method and Description |
---|---|
protected Row |
RowCsvInputFormat.fillRecord(Row reuse,
Object[] parsedValues) |
Modifier and Type | Method and Description |
---|---|
TypeInformation<Row> |
RowCsvInputFormat.getProducedType() |
Modifier and Type | Method and Description |
---|---|
protected Row |
RowCsvInputFormat.fillRecord(Row reuse,
Object[] parsedValues) |
Modifier and Type | Method and Description |
---|---|
TypeComparator<Row> |
RowTypeInfo.createComparator(int[] logicalKeyFields,
boolean[] orders,
int logicalFieldOffset,
ExecutionConfig config) |
TypeSerializer<Row> |
RowTypeInfo.createLegacySerializer(ExecutionConfig config)
Deprecated.
|
TypeSerializer<Row> |
RowTypeInfo.createSerializer(ExecutionConfig config) |
protected CompositeType.TypeComparatorBuilder<Row> |
RowTypeInfo.createTypeComparatorBuilder() |
Modifier and Type | Method and Description |
---|---|
Row |
RowSerializer.copy(Row from) |
Row |
RowSerializer.copy(Row from,
Row reuse) |
Row |
RowSerializer.createInstance() |
Row |
RowSerializer.deserialize(DataInputView source) |
Row |
RowSerializer.deserialize(Row reuse,
DataInputView source) |
Row |
RowComparator.readWithKeyDenormalization(Row reuse,
DataInputView source) |
Modifier and Type | Method and Description |
---|---|
TypeComparator<Row> |
RowComparator.duplicate() |
TypeSerializer<Row> |
RowSerializer.duplicate() |
TypeSerializerSchemaCompatibility<Row> |
RowSerializer.RowSerializerConfigSnapshot.resolveSchemaCompatibility(TypeSerializer<Row> newSerializer)
Deprecated.
|
TypeSerializerSnapshot<Row> |
RowSerializer.snapshotConfiguration() |
Modifier and Type | Method and Description |
---|---|
int |
RowComparator.compare(Row first,
Row second) |
Row |
RowSerializer.copy(Row from) |
Row |
RowSerializer.copy(Row from,
Row reuse) |
Row |
RowSerializer.deserialize(Row reuse,
DataInputView source) |
boolean |
RowComparator.equalToReference(Row candidate) |
int |
RowComparator.hash(Row record) |
void |
RowComparator.putNormalizedKey(Row record,
MemorySegment target,
int offset,
int numBytes) |
Row |
RowComparator.readWithKeyDenormalization(Row reuse,
DataInputView source) |
void |
RowSerializer.serialize(Row record,
DataOutputView target) |
void |
RowComparator.setReference(Row toCompare) |
void |
RowComparator.writeWithKeyNormalization(Row record,
DataOutputView target) |
Modifier and Type | Method and Description |
---|---|
int |
RowComparator.compareToReference(TypeComparator<Row> referencedComparator) |
TypeSerializerSchemaCompatibility<Row> |
RowSerializer.RowSerializerConfigSnapshot.resolveSchemaCompatibility(TypeSerializer<Row> newSerializer)
Deprecated.
|
Modifier and Type | Method and Description |
---|---|
protected Object[] |
CassandraRowOutputFormat.extractFields(Row record) |
Modifier and Type | Method and Description |
---|---|
org.apache.hadoop.hbase.client.Mutation |
LegacyMutationConverter.convertToMutation(Tuple2<Boolean,Row> record) |
Modifier and Type | Method and Description |
---|---|
AsyncTableFunction<Row> |
AbstractHBaseTableSource.getAsyncLookupFunction(String[] lookupKeys) |
DataSet<Row> |
AbstractHBaseTableSource.getDataSet(ExecutionEnvironment execEnv) |
DataStream<Row> |
AbstractHBaseTableSource.getDataStream(StreamExecutionEnvironment execEnv) |
protected abstract InputFormat<Row,?> |
AbstractHBaseTableSource.getInputFormat(HBaseTableSchema projectedSchema) |
TableFunction<Row> |
AbstractHBaseTableSource.getLookupFunction(String[] lookupKeys) |
TypeInformation<Row> |
HBaseLookupFunction.getResultType() |
TypeInformation<Row> |
AbstractHBaseTableSource.getReturnType() |
Modifier and Type | Method and Description |
---|---|
Row |
HBaseReadWriteHelper.parseToRow(org.apache.hadoop.hbase.client.Result result)
Parses HBase
Result into Row . |
Row |
HBaseReadWriteHelper.parseToRow(org.apache.hadoop.hbase.client.Result result,
Object rowKey)
Parses HBase
Result into Row . |
Modifier and Type | Method and Description |
---|---|
org.apache.hadoop.hbase.client.Delete |
HBaseReadWriteHelper.createDeleteMutation(Row row)
Returns an instance of Delete that removes a record from an HBase table.
|
org.apache.hadoop.hbase.client.Put |
HBaseReadWriteHelper.createPutMutation(Row row)
Returns an instance of Put that writes a record to an HBase table.
|
Modifier and Type | Method and Description |
---|---|
StreamTableSink<Tuple2<Boolean,Row>> |
HBase1TableFactory.createStreamTableSink(Map<String,String> properties) |
StreamTableSource<Row> |
HBase1TableFactory.createStreamTableSource(Map<String,String> properties) |
Modifier and Type | Method and Description |
---|---|
TableSink<Tuple2<Boolean,Row>> |
HBaseUpsertTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
TypeInformation<Row> |
HBaseUpsertTableSink.getRecordType() |
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
HBaseUpsertTableSink.consumeDataStream(DataStream<Tuple2<Boolean,Row>> dataStream) |
Modifier and Type | Method and Description |
---|---|
protected Row |
HBaseRowInputFormat.mapResultToOutType(org.apache.hadoop.hbase.client.Result res) |
Modifier and Type | Method and Description |
---|---|
InputFormat<Row,?> |
HBaseTableSource.getInputFormat(HBaseTableSchema projectedSchema) |
TypeInformation<Row> |
HBaseRowInputFormat.getProducedType() |
Modifier and Type | Method and Description |
---|---|
StreamTableSink<Tuple2<Boolean,Row>> |
HBase2TableFactory.createStreamTableSink(Map<String,String> properties) |
StreamTableSource<Row> |
HBase2TableFactory.createStreamTableSource(Map<String,String> properties) |
Modifier and Type | Method and Description |
---|---|
TableSink<Tuple2<Boolean,Row>> |
HBaseUpsertTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
TypeInformation<Row> |
HBaseUpsertTableSink.getRecordType() |
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
HBaseUpsertTableSink.consumeDataStream(DataStream<Tuple2<Boolean,Row>> dataStream) |
Modifier and Type | Method and Description |
---|---|
protected Row |
HBaseRowInputFormat.mapResultToOutType(org.apache.hadoop.hbase.client.Result res) |
Modifier and Type | Method and Description |
---|---|
InputFormat<Row,?> |
HBaseTableSource.getInputFormat(HBaseTableSchema projectedSchema) |
TypeInformation<Row> |
HBaseRowInputFormat.getProducedType() |
Modifier and Type | Method and Description |
---|---|
Row |
JdbcInputFormat.nextRecord(Row reuse)
Stores the next resultSet row in a tuple.
|
Modifier and Type | Method and Description |
---|---|
Row |
JdbcInputFormat.nextRecord(Row reuse)
Stores the next resultSet row in a tuple.
|
Modifier and Type | Method and Description |
---|---|
JdbcBatchingOutputFormat<Tuple2<Boolean,Row>,Row,JdbcBatchStatementExecutor<Row>> |
JdbcBatchingOutputFormat.Builder.build()
Finalizes the configuration and checks validity.
|
JdbcBatchingOutputFormat<Tuple2<Boolean,Row>,Row,JdbcBatchStatementExecutor<Row>> |
JdbcBatchingOutputFormat.Builder.build()
Finalizes the configuration and checks validity.
|
JdbcBatchingOutputFormat<Tuple2<Boolean,Row>,Row,JdbcBatchStatementExecutor<Row>> |
JdbcBatchingOutputFormat.Builder.build()
Finalizes the configuration and checks validity.
|
Modifier and Type | Method and Description |
---|---|
TableSink<Tuple2<Boolean,Row>> |
JdbcUpsertTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
StreamTableSink<Tuple2<Boolean,Row>> |
JdbcTableSourceSinkFactory.createStreamTableSink(Map<String,String> properties) |
StreamTableSource<Row> |
JdbcTableSourceSinkFactory.createStreamTableSource(Map<String,String> properties) |
AsyncTableFunction<Row> |
JdbcTableSource.getAsyncLookupFunction(String[] lookupKeys) |
DataStream<Row> |
JdbcTableSource.getDataStream(StreamExecutionEnvironment execEnv) |
TableFunction<Row> |
JdbcTableSource.getLookupFunction(String[] lookupKeys) |
TypeInformation<Tuple2<Boolean,Row>> |
JdbcUpsertTableSink.getOutputType() |
TypeInformation<Row> |
JdbcUpsertTableSink.getRecordType() |
TypeInformation<Row> |
JdbcLookupFunction.getResultType() |
TableSource<Row> |
JdbcTableSource.projectFields(int[] fields) |
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
JdbcUpsertTableSink.consumeDataStream(DataStream<Tuple2<Boolean,Row>> dataStream) |
Modifier and Type | Method and Description |
---|---|
static Row |
JdbcUtils.getPrimaryKey(Row row,
int[] pkFields) |
Modifier and Type | Method and Description |
---|---|
static Row |
JdbcUtils.getPrimaryKey(Row row,
int[] pkFields) |
static void |
JdbcUtils.setRecordToStatement(PreparedStatement upload,
int[] typesArray,
Row row)
Adds a record to the prepared statement.
|
Modifier and Type | Method and Description |
---|---|
LinkedHashMap<String,String> |
HiveRowPartitionComputer.generatePartValues(Row in) |
Modifier and Type | Method and Description |
---|---|
java.util.function.Function<Row,org.apache.hadoop.io.Writable> |
HiveWriterFactory.createRowConverter() |
Modifier and Type | Method and Description |
---|---|
Row |
AvroRowDeserializationSchema.deserialize(byte[] message) |
Modifier and Type | Method and Description |
---|---|
DeserializationSchema<Row> |
AvroRowFormatFactory.createDeserializationSchema(Map<String,String> properties) |
SerializationSchema<Row> |
AvroRowFormatFactory.createSerializationSchema(Map<String,String> properties) |
TypeInformation<Row> |
AvroRowDeserializationSchema.getProducedType() |
Modifier and Type | Method and Description |
---|---|
byte[] |
AvroRowSerializationSchema.serialize(Row row) |
Modifier and Type | Method and Description |
---|---|
static <T extends org.apache.avro.specific.SpecificRecord> |
AvroSchemaConverter.convertToTypeInfo(Class<T> avroClass)
Converts an Avro class into a nested row structure with deterministic field order and data
types that are compatible with Flink's Table & SQL API.
|
Modifier and Type | Method and Description |
---|---|
static org.apache.avro.generic.GenericRecord |
AvroConversions.convertRowToAvroRecord(org.apache.avro.Schema schema,
Row row) |
Modifier and Type | Method and Description |
---|---|
Row |
CsvRowDeserializationSchema.deserialize(byte[] message) |
Row |
RowCsvInputFormat.nextRecord(Row record) |
Modifier and Type | Method and Description |
---|---|
DeserializationSchema<Row> |
CsvRowFormatFactory.createDeserializationSchema(Map<String,String> properties) |
SerializationSchema<Row> |
CsvRowFormatFactory.createSerializationSchema(Map<String,String> properties) |
TypeInformation<Row> |
CsvRowDeserializationSchema.getProducedType() |
Modifier and Type | Method and Description |
---|---|
boolean |
CsvRowDeserializationSchema.isEndOfStream(Row nextElement) |
Row |
RowCsvInputFormat.nextRecord(Row record) |
byte[] |
CsvRowSerializationSchema.serialize(Row row) |
Modifier and Type | Method and Description |
---|---|
static RowCsvInputFormat.Builder |
RowCsvInputFormat.builder(TypeInformation<Row> typeInfo,
Path... filePaths)
Create a builder.
|
Constructor and Description |
---|
Builder(TypeInformation<Row> typeInfo)
Creates a CSV deserialization schema for the given
TypeInformation with optional
parameters. |
Builder(TypeInformation<Row> typeInfo)
Creates a
CsvRowSerializationSchema expecting the given TypeInformation . |
Modifier and Type | Method and Description |
---|---|
Row |
JsonRowDeserializationSchema.deserialize(byte[] message) |
Modifier and Type | Method and Description |
---|---|
DeserializationSchema<Row> |
JsonRowFormatFactory.createDeserializationSchema(Map<String,String> properties) |
SerializationSchema<Row> |
JsonRowFormatFactory.createSerializationSchema(Map<String,String> properties) |
TypeInformation<Row> |
JsonRowDeserializationSchema.getProducedType() |
Modifier and Type | Method and Description |
---|---|
boolean |
JsonRowDeserializationSchema.isEndOfStream(Row nextElement) |
byte[] |
JsonRowSerializationSchema.serialize(Row row) |
Modifier and Type | Method and Description |
---|---|
JsonRowSerializationSchema.Builder |
JsonRowSerializationSchema.Builder.withTypeInfo(TypeInformation<Row> typeInfo)
Sets type information for JSON serialization schema.
|
Constructor and Description |
---|
Builder(TypeInformation<Row> typeInfo)
Creates a JSON deserialization schema for the given type information.
|
Builder(TypeInformation<Row> typeInfo)
Deprecated.
Use
JsonRowSerializationSchema.builder() instead. |
JsonRowDeserializationSchema(TypeInformation<Row> typeInfo)
Deprecated.
Use the provided
JsonRowDeserializationSchema.Builder instead. |
Modifier and Type | Method and Description |
---|---|
protected Row |
ParquetRowInputFormat.convert(Row row) |
Modifier and Type | Method and Description |
---|---|
TableSource<Row> |
ParquetTableSource.applyPredicate(List<Expression> predicates) |
DataSet<Row> |
ParquetTableSource.getDataSet(ExecutionEnvironment executionEnvironment) |
TypeInformation<Row> |
ParquetRowInputFormat.getProducedType() |
TypeInformation<Row> |
ParquetTableSource.getReturnType() |
TableSource<Row> |
ParquetTableSource.projectFields(int[] fields) |
Modifier and Type | Method and Description |
---|---|
protected Map |
ParquetMapInputFormat.convert(Row row) |
protected Row |
ParquetRowInputFormat.convert(Row row) |
protected E |
ParquetPojoInputFormat.convert(Row row) |
protected org.apache.avro.generic.GenericRecord |
ParquetAvroInputFormat.convert(Row row) |
protected abstract E |
ParquetInputFormat.convert(Row row)
This ParquetInputFormat reads parquet records as Row by default.
|
Modifier and Type | Method and Description |
---|---|
Row |
RowMaterializer.getCurrentRecord() |
Row |
RowConverter.getCurrentRow() |
Modifier and Type | Method and Description |
---|---|
org.apache.parquet.io.api.RecordMaterializer<Row> |
RowReadSupport.prepareForRead(Configuration configuration,
Map<String,String> keyValueMetaData,
org.apache.parquet.schema.MessageType fileSchema,
org.apache.parquet.hadoop.api.ReadSupport.ReadContext readContext) |
Modifier and Type | Method and Description |
---|---|
Row |
OrcRowSplitReader.nextRecord(Row reuse) |
Modifier and Type | Method and Description |
---|---|
TableSource<Row> |
OrcTableSource.applyPredicate(List<Expression> predicates) |
DataSet<Row> |
OrcTableSource.getDataSet(ExecutionEnvironment execEnv) |
TypeInformation<Row> |
OrcRowInputFormat.getProducedType() |
TypeInformation<Row> |
OrcTableSource.getReturnType() |
TableSource<Row> |
OrcTableSource.projectFields(int[] selectedFields) |
Modifier and Type | Method and Description |
---|---|
Row |
OrcRowSplitReader.nextRecord(Row reuse) |
Modifier and Type | Method and Description |
---|---|
Row |
StreamSQLTestProgram.KillMapper.map(Row value) |
Row |
BatchSQLTestProgram.DataGenerator.next() |
Modifier and Type | Method and Description |
---|---|
DataStream<Row> |
StreamSQLTestProgram.GeneratorTableSource.getDataStream(StreamExecutionEnvironment execEnv) |
InputFormat<Row,?> |
BatchSQLTestProgram.GeneratorTableSource.getInputFormat() |
TypeInformation<Row> |
StreamSQLTestProgram.Generator.getProducedType() |
TypeInformation<Row> |
StreamSQLTestProgram.GeneratorTableSource.getReturnType() |
Modifier and Type | Method and Description |
---|---|
String |
StreamSQLTestProgram.KeyBucketAssigner.getBucketId(Row element,
BucketAssigner.Context context) |
Row |
StreamSQLTestProgram.KillMapper.map(Row value) |
Modifier and Type | Method and Description |
---|---|
void |
StreamSQLTestProgram.Generator.run(SourceFunction.SourceContext<Row> ctx) |
Modifier and Type | Method and Description |
---|---|
Row |
KeyByKeySelector.getKey(Row value) |
Modifier and Type | Method and Description |
---|---|
Row |
KeyByKeySelector.getKey(Row value) |
Integer |
PartitionCustomKeySelector.getKey(Row value) |
Modifier and Type | Field and Description |
---|---|
protected Row |
TwoInputPythonFunctionOperator.reuseRow |
Modifier and Type | Method and Description |
---|---|
protected TypeInformation<Row> |
TwoInputPythonFunctionOperator.getRunnerInputTypeInfo() |
protected TypeSerializer<Row> |
TwoInputPythonFunctionOperator.getRunnerInputTypeSerializer() |
Modifier and Type | Method and Description |
---|---|
void |
PythonKeyedProcessOperator.onEventTime(InternalTimer<Row,Object> timer) |
void |
PythonKeyedCoProcessOperator.onEventTime(InternalTimer<Row,VoidNamespace> timer) |
void |
PythonKeyedProcessOperator.onProcessingTime(InternalTimer<Row,Object> timer) |
void |
PythonKeyedCoProcessOperator.onProcessingTime(InternalTimer<Row,VoidNamespace> timer) |
void |
PythonKeyedProcessOperator.processElement(StreamRecord<Row> element) |
void |
PythonKeyedCoProcessOperator.processElement1(StreamRecord<Row> element) |
void |
PythonKeyedCoProcessOperator.processElement2(StreamRecord<Row> element) |
Constructor and Description |
---|
PythonKeyedCoProcessOperator(Configuration config,
TypeInformation<Row> inputTypeInfo1,
TypeInformation<Row> inputTypeInfo2,
TypeInformation<OUT> outputTypeInfo,
DataStreamPythonFunctionInfo pythonFunctionInfo) |
PythonKeyedCoProcessOperator(Configuration config,
TypeInformation<Row> inputTypeInfo1,
TypeInformation<Row> inputTypeInfo2,
TypeInformation<OUT> outputTypeInfo,
DataStreamPythonFunctionInfo pythonFunctionInfo) |
TwoInputPythonFunctionOperator(Configuration config,
DataStreamPythonFunctionInfo pythonFunctionInfo,
String coderUrn,
TypeInformation<Row> runnerInputTypeInfo,
TypeInformation<RUNNER_OUT> runnerOutputTypeInfo) |
Modifier and Type | Field and Description |
---|---|
protected Row |
KeyedInputWithTimerRowFactory.reuseRunnerInput |
protected Row |
KeyedInputWithTimerRowFactory.reuseTimerData
Reusable row for timer data runner inputs.
|
Modifier and Type | Method and Description |
---|---|
Row |
KeyedTwoInputWithTimerRowFactory.fromNormalData(boolean isLeft,
long timestamp,
long watermark,
Row userInput) |
Row |
KeyedInputWithTimerRowFactory.fromNormalData(long timestamp,
long watermark,
Row reuseNormalData) |
Row |
KeyedTwoInputWithTimerRowFactory.fromTimer(TimeDomain timeDomain,
long timestamp,
long watermark,
Row key,
byte[] encodedNamespace) |
Row |
KeyedInputWithTimerRowFactory.fromTimer(TimeDomain timeDomain,
long timestamp,
long watermark,
Row key,
byte[] encodedNamespace) |
Modifier and Type | Method and Description |
---|---|
static TypeInformation<Row> |
KeyedInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> normalDataTypeInfo,
TypeInformation<Row> keyType) |
static TypeInformation<Row> |
KeyedTwoInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> userInputType1,
TypeInformation<Row> userInputType2,
TypeInformation<Row> keyType) |
Modifier and Type | Method and Description |
---|---|
Row |
KeyedTwoInputWithTimerRowFactory.fromNormalData(boolean isLeft,
long timestamp,
long watermark,
Row userInput) |
Row |
KeyedInputWithTimerRowFactory.fromNormalData(long timestamp,
long watermark,
Row reuseNormalData) |
Row |
KeyedTwoInputWithTimerRowFactory.fromTimer(TimeDomain timeDomain,
long timestamp,
long watermark,
Row key,
byte[] encodedNamespace) |
Row |
KeyedInputWithTimerRowFactory.fromTimer(TimeDomain timeDomain,
long timestamp,
long watermark,
Row key,
byte[] encodedNamespace) |
Modifier and Type | Method and Description |
---|---|
static TypeInformation<Row> |
KeyedInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> normalDataTypeInfo,
TypeInformation<Row> keyType) |
static TypeInformation<Row> |
KeyedInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> normalDataTypeInfo,
TypeInformation<Row> keyType) |
static TypeInformation<Row> |
KeyedTwoInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> userInputType1,
TypeInformation<Row> userInputType2,
TypeInformation<Row> keyType) |
static TypeInformation<Row> |
KeyedTwoInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> userInputType1,
TypeInformation<Row> userInputType2,
TypeInformation<Row> keyType) |
static TypeInformation<Row> |
KeyedTwoInputWithTimerRowFactory.getRunnerInputTypeInfo(TypeInformation<Row> userInputType1,
TypeInformation<Row> userInputType2,
TypeInformation<Row> keyType) |
Modifier and Type | Method and Description |
---|---|
static TypeInformation<Row> |
OutputWithTimerRowHandler.getRunnerOutputTypeInfo(TypeInformation<?> outputType,
TypeInformation<Row> keyType) |
Modifier and Type | Method and Description |
---|---|
void |
OutputWithTimerRowHandler.accept(Row runnerOutput,
long timestamp) |
Modifier and Type | Method and Description |
---|---|
static TypeInformation<Row> |
OutputWithTimerRowHandler.getRunnerOutputTypeInfo(TypeInformation<?> outputType,
TypeInformation<Row> keyType) |
Constructor and Description |
---|
OutputWithTimerRowHandler(KeyedStateBackend<Row> keyedStateBackend,
InternalTimerService internalTimerService,
TimestampedCollector collector,
KeyContext keyContext,
TypeSerializer namespaceSerializer) |
Modifier and Type | Method and Description |
---|---|
protected CassandraSink<Row> |
CassandraSink.CassandraRowSinkBuilder.createSink() |
protected CassandraSink<Row> |
CassandraSink.CassandraRowSinkBuilder.createWriteAheadSink() |
TypeInformation<Row> |
CassandraAppendTableSink.getOutputType() |
Modifier and Type | Method and Description |
---|---|
protected Object[] |
CassandraRowSink.extract(Row record) |
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
CassandraAppendTableSink.consumeDataStream(DataStream<Row> dataStream) |
protected boolean |
CassandraRowWriteAheadSink.sendValues(Iterable<Row> values,
long checkpointId,
long timestamp) |
Constructor and Description |
---|
CassandraRowSinkBuilder(DataStream<Row> input,
TypeInformation<Row> typeInfo,
TypeSerializer<Row> serializer) |
CassandraRowSinkBuilder(DataStream<Row> input,
TypeInformation<Row> typeInfo,
TypeSerializer<Row> serializer) |
CassandraRowSinkBuilder(DataStream<Row> input,
TypeInformation<Row> typeInfo,
TypeSerializer<Row> serializer) |
CassandraRowWriteAheadSink(String insertQuery,
TypeSerializer<Row> serializer,
ClusterBuilder builder,
CheckpointCommitter committer) |
Modifier and Type | Method and Description |
---|---|
TableSink<Tuple2<Boolean,Row>> |
ElasticsearchUpsertTableSinkBase.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
protected abstract SinkFunction<Tuple2<Boolean,Row>> |
ElasticsearchUpsertTableSinkBase.createSinkFunction(List<ElasticsearchUpsertTableSinkBase.Host> hosts,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.ElasticsearchUpsertSinkFunction upsertFunction) |
StreamTableSink<Tuple2<Boolean,Row>> |
ElasticsearchUpsertTableSinkFactoryBase.createStreamTableSink(Map<String,String> properties) |
TypeInformation<Tuple2<Boolean,Row>> |
ElasticsearchUpsertTableSinkBase.getOutputType() |
TypeInformation<Row> |
ElasticsearchUpsertTableSinkBase.getRecordType() |
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
ElasticsearchUpsertTableSinkBase.consumeDataStream(DataStream<Tuple2<Boolean,Row>> dataStream) |
protected abstract ElasticsearchUpsertTableSinkBase |
ElasticsearchUpsertTableSinkBase.copy(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory) |
protected abstract ElasticsearchUpsertTableSinkBase |
ElasticsearchUpsertTableSinkFactoryBase.createElasticsearchUpsertTableSink(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions) |
void |
ElasticsearchUpsertTableSinkBase.ElasticsearchUpsertSinkFunction.process(Tuple2<Boolean,Row> element,
RuntimeContext ctx,
RequestIndexer indexer) |
Constructor and Description |
---|
ElasticsearchUpsertSinkFunction(IndexGenerator indexGenerator,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory,
int[] keyFieldIndices) |
ElasticsearchUpsertTableSinkBase(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory) |
Modifier and Type | Method and Description |
---|---|
String |
IndexGenerator.generate(Row row)
Generates the index name according to the given row.
|
String |
StaticIndexGenerator.generate(Row row) |
Modifier and Type | Method and Description |
---|---|
protected SinkFunction<Tuple2<Boolean,Row>> |
Elasticsearch6UpsertTableSink.createSinkFunction(List<ElasticsearchUpsertTableSinkBase.Host> hosts,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.ElasticsearchUpsertSinkFunction upsertSinkFunction) |
Modifier and Type | Method and Description |
---|---|
protected ElasticsearchUpsertTableSinkBase |
Elasticsearch6UpsertTableSink.copy(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory) |
protected ElasticsearchUpsertTableSinkBase |
Elasticsearch6UpsertTableSinkFactory.createElasticsearchUpsertTableSink(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions) |
Constructor and Description |
---|
Elasticsearch6UpsertTableSink(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions) |
Modifier and Type | Method and Description |
---|---|
protected SinkFunction<Tuple2<Boolean,Row>> |
Elasticsearch7UpsertTableSink.createSinkFunction(List<ElasticsearchUpsertTableSinkBase.Host> hosts,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.ElasticsearchUpsertSinkFunction upsertSinkFunction) |
Modifier and Type | Method and Description |
---|---|
protected ElasticsearchUpsertTableSinkBase |
Elasticsearch7UpsertTableSink.copy(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions,
ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory) |
protected ElasticsearchUpsertTableSinkBase |
Elasticsearch7UpsertTableSinkFactory.createElasticsearchUpsertTableSink(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String docType,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions) |
Constructor and Description |
---|
Elasticsearch7UpsertTableSink(boolean isAppendOnly,
TableSchema schema,
List<ElasticsearchUpsertTableSinkBase.Host> hosts,
String index,
String keyDelimiter,
String keyNullLiteral,
SerializationSchema<Row> serializationSchema,
org.elasticsearch.common.xcontent.XContentType contentType,
ActionRequestFailureHandler failureHandler,
Map<ElasticsearchUpsertTableSinkBase.SinkOption,String> sinkOptions) |
Modifier and Type | Field and Description |
---|---|
protected Optional<FlinkKafkaPartitioner<Row>> |
KafkaTableSinkBase.partitioner
Deprecated.
Partitioner to select Kafka partition for each item.
|
protected SerializationSchema<Row> |
KafkaTableSinkBase.serializationSchema
Deprecated.
Serialization schema for encoding records to Kafka.
|
Modifier and Type | Method and Description |
---|---|
protected FlinkKafkaConsumerBase<Row> |
KafkaTableSource.createKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
|
protected abstract FlinkKafkaConsumerBase<Row> |
KafkaTableSourceBase.createKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Creates a version-specific Kafka consumer.
|
protected SinkFunction<Row> |
KafkaTableSink.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
|
protected abstract SinkFunction<Row> |
KafkaTableSinkBase.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
Returns the version-specific Kafka producer.
|
StreamTableSink<Row> |
KafkaTableSourceSinkFactoryBase.createStreamTableSink(Map<String,String> properties)
Deprecated.
|
StreamTableSource<Row> |
KafkaTableSourceSinkFactoryBase.createStreamTableSource(Map<String,String> properties)
Deprecated.
|
DataStream<Row> |
KafkaTableSourceBase.getDataStream(StreamExecutionEnvironment env)
Deprecated.
NOTE: This method is for internal use only for defining a TableSource.
|
DeserializationSchema<Row> |
KafkaTableSourceBase.getDeserializationSchema()
Deprecated.
Returns the deserialization schema.
|
protected FlinkKafkaConsumerBase<Row> |
KafkaTableSourceBase.getKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Returns a version-specific Kafka consumer with the start position configured.
|
TypeInformation<Row> |
KafkaTableSinkBase.getOutputType()
Deprecated.
|
TypeInformation<Row> |
KafkaTableSourceBase.getReturnType()
Deprecated.
|
Modifier and Type | Method and Description |
---|---|
DataStreamSink<?> |
KafkaTableSinkBase.consumeDataStream(DataStream<Row> dataStream)
Deprecated.
|
protected FlinkKafkaConsumerBase<Row> |
KafkaTableSource.createKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
|
protected abstract FlinkKafkaConsumerBase<Row> |
KafkaTableSourceBase.createKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Creates a version-specific Kafka consumer.
|
protected SinkFunction<Row> |
KafkaTableSink.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
|
protected SinkFunction<Row> |
KafkaTableSink.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
|
protected abstract SinkFunction<Row> |
KafkaTableSinkBase.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
Returns the version-specific Kafka producer.
|
protected abstract SinkFunction<Row> |
KafkaTableSinkBase.createKafkaProducer(String topic,
Properties properties,
SerializationSchema<Row> serializationSchema,
Optional<FlinkKafkaPartitioner<Row>> partitioner)
Deprecated.
Returns the version-specific Kafka producer.
|
protected KafkaTableSinkBase |
KafkaTableSourceSinkFactory.createKafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
protected KafkaTableSinkBase |
KafkaTableSourceSinkFactory.createKafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
protected abstract KafkaTableSinkBase |
KafkaTableSourceSinkFactoryBase.createKafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
Constructs the version-specific Kafka table sink.
|
protected abstract KafkaTableSinkBase |
KafkaTableSourceSinkFactoryBase.createKafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
Constructs the version-specific Kafka table sink.
|
protected KafkaTableSourceBase |
KafkaTableSourceSinkFactory.createKafkaTableSource(TableSchema schema,
Optional<String> proctimeAttribute,
List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
Map<String,String> fieldMapping,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis)
Deprecated.
|
protected abstract KafkaTableSourceBase |
KafkaTableSourceSinkFactoryBase.createKafkaTableSource(TableSchema schema,
Optional<String> proctimeAttribute,
List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
Map<String,String> fieldMapping,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis)
Deprecated.
Constructs the version-specific Kafka table source.
|
protected FlinkKafkaConsumerBase<Row> |
KafkaTableSourceBase.getKafkaConsumer(String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Returns a version-specific Kafka consumer with the start position configured.
|
Constructor and Description |
---|
KafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
KafkaTableSink(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
KafkaTableSinkBase(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
KafkaTableSinkBase(TableSchema schema,
String topic,
Properties properties,
Optional<FlinkKafkaPartitioner<Row>> partitioner,
SerializationSchema<Row> serializationSchema)
Deprecated.
|
KafkaTableSource(TableSchema schema,
Optional<String> proctimeAttribute,
List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
Optional<Map<String,String>> fieldMapping,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis)
Deprecated.
Creates a generic Kafka
StreamTableSource . |
KafkaTableSource(TableSchema schema,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Creates a generic Kafka
StreamTableSource . |
KafkaTableSourceBase(TableSchema schema,
Optional<String> proctimeAttribute,
List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
Optional<Map<String,String>> fieldMapping,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis)
Deprecated.
Creates a generic Kafka
StreamTableSource . |
KafkaTableSourceBase(TableSchema schema,
String topic,
Properties properties,
DeserializationSchema<Row> deserializationSchema)
Deprecated.
Creates a generic Kafka
StreamTableSource . |
Modifier and Type | Method and Description |
---|---|
CloseableIterator<Row> |
TableResult.collect()
Get the result contents as a closeable row iterator.
|
static TypeInformation<Row> |
Types.ROW(String[] fieldNames,
TypeInformation<?>[] types)
Deprecated.
Returns type information for
Row with fields of the given types and with given names. |
static TypeInformation<Row> |
Types.ROW(TypeInformation<?>... types)
Deprecated.
Returns type information for
Row with fields of the given types. |
TypeInformation<Row> |
TableSchema.toRowType()
Deprecated.
Use
TableSchema.toRowDataType() instead. |
Modifier and Type | Method and Description |
---|---|
DataStream<Row> |
StreamTableEnvironment.toChangelogStream(Table table)
Converts the given
Table into a DataStream of changelog entries. |
DataStream<Row> |
StreamTableEnvironment.toChangelogStream(Table table,
Schema targetSchema)
Converts the given
Table into a DataStream of changelog entries. |
DataStream<Row> |
StreamTableEnvironment.toChangelogStream(Table table,
Schema targetSchema,
ChangelogMode changelogMode)
Converts the given
Table into a DataStream of changelog entries. |
DataStream<Row> |
StreamTableEnvironment.toDataStream(Table table)
Converts the given
Table into a DataStream . |
Modifier and Type | Method and Description |
---|---|
Table |
StreamTableEnvironment.fromChangelogStream(DataStream<Row> dataStream)
Converts the given
DataStream of changelog entries into a Table . |
Table |
StreamTableEnvironment.fromChangelogStream(DataStream<Row> dataStream,
Schema schema)
Converts the given
DataStream of changelog entries into a Table . |
Table |
StreamTableEnvironment.fromChangelogStream(DataStream<Row> dataStream,
Schema schema,
ChangelogMode changelogMode)
Converts the given
DataStream of changelog entries into a Table . |
Modifier and Type | Method and Description |
---|---|
DataStream<Row> |
StreamTableEnvironmentImpl.toChangelogStream(Table table) |
DataStream<Row> |
StreamTableEnvironmentImpl.toChangelogStream(Table table,
Schema targetSchema) |
DataStream<Row> |
StreamTableEnvironmentImpl.toChangelogStream(Table table,
Schema targetSchema,
ChangelogMode changelogMode) |
DataStream<Row> |
StreamTableEnvironmentImpl.toDataStream(Table table) |
Modifier and Type | Method and Description |
---|---|
Table |
StreamTableEnvironmentImpl.fromChangelogStream(DataStream<Row> dataStream) |
Table |
StreamTableEnvironmentImpl.fromChangelogStream(DataStream<Row> dataStream,
Schema schema) |
Table |
StreamTableEnvironmentImpl.fromChangelogStream(DataStream<Row> dataStream,
Schema schema,
ChangelogMode changelogMode) |
Modifier and Type | Method and Description |
---|---|
CloseableIterator<Row> |
TableResultImpl.collect() |
CloseableIterator<Row> |
CollectResultProvider.getResultIterator()
Returns the select result as row iterator.
|
Modifier and Type | Method and Description |
---|---|
TableResultImpl.Builder |
TableResultImpl.Builder.data(CloseableIterator<Row> rowIterator)
Specifies a row iterator as the execution result.
|
TableResultImpl.Builder |
TableResultImpl.Builder.data(List<Row> rowList)
Specifies a row list as the execution result.
|
Modifier and Type | Method and Description |
---|---|
TypedResult<List<Row>> |
Executor.retrieveResultChanges(String sessionId,
String resultId)
Asks for the next changelog results (non-blocking).
|
List<Row> |
Executor.retrieveResultPage(String resultId,
int page)
Returns the rows that are part of the current page or throws an exception if the snapshot has
expired.
|
Modifier and Type | Method and Description |
---|---|
TypedResult<List<Row>> |
LocalExecutor.retrieveResultChanges(String sessionId,
String resultId) |
List<Row> |
LocalExecutor.retrieveResultPage(String resultId,
int page) |
Modifier and Type | Field and Description |
---|---|
protected List<Row> |
MaterializedCollectResultBase.materializedTable
Materialized table that is continuously updated by inserts and deletes.
|
Modifier and Type | Method and Description |
---|---|
protected List<Row> |
MaterializedCollectStreamResult.getMaterializedTable() |
protected List<Row> |
MaterializedCollectBatchResult.getMaterializedTable() |
TypedResult<List<Row>> |
ChangelogCollectResult.retrieveChanges() |
TypedResult<List<Row>> |
ChangelogResult.retrieveChanges()
Retrieves the available result records.
|
List<Row> |
MaterializedResult.retrievePage(int page)
Retrieves a page of a snapshotted result.
|
List<Row> |
MaterializedCollectResultBase.retrievePage(int page) |
Modifier and Type | Method and Description |
---|---|
protected void |
MaterializedCollectStreamResult.processRecord(Row row) |
protected void |
ChangelogCollectResult.processRecord(Row row) |
protected void |
MaterializedCollectBatchResult.processRecord(Row row) |
protected abstract void |
CollectResultBase.processRecord(Row row) |
Modifier and Type | Method and Description |
---|---|
Row |
RowRowConverter.toExternal(RowData internal) |
Modifier and Type | Method and Description |
---|---|
RowData |
RowRowConverter.toInternal(Row external) |
Modifier and Type | Method and Description |
---|---|
Csv |
Csv.schema(TypeInformation<Row> schemaType)
Deprecated.
Csv supports deriving the schema from the table schema by default; it is no longer
necessary to explicitly declare the format schema. This method will be removed in the
future. |
Json |
Json.schema(TypeInformation<Row> schemaType)
Deprecated.
Json supports deriving the schema from the table schema by default; it is no longer
necessary to explicitly declare the format schema. This method will be removed in the
future. |
Modifier and Type | Method and Description |
---|---|
Row |
LastDatedValueFunction.getValue(LastDatedValueFunction.Accumulator<T> acc) |
Modifier and Type | Method and Description |
---|---|
Row |
RowPartitionComputer.projectColumnsToWrite(Row in) |
Modifier and Type | Method and Description |
---|---|
LinkedHashMap<String,String> |
RowPartitionComputer.generatePartValues(Row in) |
Row |
RowPartitionComputer.projectColumnsToWrite(Row in) |
Modifier and Type | Method and Description |
---|---|
TypeInformation<Row> |
PythonTableFunction.getResultType() |
Modifier and Type | Method and Description |
---|---|
TypeInformation<Row> |
ReplicateRows.getResultType() |
Modifier and Type | Method and Description |
---|---|
static ArrowWriter<Row> |
ArrowUtils.createRowArrowWriter(org.apache.arrow.vector.VectorSchemaRoot root,
RowType rowType)
Creates an
ArrowWriter for the specified VectorSchemaRoot . |
Modifier and Type | Method and Description |
---|---|
Row |
RowFieldReader.read(int index) |
Row |
RowArrowReader.read(int rowId) |
Modifier and Type | Method and Description |
---|---|
ArrowReader<Row> |
RowArrowSerializer.createArrowReader(org.apache.arrow.vector.VectorSchemaRoot root) |
ArrowWriter<Row> |
RowArrowSerializer.createArrowWriter() |
Modifier and Type | Method and Description |
---|---|
DataStream<Row> |
RowArrowTableSource.getDataStream(StreamExecutionEnvironment execEnv) |
TypeInformation<Row> |
RowArrowSourceFunction.getProducedType() |
Modifier and Type | Method and Description |
---|---|
void |
RowTimeWriter.doWrite(Row row,
int ordinal) |
void |
RowDateWriter.doWrite(Row value,
int ordinal) |
void |
RowBigIntWriter.doWrite(Row value,
int ordinal) |
void |
RowFloatWriter.doWrite(Row value,
int ordinal) |
void |
RowDoubleWriter.doWrite(Row value,
int ordinal) |
void |
RowVarBinaryWriter.doWrite(Row value,
int ordinal) |
void |
RowBooleanWriter.doWrite(Row value,
int ordinal) |
void |
RowTinyIntWriter.doWrite(Row value,
int ordinal) |
void |
RowIntWriter.doWrite(Row value,
int ordinal) |
void |
RowSmallIntWriter.doWrite(Row value,
int ordinal) |
void |
RowVarCharWriter.doWrite(Row value,
int ordinal) |
void |
RowRowWriter.doWrite(Row value,
int ordinal) |
void |
RowArrayWriter.doWrite(Row row,
int ordinal) |
void |
RowDecimalWriter.doWrite(Row value,
int ordinal) |
void |
RowTimestampWriter.doWrite(Row row,
int ordinal) |
Constructor and Description |
---|
RowArrayWriter(org.apache.arrow.vector.complex.ListVector listVector,
ArrowFieldWriter<Row> elementWriter) |
Modifier and Type | Field and Description |
---|---|
protected LinkedBlockingQueue<Row> |
AbstractPythonStatelessFunctionFlatMap.forwardedInputQueue
The queue holding the input elements for which the execution results have not been received.
|
protected TypeSerializer<Row> |
AbstractPythonStatelessFunctionFlatMap.forwardedInputSerializer
The type serializer for the forwarded fields.
|
protected Collector<Row> |
AbstractPythonStatelessFunctionFlatMap.resultCollector
The collector used to collect records.
|
Modifier and Type | Method and Description |
---|---|
protected Row |
AbstractPythonStatelessFunctionFlatMap.getFunctionInput(Row element) |
Modifier and Type | Method and Description |
---|---|
TypeInformation<Row> |
AbstractPythonStatelessFunctionFlatMap.getProducedType() |
Modifier and Type | Method and Description |
---|---|
void |
PythonTableFunctionFlatMap.bufferInput(Row input) |
void |
AbstractPythonScalarFunctionFlatMap.bufferInput(Row input) |
abstract void |
AbstractPythonStatelessFunctionFlatMap.bufferInput(Row input) |
void |
AbstractPythonStatelessFunctionFlatMap.flatMap(Row value,
Collector<Row> out) |
protected Row |
AbstractPythonStatelessFunctionFlatMap.getFunctionInput(Row element) |
void |
PythonTableFunctionFlatMap.processElementInternal(Row value) |
void |
PythonScalarFunctionFlatMap.processElementInternal(Row value) |
abstract void |
AbstractPythonStatelessFunctionFlatMap.processElementInternal(Row value) |
Modifier and Type | Method and Description |
---|---|
void |
AbstractPythonStatelessFunctionFlatMap.flatMap(Row value,
Collector<Row> out) |
Modifier and Type | Method and Description |
---|---|
void |
ArrowPythonScalarFunctionFlatMap.processElementInternal(Row value) |
Modifier and Type | Method and Description |
---|---|
Row |
AbstractRowPythonScalarFunctionOperator.getFunctionInput(org.apache.flink.table.runtime.types.CRow element) |
Modifier and Type | Method and Description |
---|---|
Row |
PythonTableFunctionOperator.getFunctionInput(org.apache.flink.table.runtime.types.CRow element) |
Modifier and Type | Method and Description |
---|---|
void |
StreamRecordCRowWrappingCollector.collect(Row record) |
Modifier and Type | Method and Description |
---|---|
TableSink<Row> |
BatchSelectTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
TableSink<Tuple2<Boolean,Row>> |
StreamSelectTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
TableSink<Row> |
CsvTableSink.configure(String[] fieldNames,
TypeInformation<?>[] fieldTypes) |
BatchTableSink<Row> |
CsvBatchTableSinkFactory.createBatchTableSink(Map<String,String> properties) |
StreamTableSink<Row> |
CsvAppendTableSinkFactory.createStreamTableSink(Map<String,String> properties) |
TypeInformation<Row> |
StreamSelectTableSink.getRecordType() |
Modifier and Type | Method and Description |
---|---|
String |
CsvTableSink.CsvFormatter.map(Row row) |
Modifier and Type | Method and Description |
---|---|
DataSink<?> |
BatchSelectTableSink.consumeDataSet(DataSet<Row> dataSet) |
DataSink<?> |
CsvTableSink.consumeDataSet(DataSet<Row> dataSet) |
DataStreamSink<?> |
CsvTableSink.consumeDataStream(DataStream<Row> dataStream) |
DataStreamSink<?> |
StreamSelectTableSink.consumeDataStream(DataStream<Tuple2<Boolean,Row>> dataStream) |
Modifier and Type | Method and Description |
---|---|
BatchTableSource<Row> |
CsvBatchTableSourceFactory.createBatchTableSource(Map<String,String> properties) |
StreamTableSource<Row> |
CsvAppendTableSourceFactory.createStreamTableSource(Map<String,String> properties) |
AsyncTableFunction<Row> |
CsvTableSource.getAsyncLookupFunction(String[] lookupKeys) |
DataSet<Row> |
CsvTableSource.getDataSet(ExecutionEnvironment execEnv) |
DataStream<Row> |
CsvTableSource.getDataStream(StreamExecutionEnvironment execEnv) |
TableFunction<Row> |
CsvTableSource.getLookupFunction(String[] lookupKeys) |
TypeInformation<Row> |
CsvTableSource.CsvLookupFunction.getResultType() |
Modifier and Type | Method and Description |
---|---|
abstract Watermark |
PunctuatedWatermarkAssigner.getWatermark(Row row,
long timestamp)
Returns the watermark for the current row or null if no watermark should be generated.
|
Modifier and Type | Method and Description |
---|---|
static String[] |
PrintUtils.rowToString(Row row,
ResolvedSchema resolvedSchema,
java.time.ZoneId sessionTimeZone) |
static String[] |
PrintUtils.rowToString(Row row,
String nullColumn,
boolean printRowKind,
ResolvedSchema resolvedSchema,
java.time.ZoneId sessionTimeZone) |
Modifier and Type | Method and Description |
---|---|
static void |
PrintUtils.printAsTableauForm(ResolvedSchema resolvedSchema,
Iterator<Row> it,
PrintWriter printWriter,
int maxColumnWidth,
String nullColumn,
boolean deriveColumnWidthByType,
boolean printRowKind,
java.time.ZoneId sessionTimeZone)
Displays the result in a tableau form.
|
static void |
PrintUtils.printAsTableauForm(ResolvedSchema resolvedSchema,
Iterator<Row> it,
PrintWriter printWriter,
java.time.ZoneId sessionTimeZone)
Displays the result in a tableau form.
|
Modifier and Type | Method and Description |
---|---|
static Row |
Row.copy(Row row)
Creates a new row which is copied from another row (including its
RowKind ). |
static Row |
RowUtils.createRowWithNamedPositions(RowKind kind,
Object[] fieldByPosition,
LinkedHashMap<String,Integer> positionByName)
Internal utility for creating a row in static named-position field mode.
|
static Row |
Row.join(Row first,
Row... remainings)
Creates a new row with fields that are copied from the other rows and appended to the
resulting row in the given order.
|
static Row |
Row.of(Object... values)
Creates a fixed-length row in position-based field mode and assigns the given values to the
row's fields.
|
static Row |
Row.ofKind(RowKind kind,
Object... values)
Creates a fixed-length row in position-based field mode with given kind and assigns the given
values to the row's fields.
|
static Row |
Row.project(Row row,
int[] fieldPositions)
Creates a new row with projected fields and identical
RowKind from another row. |
static Row |
Row.project(Row row,
String[] fieldNames)
Creates a new row with projected fields and identical
RowKind from another row. |
static Row |
Row.withNames()
Creates a variable-length row in name-based field mode.
|
static Row |
Row.withNames(RowKind kind)
Creates a variable-length row in name-based field mode.
|
static Row |
Row.withPositions(int arity)
Creates a fixed-length row in position-based field mode.
|
static Row |
Row.withPositions(RowKind kind,
int arity)
Creates a fixed-length row in position-based field mode.
|
Modifier and Type | Method and Description |
---|---|
static Row |
Row.copy(Row row)
Creates a new row which is copied from another row (including its
RowKind ). |
static Row |
Row.join(Row first,
Row... remainings)
Creates a new row with fields that are copied from the other rows and appended to the
resulting row in the given order.
|
static Row |
Row.join(Row first,
Row... remainings)
Creates a new row with fields that are copied from the other rows and appended to the
resulting row in the given order.
|
static Row |
Row.project(Row row,
int[] fieldPositions)
Creates a new row with projected fields and identical
RowKind from another row. |
static Row |
Row.project(Row row,
String[] fieldNames)
Creates a new row with projected fields and identical
RowKind from another row. |
Modifier and Type | Method and Description |
---|---|
static boolean |
RowUtils.compareRows(List<Row> l1,
List<Row> l2)
|
static boolean |
RowUtils.compareRows(List<Row> l1,
List<Row> l2)
|
static boolean |
RowUtils.compareRows(List<Row> l1,
List<Row> l2,
boolean ignoreOrder)
|
static boolean |
RowUtils.compareRows(List<Row> l1,
List<Row> l2,
boolean ignoreOrder)
|
Modifier and Type | Method and Description |
---|---|
Row |
TransactionRowInputFormat.nextRecord(Row reuse) |
Modifier and Type | Method and Description |
---|---|
Row |
TransactionRowInputFormat.nextRecord(Row reuse) |
Copyright © 2014–2022 The Apache Software Foundation. All rights reserved.