Modifier and Type | Field and Description |
---|---|
protected List<DataFileMeta> |
MergeTreeBenchmark.compactedFiles |
Modifier and Type | Method and Description |
---|---|
protected void |
MergeTreeBenchmark.mergeCompacted(Set<String> newFileNames,
List<DataFileMeta> compactedFiles,
RecordWriter.CommitIncrement increment) |
Modifier and Type | Method and Description |
---|---|
void |
StoreSinkWriteImpl.notifyNewFiles(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files) |
Modifier and Type | Method and Description |
---|---|
List<DataFileMeta> |
AppendOnlyCompactManager.CompactRewriter.rewrite(List<DataFileMeta> compactBefore) |
Modifier and Type | Method and Description |
---|---|
void |
AppendOnlyCompactManager.addNewFile(DataFileMeta file) |
Modifier and Type | Method and Description |
---|---|
void |
AppendOnlyWriter.addNewFiles(List<DataFileMeta> files) |
protected CompactResult |
AppendOnlyCompactManager.IterativeCompactTask.doCompact(List<DataFileMeta> inputs) |
protected CompactResult |
AppendOnlyCompactManager.AutoCompactTask.doCompact(List<DataFileMeta> inputs) |
List<DataFileMeta> |
AppendOnlyCompactManager.CompactRewriter.rewrite(List<DataFileMeta> compactBefore) |
Constructor and Description |
---|
AppendOnlyCompactManager(ExecutorService executor,
LinkedList<DataFileMeta> toCompact,
int minFileNum,
int maxFileNum,
long targetFileSize,
AppendOnlyCompactManager.CompactRewriter rewriter,
DataFilePathFactory pathFactory) |
AutoCompactTask(List<DataFileMeta> toCompact,
AppendOnlyCompactManager.CompactRewriter rewriter) |
IterativeCompactTask(List<DataFileMeta> inputs,
long targetFileSize,
int minFileNum,
int maxFileNum,
AppendOnlyCompactManager.CompactRewriter rewriter,
DataFilePathFactory factory) |
Modifier and Type | Method and Description |
---|---|
List<DataFileMeta> |
CompactResult.after() |
List<DataFileMeta> |
CompactResult.before() |
List<DataFileMeta> |
CompactResult.changelog() |
List<DataFileMeta> |
CompactUnit.files() |
Modifier and Type | Method and Description |
---|---|
void |
NoopCompactManager.addNewFile(DataFileMeta file) |
void |
CompactManager.addNewFile(DataFileMeta file)
Add a new file.
|
Modifier and Type | Method and Description |
---|---|
protected abstract CompactResult |
CompactTask.doCompact(List<DataFileMeta> inputs)
Perform compaction.
|
static CompactUnit |
CompactUnit.fromFiles(int outputLevel,
List<DataFileMeta> files) |
protected String |
CompactTask.logMetric(long startMillis,
List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter) |
protected String |
CompactTask.logMetric(long startMillis,
List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter) |
Constructor and Description |
---|
CompactResult(DataFileMeta before,
DataFileMeta after) |
Constructor and Description |
---|
CompactResult(List<DataFileMeta> before,
List<DataFileMeta> after) |
CompactResult(List<DataFileMeta> before,
List<DataFileMeta> after) |
CompactResult(List<DataFileMeta> before,
List<DataFileMeta> after,
List<DataFileMeta> changelog) |
CompactResult(List<DataFileMeta> before,
List<DataFileMeta> after,
List<DataFileMeta> changelog) |
CompactResult(List<DataFileMeta> before,
List<DataFileMeta> after,
List<DataFileMeta> changelog) |
CompactTask(List<DataFileMeta> inputs) |
Modifier and Type | Method and Description |
---|---|
DataFileMeta |
DataFileMeta.copy(List<String> newExtraFiles) |
static DataFileMeta |
DataFileMeta.forAppend(String fileName,
long fileSize,
long rowCount,
BinaryTableStats rowStats,
long minSequenceNumber,
long maxSequenceNumber,
long schemaId) |
DataFileMeta |
DataFileMetaSerializer.fromRow(org.apache.flink.table.data.RowData row) |
DataFileMeta |
KeyValueDataFileWriter.result() |
DataFileMeta |
RowDataFileWriter.result() |
DataFileMeta |
DataFileMeta.upgrade(int newLevel) |
Modifier and Type | Method and Description |
---|---|
List<DataFileMeta> |
CompactIncrement.changelogFiles() |
List<DataFileMeta> |
NewFilesIncrement.changelogFiles() |
List<DataFileMeta> |
CompactIncrement.compactAfter() |
List<DataFileMeta> |
CompactIncrement.compactBefore() |
RollingFileWriter<KeyValue,DataFileMeta> |
KeyValueFileWriterFactory.createRollingChangelogFileWriter(int level) |
RollingFileWriter<KeyValue,DataFileMeta> |
KeyValueFileWriterFactory.createRollingMergeTreeFileWriter(int level) |
List<DataFileMeta> |
NewFilesIncrement.newFiles() |
Modifier and Type | Method and Description |
---|---|
org.apache.flink.table.data.RowData |
DataFileMetaSerializer.toRow(DataFileMeta meta) |
Modifier and Type | Method and Description |
---|---|
static long |
DataFileMeta.getMaxSequenceNumber(List<DataFileMeta> fileMetas) |
Constructor and Description |
---|
CompactIncrement(List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter,
List<DataFileMeta> changelogFiles) |
CompactIncrement(List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter,
List<DataFileMeta> changelogFiles) |
CompactIncrement(List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter,
List<DataFileMeta> changelogFiles) |
NewFilesIncrement(List<DataFileMeta> newFiles,
List<DataFileMeta> changelogFiles) |
NewFilesIncrement(List<DataFileMeta> newFiles,
List<DataFileMeta> changelogFiles) |
Modifier and Type | Method and Description |
---|---|
DataFileMeta |
ManifestEntry.file() |
Constructor and Description |
---|
ManifestEntry(FileKind kind,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
int totalBuckets,
DataFileMeta file) |
Modifier and Type | Method and Description |
---|---|
List<DataFileMeta> |
Levels.allFiles() |
List<DataFileMeta> |
SortedRun.files() |
Modifier and Type | Method and Description |
---|---|
void |
Levels.addLevel0File(DataFileMeta file) |
static SortedRun |
SortedRun.fromSingle(DataFileMeta file) |
Modifier and Type | Method and Description |
---|---|
void |
MergeTreeWriter.addNewFiles(List<DataFileMeta> files) |
static SortedRun |
SortedRun.fromSorted(List<DataFileMeta> sortedFiles) |
static SortedRun |
SortedRun.fromUnsorted(List<DataFileMeta> unsortedFiles,
Comparator<org.apache.flink.table.data.RowData> keyComparator) |
void |
Levels.update(List<DataFileMeta> before,
List<DataFileMeta> after) |
void |
Levels.update(List<DataFileMeta> before,
List<DataFileMeta> after) |
Constructor and Description |
---|
Levels(Comparator<org.apache.flink.table.data.RowData> keyComparator,
List<DataFileMeta> inputFiles,
int numLevels) |
Modifier and Type | Method and Description |
---|---|
protected static List<DataFileMeta> |
AbstractCompactRewriter.extractFilesFromSections(List<List<SortedRun>> sections) |
Modifier and Type | Method and Description |
---|---|
void |
MergeTreeCompactManager.addNewFile(DataFileMeta file) |
CompactResult |
AbstractCompactRewriter.upgrade(int outputLevel,
DataFileMeta file) |
CompactResult |
FullChangelogMergeTreeCompactRewriter.upgrade(int outputLevel,
DataFileMeta file) |
CompactResult |
CompactRewriter.upgrade(int outputLevel,
DataFileMeta file) |
Modifier and Type | Method and Description |
---|---|
protected CompactResult |
MergeTreeCompactTask.doCompact(List<DataFileMeta> inputs) |
protected String |
MergeTreeCompactTask.logMetric(long startMillis,
List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter) |
protected String |
MergeTreeCompactTask.logMetric(long startMillis,
List<DataFileMeta> compactBefore,
List<DataFileMeta> compactAfter) |
Constructor and Description |
---|
IntervalPartition(List<DataFileMeta> inputFiles,
Comparator<org.apache.flink.table.data.RowData> keyComparator) |
Modifier and Type | Method and Description |
---|---|
default Map<org.apache.flink.table.data.binary.BinaryRowData,Map<Integer,List<DataFileMeta>>> |
FileStoreScan.Plan.groupByPartFiles(List<ManifestEntry> files)
Return a map grouped by partition and bucket.
|
protected List<DataFileMeta> |
AbstractFileStoreWrite.scanExistingFileMetas(Long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket) |
Modifier and Type | Method and Description |
---|---|
void |
FileStoreWrite.notifyNewFiles(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files)
Notify that some new files are created at the given snapshot in the given bucket.
|
void |
AbstractFileStoreWrite.notifyNewFiles(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files) |
Modifier and Type | Method and Description |
---|---|
void |
RecordWriter.addNewFiles(List<DataFileMeta> files)
Add files to the internal
CompactManager. |
Modifier and Type | Method and Description |
---|---|
void |
TableWrite.notifyNewFiles(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files)
Notify that some new files are created at the given snapshot in the given bucket.
|
void |
TableWriteImpl.notifyNewFiles(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files) |
Modifier and Type | Method and Description |
---|---|
List<DataFileMeta> |
DataSplit.files() |
List<List<DataFileMeta>> |
AppendOnlySplitGenerator.split(List<DataFileMeta> files) |
List<List<DataFileMeta>> |
MergeTreeSplitGenerator.split(List<DataFileMeta> files) |
List<List<DataFileMeta>> |
SplitGenerator.split(List<DataFileMeta> files) |
Modifier and Type | Method and Description |
---|---|
static List<DataSplit> |
AbstractDataTableScan.generateSplits(long snapshotId,
boolean isIncremental,
SplitGenerator splitGenerator,
Map<org.apache.flink.table.data.binary.BinaryRowData,Map<Integer,List<DataFileMeta>>> groupedDataFiles) |
List<List<DataFileMeta>> |
AppendOnlySplitGenerator.split(List<DataFileMeta> files) |
List<List<DataFileMeta>> |
MergeTreeSplitGenerator.split(List<DataFileMeta> files) |
List<List<DataFileMeta>> |
SplitGenerator.split(List<DataFileMeta> files) |
Constructor and Description |
---|
DataSplit(long snapshotId,
org.apache.flink.table.data.binary.BinaryRowData partition,
int bucket,
List<DataFileMeta> files,
boolean isIncremental) |
Copyright © 2019–2023 The Apache Software Foundation. All rights reserved.