@@ -119,7 +119,7 @@ public List<SourceSplitBase> generateHybridLakeFlussSplits() throws Exception {
                     lakeSplits, isLogTable, tableBucketsOffset, partitionNameById);
         } else {
             Map<Integer, List<LakeSplit>> nonPartitionLakeSplits =
-                    lakeSplits.values().iterator().next();
+                    lakeSplits.isEmpty() ? null : lakeSplits.values().iterator().next();

Contributor Author: When no split is generated, lakeSplits may be empty. Let's use the safe way.

             // non-partitioned table
             return generateNoPartitionedTableSplit(
                     nonPartitionLakeSplits, isLogTable, tableBucketsOffset);
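
For illustration, a minimal standalone sketch (placeholder element types, not the project's classes) of the failure mode this hunk guards against: calling values().iterator().next() on an empty map throws NoSuchElementException, while the guarded form falls back to null.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class EmptyMapAccess {
    public static void main(String[] args) {
        Map<Integer, List<String>> lakeSplits = new HashMap<>();

        // Unsafe: throws java.util.NoSuchElementException on an empty map.
        // List<String> first = lakeSplits.values().iterator().next();

        // Safe: fall back to null when no split was generated; the callee
        // must then tolerate a null argument (hence the @Nullable below).
        List<String> first =
                lakeSplits.isEmpty() ? null : lakeSplits.values().iterator().next();
        System.out.println(first); // prints: null
    }
}
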
@@ -307,7 +307,7 @@ private SourceSplitBase generateSplitForPrimaryKeyTableBucket(
     }

     private List<SourceSplitBase> generateNoPartitionedTableSplit(
-            Map<Integer, List<LakeSplit>> lakeSplits,
+            @Nullable Map<Integer, List<LakeSplit>> lakeSplits,
             boolean isLogTable,
             Map<TableBucket, Long> tableBucketSnapshotLogOffset) {
         // iterate all bucket
@@ -331,8 +331,7 @@ public boolean isBounded() {
             enableLakeSource = false;
         } else {
             if (enableLakeSource) {
-                enableLakeSource =
-                        pushTimeStampFilterToLakeSource(lakeSource, flussRowType);
+                enableLakeSource = pushTimeStampFilterToLakeSource(lakeSource);
             }
         }
         break;
@@ -385,12 +384,11 @@ public boolean isBounded() {
         }
     }

-    private boolean pushTimeStampFilterToLakeSource(
-            LakeSource<?> lakeSource, RowType flussRowType) {
+    private boolean pushTimeStampFilterToLakeSource(LakeSource<?> lakeSource) {
         // will push timestamp to lake
         // we will have three additional system columns, __bucket, __offset, __timestamp
         // in lake, get the __timestamp index in lake table
-        final int timestampFieldIndex = flussRowType.getFieldCount() + 2;
+        final int timestampFieldIndex = tableOutputType.getFieldCount() + 2;

Contributor Author: flussRowType is the projected row type; we should use the original tableOutputType.

         Predicate timestampFilter =
                 new LeafPredicate(
                         GreaterOrEqual.INSTANCE,
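
For illustration, a sketch of the index arithmetic behind this fix, assuming (per the code comment above) that the lake table appends the three system columns __bucket, __offset, __timestamp after the original columns. The schemas here are hypothetical; the point is that the __timestamp index must come from the full table schema, since a projected row type undercounts once columns are pruned.

public class TimestampFieldIndex {
    public static void main(String[] args) {
        String[] tableOutputColumns = {"a", "b", "c"}; // full table schema
        String[] flussRowColumns = {"b"};              // after projection push-down

        // Lake layout: [a, b, c, __bucket, __offset, __timestamp],
        // so __timestamp sits at fieldCount + 2.
        int correctIndex = tableOutputColumns.length + 2; // 5 -> __timestamp
        int wrongIndex = flussRowColumns.length + 2;      // 3 -> __bucket

        System.out.println("from tableOutputType: " + correctIndex);
        System.out.println("from projected flussRowType: " + wrongIndex);
    }
}
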
@@ -770,16 +770,6 @@ private void assignPendingSplits(Set<Integer> pendingReaders) {
                             TableBucket tableBucket = split.getTableBucket();
                             assignedTableBuckets.add(tableBucket);

-                            if (pendingHybridLakeFlussSplits != null) {
-                                // removed from the pendingHybridLakeFlussSplits
-                                // since this split already be assigned
-                                pendingHybridLakeFlussSplits.removeIf(
-                                        hybridLakeFlussSplit ->
-                                                hybridLakeFlussSplit
-                                                        .splitId()
-                                                        .equals(split.splitId()));
-                            }
-
                             if (isPartitioned) {
                                 long partitionId =
                                         checkNotNull(
@@ -792,6 +782,17 @@ private void assignPendingSplits(Set<Integer> pendingReaders) {
                                 assignedPartitions.put(partitionId, partitionName);
                             }
                         });
+
+                if (pendingHybridLakeFlussSplits != null) {
+                    Set<String> splitIdsToRemove =
+                            pendingAssignmentForReader.stream()
+                                    .map(SourceSplitBase::splitId)
+                                    .collect(Collectors.toSet());
+                    // removed from the pendingHybridLakeFlussSplits
+                    // since these splits have already been assigned
+                    pendingHybridLakeFlussSplits.removeIf(
+                            split -> splitIdsToRemove.contains(split.splitId()));
+                }
             }
         }

Contributor Author: Moved this out of the loop to speed it up.
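
For illustration, a standalone sketch (hypothetical data, not the enumerator's classes) of why this helps: the old code ran removeIf once per assigned split, rescanning the pending list each time, which is O(n * m); collecting the assigned split ids into a set first allows a single removeIf pass with O(1) membership checks, roughly O(n + m).

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class BatchRemoval {
    public static void main(String[] args) {
        List<String> pending = new ArrayList<>(List.of("s1", "s2", "s3", "s4"));
        List<String> assigned = List.of("s2", "s4");

        // Before: one full scan of pending per assigned element.
        // for (String id : assigned) {
        //     pending.removeIf(p -> p.equals(id));
        // }

        // After: one scan in total, with O(1) set lookups.
        Set<String> toRemove = assigned.stream().collect(Collectors.toSet());
        pending.removeIf(toRemove::contains);

        System.out.println(pending); // [s1, s3]
    }
}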

@@ -140,12 +140,16 @@ void testUnionReadFromTimestamp() throws Exception {
         CloseableIterator<Row> actualRows =
                 streamTEnv
                         .executeSql(
-                                "select * from "
+                                "select b from "
                                         + tableName
                                         + " /*+ OPTIONS('scan.startup.mode' = 'timestamp',\n"
                                         + "'scan.startup.timestamp' = '2000') */")
                         .collect();
-        List<Row> expectedRows = rows.stream().skip(2 * 3).collect(Collectors.toList());
+        List<Row> expectedRows =
+                rows.stream()
+                        .skip(2 * 3)
+                        .map(row -> Row.of(row.getField(1)))
+                        .collect(Collectors.toList());
         assertRowResultsIgnoreOrder(actualRows, expectedRows, true);

         // verify scan from earliest
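
For illustration, a small sketch (hypothetical values, using Flink's org.apache.flink.types.Row) of keeping the expectation in sync with the narrowed query: since the SQL now selects only column b, each expected row must likewise be projected down to field index 1.

import java.util.List;
import java.util.stream.Collectors;
import org.apache.flink.types.Row;

public class ProjectExpected {
    public static void main(String[] args) {
        // Hypothetical (a, b, c) rows mirroring the test's shape.
        List<Row> rows = List.of(Row.of(1, "v1", 10L), Row.of(2, "v2", 20L));

        // "select b" yields single-field rows, so narrow the expected
        // rows to field index 1 as well.
        List<Row> expected =
                rows.stream()
                        .map(row -> Row.of(row.getField(1)))
                        .collect(Collectors.toList());

        System.out.println(expected); // single-field rows holding v1 and v2
    }
}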