[HUDI-7833] validate if nested key columns works for fg reader #13130

Open · wants to merge 1 commit into master

@@ -90,7 +90,9 @@ public class TestHoodieFileGroupReaderOnHive extends TestHoodieFileGroupReaderBa

@Override
@Disabled("[HUDI-8072]")
public void testReadLogFilesOnlyInMergeOnReadTable(RecordMergeMode recordMergeMode, String logDataBlockFormat) throws Exception {
public void testReadLogFilesOnlyInMergeOnReadTable(RecordMergeMode recordMergeMode,
String logDataBlockFormat,
Boolean useNestedKey) throws Exception {
}

private static final String PARTITION_COLUMN = "datestr";
@@ -115,55 +115,81 @@ public void validateRecordsInFileGroup(String tablePath,

private static Stream<Arguments> testArguments() {
return Stream.of(
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "avro"),
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "parquet"),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "avro"),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "parquet"),
arguments(RecordMergeMode.CUSTOM, "avro"),
arguments(RecordMergeMode.CUSTOM, "parquet")
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "avro", false),
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "parquet", false),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "avro", false),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "parquet", false),
arguments(RecordMergeMode.CUSTOM, "avro", false),
arguments(RecordMergeMode.CUSTOM, "parquet", false),
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "avro", true),
arguments(RecordMergeMode.COMMIT_TIME_ORDERING, "parquet", true),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "avro", true),
arguments(RecordMergeMode.EVENT_TIME_ORDERING, "parquet", true),
arguments(RecordMergeMode.CUSTOM, "avro", true),
arguments(RecordMergeMode.CUSTOM, "parquet", true)
);
}

@ParameterizedTest
@MethodSource("testArguments")
public void testReadFileGroupInMergeOnReadTable(RecordMergeMode recordMergeMode, String logDataBlockFormat) throws Exception {
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(recordMergeMode));
public void testReadFileGroupInMergeOnReadTable(RecordMergeMode recordMergeMode,
String logDataBlockFormat,
Boolean useNestedKey) throws Exception {
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(recordMergeMode, useNestedKey));
writeConfigs.put(HoodieStorageConfig.LOGFILE_DATA_BLOCK_FORMAT.key(), logDataBlockFormat);

try (HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator(0xDEEF)) {
List<HoodieRecord> baseRecords = useNestedKey
? dataGen.generateInsertsWithKeyFieldSpecified("001", "fare", 100)
: dataGen.generateInserts("001", 100);

// One commit; reading one file group containing a base file only
commitToTable(dataGen.generateInserts("001", 100), INSERT.value(), writeConfigs);
commitToTable(baseRecords, INSERT.value(), writeConfigs);
validateOutputFromFileGroupReader(
getStorageConf(), getBasePath(), dataGen.getPartitionPaths(), true, 0, recordMergeMode);

// Two commits; reading one file group containing a base file and a log file
commitToTable(dataGen.generateUpdates("002", 100), UPSERT.value(), writeConfigs);
List<HoodieRecord> firstUpdate = useNestedKey
? dataGen.generateUpdates("002", baseRecords, "fare")
: dataGen.generateUpdates("002", 100);
commitToTable(firstUpdate, UPSERT.value(), writeConfigs);
validateOutputFromFileGroupReader(
getStorageConf(), getBasePath(), dataGen.getPartitionPaths(), true, 1, recordMergeMode);

// Three commits; reading one file group containing a base file and two log files
commitToTable(dataGen.generateUpdates("003", 100), UPSERT.value(), writeConfigs);
List<HoodieRecord> secondUpdates = useNestedKey
? dataGen.generateUpdates("003", baseRecords, "fare")
: dataGen.generateUpdates("003", 100);
commitToTable(secondUpdates, UPSERT.value(), writeConfigs);
validateOutputFromFileGroupReader(
getStorageConf(), getBasePath(), dataGen.getPartitionPaths(), true, 2, recordMergeMode);
}
}

@ParameterizedTest
@MethodSource("testArguments")
public void testReadLogFilesOnlyInMergeOnReadTable(RecordMergeMode recordMergeMode, String logDataBlockFormat) throws Exception {
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(recordMergeMode));
public void testReadLogFilesOnlyInMergeOnReadTable(RecordMergeMode recordMergeMode,
String logDataBlockFormat,
Boolean useNestedKey) throws Exception {
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(recordMergeMode, useNestedKey));
writeConfigs.put(HoodieStorageConfig.LOGFILE_DATA_BLOCK_FORMAT.key(), logDataBlockFormat);
// Use InMemoryIndex to generate a log-only MOR table
writeConfigs.put("hoodie.index.type", "INMEMORY");

try (HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator(0xDEEF)) {
// One commit; reading one file group containing a base file only
commitToTable(dataGen.generateInserts("001", 100), INSERT.value(), writeConfigs);
// One commit; reading one file group containing one log file
List<HoodieRecord> baseRecords = useNestedKey
? dataGen.generateInsertsWithKeyFieldSpecified("001", "fare", 100)
: dataGen.generateInserts("001", 100);
commitToTable(baseRecords, INSERT.value(), writeConfigs);
validateOutputFromFileGroupReader(
getStorageConf(), getBasePath(), dataGen.getPartitionPaths(), false, 1, recordMergeMode);

// Two commits; reading one file group containing a base file and a log file
commitToTable(dataGen.generateUpdates("002", 100), UPSERT.value(), writeConfigs);
// Two commits; reading two log files
List<HoodieRecord> updateRecords = useNestedKey
? dataGen.generateUpdates("002", baseRecords, "fare")
: dataGen.generateUpdates("002", 100);
commitToTable(updateRecords, UPSERT.value(), writeConfigs);
validateOutputFromFileGroupReader(
getStorageConf(), getBasePath(), dataGen.getPartitionPaths(), false, 2, recordMergeMode);
}
@@ -172,7 +198,7 @@ public void testReadLogFilesOnlyInMergeOnReadTable(RecordMergeMo
@ParameterizedTest
@EnumSource(value = ExternalSpillableMap.DiskMapType.class)
public void testSpillableMapUsage(ExternalSpillableMap.DiskMapType diskMapType) throws Exception {
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(RecordMergeMode.COMMIT_TIME_ORDERING));
Map<String, String> writeConfigs = new HashMap<>(getCommonConfigs(RecordMergeMode.COMMIT_TIME_ORDERING, false));
Option<Schema.Type> orderingFieldType = Option.of(Schema.Type.STRING);
try (HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator(0xDEEF)) {
commitToTable(dataGen.generateInserts("001", 100), INSERT.value(), writeConfigs);
@@ -230,9 +256,11 @@ public void testSpillableMapUsage(ExternalSpillableMap.DiskMapType diskMapType)
}
}

private Map<String, String> getCommonConfigs(RecordMergeMode recordMergeMode) {
private Map<String, String> getCommonConfigs(RecordMergeMode recordMergeMode, Boolean useNestedKey) {
Map<String, String> configMapping = new HashMap<>();
configMapping.put(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "_row_key");
configMapping.put(
KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(),
useNestedKey ? "fare" : "_row_key");
configMapping.put(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partition_path");
configMapping.put("hoodie.datasource.write.precombine.field", "timestamp");
configMapping.put("hoodie.payload.ordering.field", "timestamp");
@@ -39,6 +39,7 @@
import org.apache.hudi.storage.StorageConfiguration;
import org.apache.hudi.storage.StoragePath;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
@@ -848,6 +849,68 @@ public List<HoodieRecord> generateSameKeyInserts(String instantTime, List<Hoodie
return copy;
}

/**
* Step 1: Generate a list of HoodieRecord.
* Step 2: Based on the keyField, extract the HoodieKey for each record.
* Step 3: Create a list of HoodieRecord combining the keys with their payloads.
*
* This differs from the other insert generators, whose keys are not necessarily drawn from one of the data columns.
*/
public List<HoodieRecord> generateInsertsWithKeyFieldSpecified(String instantTime,
String keyField,
int recordNum) throws IOException {
List<HoodieRecord> initRecords = generateInserts(instantTime, recordNum);
List<HoodieKey> keys = initRecords.stream().map(r -> {
try {
return new HoodieKey(
((RawTripTestPayload) r.getData()).getJsonDataAsMap().get(keyField).toString(),
r.getPartitionPath()
);
} catch (IOException e) {
throw new RuntimeException(e);
}
}).collect(Collectors.toList());
return IntStream.range(0, keys.size())
.mapToObj(i -> new HoodieAvroRecord(keys.get(i), (RawTripTestPayload) initRecords.get(i).getData()))
.collect(Collectors.toList());
}

/**
* Generate updates based on existing records, using the given keyField
* column as the record key.
*
* Step 1: Generate a fresh list of HoodieRecord.
* Step 2: For each record, create a new HoodieRecord whose HoodieKey comes
* from the existing record and whose payload is the freshly generated payload
* with the key and partition path values copied over from the existing record.
*/
public List<HoodieRecord> generateUpdates(String instantTime,
List<HoodieRecord> records,
String keyField) throws IOException {
List<HoodieRecord> initRecords = generateInserts(instantTime, records.size());
ObjectMapper mapper = new ObjectMapper();

return IntStream.range(0, records.size())
.mapToObj(i -> {
RawTripTestPayload payload = (RawTripTestPayload) initRecords.get(i).getData();
try {
Map<String, Object> oldJsonMap =
((RawTripTestPayload) records.get(i).getData()).getJsonDataAsMap();
Map<String, Object> jsonMap = payload.getJsonDataAsMap();
// Copy the key field and partition_path values from the original record.
jsonMap.put(keyField, oldJsonMap.get(keyField));
jsonMap.put("partition_path", oldJsonMap.get("partition_path"));
String json = mapper.writeValueAsString(jsonMap);
return new HoodieAvroRecord(
records.get(i).getKey(),
new RawTripTestPayload(json, keyField));
} catch (IOException e) {
throw new RuntimeException(e);
}
})
.collect(Collectors.toList());
}

public List<HoodieRecord> generateInsertsWithHoodieAvroPayload(String instantTime, int limit) {
List<HoodieRecord> inserts = new ArrayList<>();
int currSize = getNumExistingKeys(TRIP_EXAMPLE_SCHEMA);
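
For context, here is a minimal usage sketch of the two new HoodieTestDataGenerator helpers, mirroring how the file group reader test above calls them. It assumes the usual Hudi test-util packages (org.apache.hudi.common.testutils and org.apache.hudi.common.model); the class name and printed output are illustrative only, not part of this change.

import java.util.List;

import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.testutils.HoodieTestDataGenerator;

public class NestedKeyDataGenSketch {
  public static void main(String[] args) throws Exception {
    try (HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator(0xDEEF)) {
      // Inserts whose HoodieKey record key is taken from the "fare" column
      // instead of the synthetic "_row_key" field.
      List<HoodieRecord> inserts =
          dataGen.generateInsertsWithKeyFieldSpecified("001", "fare", 100);

      // Updates that reuse each original record's HoodieKey and carry the old
      // "fare" and "partition_path" values into the freshly generated payload.
      List<HoodieRecord> updates = dataGen.generateUpdates("002", inserts, "fare");

      System.out.println(inserts.size() + " inserts, " + updates.size() + " updates");
    }
  }
}
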
@@ -99,6 +99,16 @@ public RawTripTestPayload(String jsonData) throws IOException {
this.orderingVal = Integer.valueOf(jsonRecordMap.getOrDefault("number", 0L).toString());
}

public RawTripTestPayload(String jsonData, String keyField) throws IOException {
this.jsonDataCompressed = compressData(jsonData);
this.dataSize = jsonData.length();
Map<String, Object> jsonRecordMap = OBJECT_MAPPER.readValue(jsonData, Map.class);
this.rowKey = jsonRecordMap.get(keyField).toString();
this.partitionPath = (String) jsonRecordMap.get("partition_path");
this.isDeleted = false;
this.orderingVal = Integer.valueOf(jsonRecordMap.getOrDefault("number", 0L).toString());
}

public RawTripTestPayload(GenericRecord record, Comparable orderingVal) {
this.orderingVal = orderingVal;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
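
Similarly, a small illustrative sketch of the new two-argument RawTripTestPayload constructor; the JSON values are hypothetical, and the import assumes the org.apache.hudi.common.testutils package.

import org.apache.hudi.common.testutils.RawTripTestPayload;

public class NestedKeyPayloadSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical trip JSON: the row key is read from the given key field ("fare"),
    // the partition path from "partition_path", and the ordering value from "number".
    String json = "{\"fare\": 42.0, \"partition_path\": \"2016/03/15\", \"number\": 1}";
    RawTripTestPayload payload = new RawTripTestPayload(json, "fare");
    System.out.println(payload);
  }
}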