[HUDI-3357] MVP implementation of BigQuerySyncTool (#5125)
Co-authored-by: Raymond Xu <2701446+xushiyan@users.noreply.github.com>
parent c19f505b5a
commit 20964df770
org/apache/hudi/sync/common/util/ManifestFileUtil.java (deleted)
@@ -1,141 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.sync.common.util;

import org.apache.hudi.common.config.HoodieMetadataConfig;
import org.apache.hudi.common.config.SerializableConfiguration;
import org.apache.hudi.common.engine.HoodieLocalEngineContext;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodieBaseFile;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.util.FileIOUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.metadata.HoodieMetadataFileSystemView;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.apache.hudi.common.config.HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS;
import static org.apache.hudi.common.config.HoodieMetadataConfig.ENABLE;

public class ManifestFileUtil {

  private static final Logger LOG = LogManager.getLogger(ManifestFileUtil.class);
  private static final String MANIFEST_FOLDER_NAME = "manifest";
  private static final String MANIFEST_FILE_NAME = "latest-snapshot.csv";
  private static final String DELIMITER = "\n";
  private final SerializableConfiguration hadoopConf;
  private final String basePath;
  private final transient HoodieLocalEngineContext engineContext;
  private final HoodieTableMetaClient metaClient;

  private ManifestFileUtil(Configuration conf, String basePath) {
    this.hadoopConf = new SerializableConfiguration(conf);
    this.basePath = basePath;
    this.engineContext = new HoodieLocalEngineContext(conf);
    this.metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf.get()).setBasePath(basePath).setLoadActiveTimelineOnLoad(true).build();
  }

  public synchronized void writeManifestFile() {
    try {
      Path manifestFilePath = new Path(getManifestFolder(), MANIFEST_FILE_NAME);
      Option<byte[]> content = Option.of(fetchLatestBaseFilesForAllPartitions().collect(Collectors.joining(DELIMITER)).getBytes());
      FileIOUtils.createFileInPath(metaClient.getFs(), manifestFilePath, content);
    } catch (Exception e) {
      String msg = "Error writing manifest file";
      LOG.error(msg, e);
      throw new HoodieException(msg, e);
    }
  }

  public Stream<String> fetchLatestBaseFilesForAllPartitions() {
    try {
      HoodieMetadataConfig metadataConfig = buildMetadataConfig(hadoopConf.get());

      List<String> partitions = FSUtils.getAllPartitionPaths(engineContext, metadataConfig, basePath);

      return partitions.parallelStream().flatMap(p -> {
        HoodieLocalEngineContext engContext = new HoodieLocalEngineContext(hadoopConf.get());
        HoodieMetadataFileSystemView fsView =
            new HoodieMetadataFileSystemView(engContext, metaClient, metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants(), metadataConfig);
        return fsView.getLatestBaseFiles(p).map(HoodieBaseFile::getFileName);
      });
    } catch (Exception e) {
      String msg = "Error checking path :" + basePath;
      LOG.error(msg, e);
      throw new HoodieException(msg, e);
    }
  }

  private static HoodieMetadataConfig buildMetadataConfig(Configuration conf) {
    return HoodieMetadataConfig.newBuilder()
        .enable(conf.getBoolean(ENABLE.key(), DEFAULT_METADATA_ENABLE_FOR_READERS))
        .build();
  }

  /**
   * @return Manifest File folder
   */
  public Path getManifestFolder() {
    return new Path(metaClient.getMetaPath(), MANIFEST_FOLDER_NAME);
  }

  /**
   * @return Manifest File Full Path
   */
  public Path getManifestFilePath() {
    return new Path(getManifestFolder(), MANIFEST_FILE_NAME);
  }

  public static Builder builder() {
    return new Builder();
  }

  /**
   * Builder for {@link ManifestFileUtil}.
   */
  public static class Builder {

    private Configuration conf;
    private String basePath;

    public Builder setConf(Configuration conf) {
      this.conf = conf;
      return this;
    }

    public Builder setBasePath(String basePath) {
      this.basePath = basePath;
      return this;
    }

    public ManifestFileUtil build() {
      ValidationUtils.checkArgument(conf != null, "Configuration needs to be set to init ManifestFileGenerator");
      ValidationUtils.checkArgument(basePath != null, "basePath needs to be set to init ManifestFileGenerator");
      return new ManifestFileUtil(conf, basePath);
    }
  }
}
org/apache/hudi/sync/common/util/ManifestFileWriter.java (added)
@@ -0,0 +1,156 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.sync.common.util;

import org.apache.hudi.common.config.HoodieMetadataConfig;
import org.apache.hudi.common.engine.HoodieLocalEngineContext;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodieBaseFile;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.metadata.HoodieMetadataFileSystemView;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ManifestFileWriter {

  public static final String MANIFEST_FOLDER_NAME = "manifest";
  public static final String MANIFEST_FILE_NAME = "latest-snapshot.csv";
  private static final Logger LOG = LogManager.getLogger(ManifestFileWriter.class);

  private final HoodieTableMetaClient metaClient;
  private final boolean useFileListingFromMetadata;
  private final boolean assumeDatePartitioning;

  private ManifestFileWriter(Configuration hadoopConf, String basePath, boolean useFileListingFromMetadata, boolean assumeDatePartitioning) {
    this.metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(basePath).setLoadActiveTimelineOnLoad(true).build();
    this.useFileListingFromMetadata = useFileListingFromMetadata;
    this.assumeDatePartitioning = assumeDatePartitioning;
  }

  /**
   * Write all the latest base file names to the manifest file.
   */
  public synchronized void writeManifestFile() {
    try {
      List<String> baseFiles = fetchLatestBaseFilesForAllPartitions(metaClient, useFileListingFromMetadata, assumeDatePartitioning)
          .collect(Collectors.toList());
      if (baseFiles.isEmpty()) {
        LOG.warn("No base files found; skipping manifest file generation.");
        return;
      } else {
        LOG.info("Writing " + baseFiles.size() + " base file names to manifest file.");
      }
      final Path manifestFilePath = getManifestFilePath();
      try (FSDataOutputStream outputStream = metaClient.getFs().create(manifestFilePath, true);
           BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream, StandardCharsets.UTF_8))) {
        for (String f : baseFiles) {
          writer.write(f);
          writer.write("\n");
        }
      }
    } catch (Exception e) {
      throw new HoodieException("Error in writing manifest file.", e);
    }
  }

  public static Stream<String> fetchLatestBaseFilesForAllPartitions(HoodieTableMetaClient metaClient,
      boolean useFileListingFromMetadata, boolean assumeDatePartitioning) {
    try {
      List<String> partitions = FSUtils.getAllPartitionPaths(new HoodieLocalEngineContext(metaClient.getHadoopConf()),
          metaClient.getBasePath(), useFileListingFromMetadata, assumeDatePartitioning);
      LOG.info("Retrieved all partitions: " + partitions.size());
      return partitions.parallelStream().flatMap(p -> {
        Configuration hadoopConf = metaClient.getHadoopConf();
        HoodieLocalEngineContext engContext = new HoodieLocalEngineContext(hadoopConf);
        HoodieMetadataFileSystemView fsView = new HoodieMetadataFileSystemView(engContext, metaClient,
            metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants(),
            HoodieMetadataConfig.newBuilder().enable(useFileListingFromMetadata).withAssumeDatePartitioning(assumeDatePartitioning).build());
        return fsView.getLatestBaseFiles(p).map(HoodieBaseFile::getFileName);
      });
    } catch (Exception e) {
      throw new HoodieException("Error in fetching latest base files.", e);
    }
  }

  public Path getManifestFolder() {
    return new Path(metaClient.getMetaPath(), MANIFEST_FOLDER_NAME);
  }

  public Path getManifestFilePath() {
    return new Path(getManifestFolder(), MANIFEST_FILE_NAME);
  }

  public String getManifestSourceUri() {
    return new Path(getManifestFolder(), "*").toUri().toString();
  }

  public static Builder builder() {
    return new Builder();
  }

  /**
   * Builder for {@link ManifestFileWriter}.
   */
  public static class Builder {

    private Configuration conf;
    private String basePath;
    private boolean useFileListingFromMetadata;
    private boolean assumeDatePartitioning;

    public Builder setConf(Configuration conf) {
      this.conf = conf;
      return this;
    }

    public Builder setBasePath(String basePath) {
      this.basePath = basePath;
      return this;
    }

    public Builder setUseFileListingFromMetadata(boolean useFileListingFromMetadata) {
      this.useFileListingFromMetadata = useFileListingFromMetadata;
      return this;
    }

    public Builder setAssumeDatePartitioning(boolean assumeDatePartitioning) {
      this.assumeDatePartitioning = assumeDatePartitioning;
      return this;
    }

    public ManifestFileWriter build() {
      ValidationUtils.checkArgument(conf != null, "Configuration needs to be set to init ManifestFileWriter");
      ValidationUtils.checkArgument(basePath != null, "basePath needs to be set to init ManifestFileWriter");
      return new ManifestFileWriter(conf, basePath, useFileListingFromMetadata, assumeDatePartitioning);
    }
  }
}
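
For context, a minimal usage sketch of the new writer follows; the base path and configuration values are illustrative, not part of this change:

import org.apache.hadoop.conf.Configuration;

import org.apache.hudi.sync.common.util.ManifestFileWriter;

public class ManifestFileWriterExample {
  public static void main(String[] args) {
    // Hypothetical Hudi table location; substitute a real base path.
    String basePath = "file:///tmp/hudi_trips_cow";
    ManifestFileWriter writer = ManifestFileWriter.builder()
        .setConf(new Configuration())
        .setBasePath(basePath)
        .setUseFileListingFromMetadata(false)
        .setAssumeDatePartitioning(false)
        .build();
    // Writes the latest base file names, one per line, to
    // <basePath>/.hoodie/manifest/latest-snapshot.csv.
    writer.writeManifestFile();
    // Glob URI over the manifest folder (<basePath>/.hoodie/manifest/*),
    // usable as the source URI for an external reader such as BigQuery.
    System.out.println(writer.getManifestSourceUri());
  }
}
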
org/apache/hudi/sync/common/util/TestManifestFileUtil.java (deleted)
@@ -1,73 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.sync.common.util;

import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.testutils.HoodieTestTable;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;

public class TestManifestFileUtil extends HoodieCommonTestHarness {

  private static final List<String> MULTI_LEVEL_PARTITIONS = Arrays.asList("2019/01", "2020/01", "2021/01");
  private static HoodieTestTable hoodieTestTable;

  @BeforeEach
  public void setUp() throws IOException {
    initMetaClient();
    hoodieTestTable = HoodieTestTable.of(metaClient);
  }

  @Test
  public void testMultiLevelPartitionedTable() throws Exception {
    // Generate 10 files under each partition
    createTestDataForPartitionedTable(10);
    ManifestFileUtil manifestFileUtil = ManifestFileUtil.builder().setConf(metaClient.getHadoopConf()).setBasePath(basePath).build();
    Assertions.assertEquals(30, manifestFileUtil.fetchLatestBaseFilesForAllPartitions().count());
  }

  @Test
  public void testCreateManifestFile() throws Exception {
    // Generate 10 files under each partition
    createTestDataForPartitionedTable(10);
    ManifestFileUtil manifestFileUtil = ManifestFileUtil.builder().setConf(metaClient.getHadoopConf()).setBasePath(basePath).build();
    manifestFileUtil.writeManifestFile();
    Assertions.assertTrue(FSUtils.getFileSize(metaClient.getFs(), manifestFileUtil.getManifestFilePath()) > 0);
  }

  public void createTestDataForPartitionedTable(int numOfFiles) throws Exception {
    String instant = "100";
    hoodieTestTable = hoodieTestTable.addCommit(instant);
    // Generate numOfFiles base files under each partition
    for (String partition : MULTI_LEVEL_PARTITIONS) {
      hoodieTestTable = hoodieTestTable.withPartitionMetaFiles(partition)
          .withBaseFilesInPartition(partition, IntStream.range(0, numOfFiles).toArray());
    }
  }
}
org/apache/hudi/sync/common/util/TestManifestFileWriter.java (added)
@@ -0,0 +1,80 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.sync.common.util;

import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.testutils.HoodieTestTable;
import org.apache.hudi.common.util.FileIOUtils;

import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.io.InputStream;
import java.util.stream.IntStream;

import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS;
import static org.apache.hudi.sync.common.util.ManifestFileWriter.fetchLatestBaseFilesForAllPartitions;
import static org.junit.jupiter.api.Assertions.assertEquals;

public class TestManifestFileWriter extends HoodieCommonTestHarness {

  @BeforeEach
  public void setUp() throws IOException {
    initMetaClient();
  }

  @Test
  public void testMultiLevelPartitionedTable() throws Exception {
    // Generate 10 files under each of the 3 default partitions
    createTestDataForPartitionedTable(metaClient, 10);
    assertEquals(30, fetchLatestBaseFilesForAllPartitions(metaClient, false, false).count());
  }

  @Test
  public void testCreateManifestFile() throws Exception {
    // Generate 3 files under each partition
    createTestDataForPartitionedTable(metaClient, 3);
    ManifestFileWriter manifestFileWriter = ManifestFileWriter.builder().setConf(metaClient.getHadoopConf()).setBasePath(basePath).build();
    manifestFileWriter.writeManifestFile();
    Path manifestFilePath = manifestFileWriter.getManifestFilePath();
    try (InputStream is = metaClient.getFs().open(manifestFilePath)) {
      assertEquals(9, FileIOUtils.readAsUTFStringLines(is).size(), "There should be 9 base files in total; 3 per partition.");
    }
  }

  @Test
  public void testGetManifestSourceUri() {
    ManifestFileWriter manifestFileWriter = ManifestFileWriter.builder().setConf(metaClient.getHadoopConf()).setBasePath(basePath).build();
    String sourceUri = manifestFileWriter.getManifestSourceUri();
    assertEquals(new Path(basePath, ".hoodie/manifest/*").toUri().toString(), sourceUri);
  }

  private static void createTestDataForPartitionedTable(HoodieTableMetaClient metaClient, int numFilesPerPartition) throws Exception {
    final String instantTime = "100";
    HoodieTestTable testTable = HoodieTestTable.of(metaClient).addCommit(instantTime);
    for (String partition : DEFAULT_PARTITION_PATHS) {
      testTable.withPartitionMetaFiles(partition)
          .withBaseFilesInPartition(partition, IntStream.range(0, numFilesPerPartition).toArray());
    }
  }
}
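
For illustration, the manifest produced by writeManifestFile() is a plain newline-delimited list of the latest base file names (names only, no paths), stored at <basePath>/.hoodie/manifest/latest-snapshot.csv. A sketch of its contents for a table with three base files; the file names below are hypothetical:

d0875d00-483d-4e8b-bbbe-c520366c47a0-0_0-6-11_100.parquet
8c9b0a1d-2e3f-4a5b-8c7d-6e5f4a3b2c1d-0_1-6-12_100.parquet
e4f2d2b1-7a3c-4d6e-9f0a-1b2c3d4e5f60-0_2-6-13_100.parquet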