Compare commits
46ce96096d ... d9581682a2

10 commits:
d9581682a2
181df2240a
2188b8ed8a
6be03ca56a
215a794fd3
eb4b741c38
5c4908f006
0ac43017cb
32f7e323dc
8462d79ead
@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -20,7 +20,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -20,7 +20,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -22,7 +22,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -22,7 +22,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>

@@ -22,7 +22,7 @@
 <parent>
   <artifactId>hudi-hadoop-docker</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
 <packaging>pom</packaging>
@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-aws</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-aws</name>
 <packaging>jar</packaging>
@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi-client</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-client-common</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-client-common</name>
 <packaging>jar</packaging>
@@ -117,7 +117,7 @@ public class HoodieTimelineArchiver<T extends HoodieAvroPayload, I, K, O> {
     if (this.writer == null) {
       return HoodieLogFormat.newWriterBuilder().onParentPath(archiveFilePath.getParent())
           .withFileId(archiveFilePath.getName()).withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION)
-          .withFs(metaClient.getFs()).overBaseCommit("").build();
+          .withFs(metaClient.getFs()).overBaseCommit("").withUseHSync(config.getUseHSync()).build();
     } else {
       return this.writer;
     }
@@ -54,6 +54,7 @@ import org.apache.hudi.config.metrics.HoodieMetricsDatadogConfig;
 import org.apache.hudi.config.metrics.HoodieMetricsGraphiteConfig;
 import org.apache.hudi.config.metrics.HoodieMetricsJmxConfig;
 import org.apache.hudi.config.metrics.HoodieMetricsPrometheusConfig;
+import org.apache.hudi.config.metrics.HoodieMetricsVictoriaConfig;
 import org.apache.hudi.exception.HoodieNotSupportedException;
 import org.apache.hudi.execution.bulkinsert.BulkInsertSortMode;
 import org.apache.hudi.index.HoodieIndex;
@@ -480,6 +481,12 @@ public class HoodieWriteConfig extends HoodieConfig {
       .sinceVersion("0.11.0")
       .withDocumentation("Auto adjust lock configurations when metadata table is enabled and for async table services.");

+  public static final ConfigProperty<Boolean> USE_HSYNC = ConfigProperty
+      .key("hoodie.write.use.hsync")
+      .defaultValue(true)
+      .sinceVersion("0.12.0")
+      .withDocumentation("Use hsync or not");
+
   private ConsistencyGuardConfig consistencyGuardConfig;
   private FileSystemRetryConfig fileSystemRetryConfig;
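The flag defaults to true, so existing tables keep the always-hsync behavior. A minimal sketch of flipping it through raw properties (the key is the one registered above; the surrounding setup is assumed):

import java.util.Properties;

// Sketch only: disable hsync via the raw key added in this hunk.
Properties props = new Properties();
props.setProperty("hoodie.write.use.hsync", "false");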
@@ -1857,6 +1864,30 @@ public class HoodieWriteConfig extends HoodieConfig {
     return getStringOrDefault(HoodieMetricsConfig.METRICS_REPORTER_PREFIX);
   }

+  public String getVictoriaEndpoint() {
+    return getString(HoodieMetricsVictoriaConfig.VICTORIA_ENDPOINT);
+  }
+
+  public int getVictoriaTimeout() {
+    return getInt(HoodieMetricsVictoriaConfig.VICTORIA_TIMEOUT);
+  }
+
+  public String getVictoriaTags() {
+    return getString(HoodieMetricsVictoriaConfig.VICTORIA_TAGS);
+  }
+
+  public boolean getVictoriaBasicAuthEnable() {
+    return getBoolean(HoodieMetricsVictoriaConfig.VICTORIA_BASIC_AUTH_ENABLE);
+  }
+
+  public String getVictoriaBasicAuthUsername() {
+    return getString(HoodieMetricsVictoriaConfig.VICTORIA_BASIC_AUTH_USERNAME);
+  }
+
+  public String getVictoriaBasicAuthPassword() {
+    return getString(HoodieMetricsVictoriaConfig.VICTORIA_BASIC_AUTH_PASSWORD);
+  }
+
   /**
    * memory configs.
    */
@@ -2038,6 +2069,10 @@ public class HoodieWriteConfig extends HoodieConfig {
     return WriteConcurrencyMode.fromValue(getString(WRITE_CONCURRENCY_MODE));
   }

+  public Boolean getUseHSync() {
+    return getBooleanOrDefault(USE_HSYNC);
+  }
+
   /**
    * Are any table services configured to run inline for both scheduling and execution?
    *
@@ -2517,6 +2552,11 @@ public class HoodieWriteConfig extends HoodieConfig {
       return this;
     }

+    public Builder withUseHSync(boolean useHSync) {
+      writeConfig.setValue(USE_HSYNC, String.valueOf(useHSync));
+      return this;
+    }
+
     protected void setDefaults() {
       writeConfig.setDefaultValue(MARKERS_TYPE, getDefaultMarkersType(engineType));
       // Check for mandatory properties
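A builder-style sketch of the same toggle, assuming the usual HoodieWriteConfig entry point (withPath is the standard base-path setter; the path is a placeholder):

import org.apache.hudi.config.HoodieWriteConfig;

// Sketch only: equivalent to setting hoodie.write.use.hsync=false.
HoodieWriteConfig cfg = HoodieWriteConfig.newBuilder()
    .withPath("/tmp/hudi_table")
    .withUseHSync(false)
    .build();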
@@ -181,6 +181,8 @@ public class HoodieMetricsConfig extends HoodieConfig {
         HoodieMetricsGraphiteConfig.newBuilder().fromProperties(hoodieMetricsConfig.getProps()).build());
     hoodieMetricsConfig.setDefaultOnCondition(reporterType == MetricsReporterType.CLOUDWATCH,
         HoodieMetricsCloudWatchConfig.newBuilder().fromProperties(hoodieMetricsConfig.getProps()).build());
+    hoodieMetricsConfig.setDefaultOnCondition(reporterType == MetricsReporterType.VICTORIA,
+        HoodieMetricsVictoriaConfig.newBuilder().fromProperties(hoodieMetricsConfig.getProps()).build());
     return hoodieMetricsConfig;
   }
 }
@@ -0,0 +1,116 @@ (new file: org/apache/hudi/config/metrics/HoodieMetricsVictoriaConfig.java)
/* Apache License 2.0 header (standard ASF boilerplate) */

package org.apache.hudi.config.metrics;

import org.apache.hudi.common.config.ConfigClassProperty;
import org.apache.hudi.common.config.ConfigGroups;
import org.apache.hudi.common.config.ConfigProperty;
import org.apache.hudi.common.config.HoodieConfig;

import java.util.Properties;

import static org.apache.hudi.config.metrics.HoodieMetricsConfig.METRIC_PREFIX;

/**
 * Configs for the Victoria metrics reporter type.
 * <p>
 * {@link org.apache.hudi.metrics.MetricsReporterType#VICTORIA}
 */
@ConfigClassProperty(name = "Metrics Configurations for Victoria",
    groupName = ConfigGroups.Names.METRICS,
    description = "Enables reporting on Hudi metrics using Victoria. "
        + "Hudi publishes metrics on every commit, clean, rollback etc.")
public class HoodieMetricsVictoriaConfig extends HoodieConfig {

  public static final String VICTORIA_PREFIX = METRIC_PREFIX + ".victoria";

  public static final ConfigProperty<String> VICTORIA_ENDPOINT = ConfigProperty
      .key(VICTORIA_PREFIX + ".endpoint")
      .defaultValue("http://localhost:8428/api/v1/import/prometheus")
      .sinceVersion("0.12.0")
      .withDocumentation("Victoria metrics endpoint, e.g. http://localhost:8428/api/v1/import/prometheus.");

  public static final ConfigProperty<Boolean> VICTORIA_BASIC_AUTH_ENABLE = ConfigProperty
      .key(VICTORIA_PREFIX + ".auth.basic.enable")
      .defaultValue(false)
      .sinceVersion("0.13.0")
      .withDocumentation("Enable basic authentication.");

  public static final ConfigProperty<String> VICTORIA_BASIC_AUTH_USERNAME = ConfigProperty
      .key(VICTORIA_PREFIX + ".auth.basic.username")
      .defaultValue("")
      .sinceVersion("0.13.0")
      .withDocumentation("Basic authentication username.");

  public static final ConfigProperty<String> VICTORIA_BASIC_AUTH_PASSWORD = ConfigProperty
      .key(VICTORIA_PREFIX + ".auth.basic.password")
      .defaultValue("")
      .sinceVersion("0.13.0")
      .withDocumentation("Basic authentication password.");

  public static final ConfigProperty<Integer> VICTORIA_TIMEOUT = ConfigProperty
      .key(VICTORIA_PREFIX + ".timeout")
      .defaultValue(60000)
      .sinceVersion("0.12.0")
      .withDocumentation("HTTP push timeout. Default 1 minute.");

  public static final ConfigProperty<String> VICTORIA_TAGS = ConfigProperty
      .key(VICTORIA_PREFIX + ".tags")
      .defaultValue("")
      .sinceVersion("0.12.0")
      .withDocumentation("Extra tags for every metric.");

  private HoodieMetricsVictoriaConfig() {
    super();
  }

  public static HoodieMetricsVictoriaConfig.Builder newBuilder() {
    return new HoodieMetricsVictoriaConfig.Builder();
  }

  public static class Builder {

    private final HoodieMetricsVictoriaConfig hoodieMetricsVictoriaConfig = new HoodieMetricsVictoriaConfig();

    public Builder fromProperties(Properties props) {
      this.hoodieMetricsVictoriaConfig.getProps().putAll(props);
      return this;
    }

    public HoodieMetricsVictoriaConfig.Builder withVictoriaEndpoint(String endpoint) {
      hoodieMetricsVictoriaConfig.setValue(VICTORIA_ENDPOINT, endpoint);
      return this;
    }

    public HoodieMetricsVictoriaConfig.Builder withVictoriaTimeout(Integer timeout) {
      hoodieMetricsVictoriaConfig.setValue(VICTORIA_TIMEOUT, String.valueOf(timeout));
      return this;
    }

    public HoodieMetricsVictoriaConfig.Builder withVictoriaTags(String tags) {
      hoodieMetricsVictoriaConfig.setValue(VICTORIA_TAGS, tags);
      return this;
    }

    public HoodieMetricsVictoriaConfig build() {
      hoodieMetricsVictoriaConfig.setDefaults(HoodieMetricsVictoriaConfig.class.getName());
      return hoodieMetricsVictoriaConfig;
    }
  }
}
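A minimal sketch of building this config programmatically with the builder above; the endpoint, timeout, and tag values are illustrative:

import org.apache.hudi.config.metrics.HoodieMetricsVictoriaConfig;

HoodieMetricsVictoriaConfig victoriaConfig = HoodieMetricsVictoriaConfig.newBuilder()
    .withVictoriaEndpoint("http://vm-host:8428/api/v1/import/prometheus")  // placeholder host
    .withVictoriaTimeout(30000)                                            // milliseconds
    .withVictoriaTags("job=hudi;env=test")                                 // "k=v" pairs split on ';'
    .build();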
@@ -477,7 +477,8 @@ public class HoodieAppendHandle<T extends HoodieRecordPayload, I, K, O> extends
       .withSizeThreshold(config.getLogFileMaxSize()).withFs(fs)
       .withRolloverLogWriteToken(writeToken)
       .withLogWriteToken(latestLogFile.map(x -> FSUtils.getWriteTokenFromLogPath(x.getPath())).orElse(writeToken))
-      .withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
+      .withFileExtension(HoodieLogFile.DELTA_EXTENSION)
+      .withUseHSync(config.getUseHSync()).build();
   }

   /**
@@ -716,7 +716,8 @@ public abstract class HoodieBackedTableMetadataWriter implements HoodieTableMeta
       .withFs(dataMetaClient.getFs())
       .withRolloverLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
       .withLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
-      .withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
+      .withFileExtension(HoodieLogFile.DELTA_EXTENSION)
+      .withUseHSync(dataWriteConfig.getUseHSync()).build();
   writer.appendBlock(block);
   writer.close();
 } catch (InterruptedException e) {
@@ -29,6 +29,7 @@ import org.apache.hudi.metrics.prometheus.PrometheusReporter;
 import org.apache.hudi.metrics.prometheus.PushGatewayMetricsReporter;

 import com.codahale.metrics.MetricRegistry;
+import org.apache.hudi.metrics.victoria.VictoriaMetricsReporter;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -81,6 +82,9 @@ public class MetricsReporterFactory {
       case CLOUDWATCH:
         reporter = new CloudWatchMetricsReporter(config, registry);
         break;
+      case VICTORIA:
+        reporter = new VictoriaMetricsReporter(config, registry);
+        break;
       default:
         LOG.error("Reporter type[" + type + "] is not supported.");
         break;
@@ -22,5 +22,5 @@ package org.apache.hudi.metrics;
  * Types of reporter supported; Hudi also supports user-defined reporters.
  */
 public enum MetricsReporterType {
-  GRAPHITE, INMEMORY, JMX, DATADOG, CONSOLE, PROMETHEUS_PUSHGATEWAY, PROMETHEUS, CLOUDWATCH
+  GRAPHITE, INMEMORY, JMX, DATADOG, CONSOLE, PROMETHEUS_PUSHGATEWAY, PROMETHEUS, CLOUDWATCH, VICTORIA
 }
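With the enum value in place, the reporter should be selectable through the standard metrics keys; a hedged sketch (hoodie.metrics.on and hoodie.metrics.reporter.type are pre-existing Hudi keys, the victoria.* key comes from the new config class):

import java.util.Properties;

Properties metricsProps = new Properties();
metricsProps.setProperty("hoodie.metrics.on", "true");
metricsProps.setProperty("hoodie.metrics.reporter.type", "VICTORIA");
metricsProps.setProperty("hoodie.metrics.victoria.endpoint", "http://vm-host:8428/api/v1/import/prometheus");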
@@ -0,0 +1,91 @@ (new file: org/apache/hudi/metrics/victoria/VictoriaMetricsReporter.java)
/* Apache License 2.0 header (standard ASF boilerplate) */

package org.apache.hudi.metrics.victoria;

import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.metrics.MetricsReporter;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import java.io.Closeable;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Implementation of a VictoriaMetrics reporter, which pushes Prometheus-format data.
 */
public class VictoriaMetricsReporter extends MetricsReporter {
  private static final Logger LOG = LogManager.getLogger(VictoriaMetricsReporter.class);

  private static final String TAG_SPLIT = ";";
  private static final String TAG_OPERATOR = "=";

  private final VictoriaReporter victoriaReporter;

  public VictoriaMetricsReporter(HoodieWriteConfig config, MetricRegistry registry) {
    String endpoint = config.getVictoriaEndpoint();
    int timeout = config.getVictoriaTimeout();
    String tagsText = config.getVictoriaTags();
    boolean basicEnable = config.getVictoriaBasicAuthEnable();
    String basicUsername = config.getVictoriaBasicAuthUsername();
    String basicPassword = config.getVictoriaBasicAuthPassword();
    Map<String, String> tags = new HashMap<>(10);
    if (tagsText != null && !tagsText.isEmpty()) {
      for (String item : tagsText.split(TAG_SPLIT)) {
        String[] parsed = item.split(TAG_OPERATOR);
        tags.put(parsed[0], parsed[1]);
      }
    }
    victoriaReporter = new VictoriaReporter(
        registry,
        MetricFilter.ALL,
        TimeUnit.SECONDS,
        TimeUnit.SECONDS,
        endpoint,
        timeout,
        tags,
        basicEnable,
        basicUsername,
        basicPassword
    );
  }

  @Override
  public void start() {
    victoriaReporter.start(10, TimeUnit.SECONDS);
  }

  @Override
  public void report() {
    victoriaReporter.report(null, null, null, null, null);
  }

  @Override
  public Closeable getReporter() {
    return victoriaReporter;
  }

  @Override
  public void stop() {
    victoriaReporter.stop();
  }
}
@@ -0,0 +1,137 @@ (new file: org/apache/hudi/metrics/victoria/VictoriaReporter.java)
/* Apache License 2.0 header (standard ASF boilerplate) */

package org.apache.hudi.metrics.victoria;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.Timer;
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.dropwizard.DropwizardExports;
import io.prometheus.client.exporter.common.TextFormat;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import java.io.IOException;
import java.io.StringWriter;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class VictoriaReporter extends ScheduledReporter {
  private static final Logger LOG = LogManager.getLogger(VictoriaReporter.class);

  private final DropwizardExports metricExports;
  private final CollectorRegistry collectorRegistry;
  private final String endpoint;
  private final Map<String, String> tags;
  private final boolean basicAuthEnable;
  private final String basicAuthUsername;
  private final String basicAuthPassword;
  private final RequestConfig requestConfig;

  protected VictoriaReporter(MetricRegistry registry,
                             MetricFilter filter,
                             TimeUnit rateUnit,
                             TimeUnit durationUnit,
                             String endpoint,
                             Integer timeout,
                             Map<String, String> tags,
                             boolean basicAuthEnable,
                             String basicAuthUsername,
                             String basicAuthPassword) {
    super(registry, "hudi-push-victoria-reporter", filter, rateUnit, durationUnit);
    this.endpoint = endpoint;
    this.tags = tags;
    this.basicAuthEnable = basicAuthEnable;
    this.basicAuthUsername = basicAuthUsername;
    this.basicAuthPassword = basicAuthPassword;

    requestConfig = RequestConfig.custom()
        .setConnectTimeout(timeout)
        .setSocketTimeout(timeout)
        .build();

    collectorRegistry = new CollectorRegistry();
    metricExports = new DropwizardExports(registry);
    metricExports.register(collectorRegistry);
  }

  @Override
  public void report(SortedMap<String, Gauge> gauges,
                     SortedMap<String, Counter> counters,
                     SortedMap<String, Histogram> histograms,
                     SortedMap<String, Meter> meters,
                     SortedMap<String, Timer> timers) {
    try (StringWriter writer = new StringWriter()) {
      TextFormat.write004(writer, collectorRegistry.metricFamilySamples());

      String query = tags.entrySet()
          .stream()
          .map(entry -> String.format("extra_label=%s=%s", entry.getKey(), entry.getValue()))
          .collect(Collectors.joining("&"));

      HttpClientBuilder builder = HttpClientBuilder.create();
      if (basicAuthEnable) {
        CredentialsProvider provider = new BasicCredentialsProvider();
        provider.setCredentials(
            new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM),
            new UsernamePasswordCredentials(basicAuthUsername, basicAuthPassword)
        );
        builder.setDefaultCredentialsProvider(provider);
      }
      try (CloseableHttpClient client = builder.build()) {
        HttpPost post = new HttpPost(String.format("%s?%s", endpoint, query));
        post.setConfig(requestConfig);
        // attach the serialized Prometheus text payload as the push body
        post.setEntity(new StringEntity(writer.toString(), ContentType.TEXT_PLAIN));
        HttpResponse response = client.execute(post);
        int code = response.getStatusLine().getStatusCode();
        if (code < 200 || code >= 300) {
          HttpEntity entity = response.getEntity();
          LOG.warn("Fail to push metrics: " + (entity == null ? "" : EntityUtils.toString(entity)));
        }
      }
    } catch (IOException e) {
      LOG.error("Fail to push metrics", e);
    }
  }

  @Override
  public void start(long period, TimeUnit unit) {
    super.start(period, unit);
  }

  @Override
  public void stop() {
    super.stop();
    collectorRegistry.unregister(metricExports);
  }
}
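Worked example of the query string built above: with hoodie.metrics.victoria.tags set to job=hudi;env=prod, each push becomes a POST to http://vm-host:8428/api/v1/import/prometheus?extra_label=job=hudi&extra_label=env=prod, which the VictoriaMetrics import endpoint interprets as extra labels attached to every ingested series.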
@@ -129,7 +129,8 @@ public class BaseRollbackHelper implements Serializable {
       .withFileId(fileId)
       .overBaseCommit(latestBaseInstant)
       .withFs(metaClient.getFs())
-      .withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
+      .withFileExtension(HoodieLogFile.DELTA_EXTENSION)
+      .withUseHSync(config.getUseHSync()).build();

   // generate metadata
   if (doDelete) {
@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi-client</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink-client</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-flink-client</name>
 <packaging>jar</packaging>
@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi-client</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-java-client</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-java-client</name>
 <packaging>jar</packaging>
@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi-client</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark-client</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark-client</name>
 <packaging>jar</packaging>
@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -144,6 +144,8 @@ public interface HoodieLogFormat {
     // Rollover Log file write token
     private String rolloverLogWriteToken;

+    private Boolean useHSync;
+
     public WriterBuilder withBufferSize(int bufferSize) {
       this.bufferSize = bufferSize;
       return this;

@@ -204,6 +206,11 @@ public interface HoodieLogFormat {
       return this;
     }

+    public WriterBuilder withUseHSync(boolean useHSync) {
+      this.useHSync = useHSync;
+      return this;
+    }
+
     public Writer build() throws IOException {
       LOG.info("Building HoodieLogFormat Writer");
       if (fs == null) {

@@ -264,7 +271,10 @@ public interface HoodieLogFormat {
       if (sizeThreshold == null) {
         sizeThreshold = DEFAULT_SIZE_THRESHOLD;
       }
-      return new HoodieLogFormatWriter(fs, logFile, bufferSize, replication, sizeThreshold, rolloverLogWriteToken);
+      if (useHSync == null) {
+        useHSync = true;
+      }
+      return new HoodieLogFormatWriter(fs, logFile, bufferSize, replication, sizeThreshold, rolloverLogWriteToken, useHSync);
     }
   }
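A sketch of building a delta-log writer with the new knob; file IDs, paths, and the instant are placeholders, and fs is assumed to be an initialized org.apache.hadoop.fs.FileSystem:

import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.model.HoodieLogFile;
import org.apache.hudi.common.table.log.HoodieLogFormat;

HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder()
    .onParentPath(new Path("/tmp/hudi_table/2022/05/15"))  // placeholder partition path
    .withFileId("file-group-1")                            // placeholder file ID
    .withFileExtension(HoodieLogFile.DELTA_EXTENSION)
    .overBaseCommit("20220515000000")                      // placeholder instant
    .withFs(fs)
    .withUseHSync(false)                                   // left unset, build() defaults it to true
    .build();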
@@ -58,15 +58,18 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
   private boolean closed = false;
   private transient Thread shutdownThread = null;

+  private final boolean useHSync;
+
   private static final String APPEND_UNAVAILABLE_EXCEPTION_MESSAGE = "not sufficiently replicated yet";

-  HoodieLogFormatWriter(FileSystem fs, HoodieLogFile logFile, Integer bufferSize, Short replication, Long sizeThreshold, String rolloverLogWriteToken) {
+  HoodieLogFormatWriter(FileSystem fs, HoodieLogFile logFile, Integer bufferSize, Short replication, Long sizeThreshold, String rolloverLogWriteToken, Boolean useHSync) {
     this.fs = fs;
     this.logFile = logFile;
     this.sizeThreshold = sizeThreshold;
     this.bufferSize = bufferSize;
     this.replication = replication;
     this.rolloverLogWriteToken = rolloverLogWriteToken;
+    this.useHSync = useHSync;
     addShutDownHook();
   }

@@ -258,7 +261,9 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
     output.flush();
     // NOTE : the following API call makes sure that the data is flushed to disk on DataNodes (akin to POSIX fsync())
     // See more details here : https://issues.apache.org/jira/browse/HDFS-744
-    output.hsync();
+    if (useHSync) {
+      output.hsync();
+    }
   }

   @Override
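For context: on HDFS, hsync() forces the data to disk on the DataNodes (see HDFS-744, linked in the comment above), whereas skipping it leaves only the plain stream flush, so making the call conditional trades crash durability for log-append throughput.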
@@ -21,6 +21,7 @@ package org.apache.hudi.common.table.log.block;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.IndexedRecord;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;

@@ -107,7 +108,7 @@ public class HoodieParquetDataBlock extends HoodieDataBlock {

   ByteArrayOutputStream baos = new ByteArrayOutputStream();

-  try (FSDataOutputStream outputStream = new FSDataOutputStream(baos)) {
+  try (FSDataOutputStream outputStream = new FSDataOutputStream(baos, new FileSystem.Statistics(""))) {
     try (HoodieParquetStreamWriter<IndexedRecord> parquetWriter = new HoodieParquetStreamWriter<>(outputStream, avroParquetConfig)) {
       for (IndexedRecord record : records) {
         String recordKey = getRecordKey(record).orElse(null);
@@ -185,7 +185,11 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
     String fileId = pair.getValue();
     HoodieFileGroup group = new HoodieFileGroup(pair.getKey(), fileId, timeline);
     if (baseFiles.containsKey(pair)) {
-      baseFiles.get(pair).forEach(group::addBaseFile);
+      // if there are multiple files under the same partition path and file ID, sort them according to the
+      // modification time of the files to avoid reading the files that failed to write before.
+      List<HoodieBaseFile> partitionFileIdPairBaseFiles = baseFiles.get(pair);
+      partitionFileIdPairBaseFiles.sort(Comparator.comparingLong(o -> o.getFileStatus().getModificationTime()));
+      partitionFileIdPairBaseFiles.forEach(group::addBaseFile);
     }
     if (logFiles.containsKey(pair)) {
       logFiles.get(pair).forEach(group::addLogFile);
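The effect, going by the comment in the hunk, is that when a failed write leaves several base files for one (partition, fileId) pair, ascending modification-time order presumably lets the most recently written, complete file win over earlier partial ones.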
@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-examples</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-examples</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-examples</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-examples</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -20,7 +20,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -22,12 +22,12 @@
 <parent>
   <artifactId>hudi-flink-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>jar</packaging>

 <properties>
@@ -325,10 +325,10 @@ public class StreamWriteFunction<I> extends AbstractStreamWriteFunction<I> {
   private final double maxBufferSize;

   TotalSizeTracer(Configuration conf) {
-    long mergeReaderMem = 100; // constant 100MB
+    long mergeReaderMem = 10; // constant 10MB
     long mergeMapMaxMem = conf.getInteger(FlinkOptions.WRITE_MERGE_MAX_MEMORY);
     this.maxBufferSize = (conf.getDouble(FlinkOptions.WRITE_TASK_MAX_SIZE) - mergeReaderMem - mergeMapMaxMem) * 1024 * 1024;
-    final String errMsg = String.format("'%s' should be at least greater than '%s' plus merge reader memory(constant 100MB now)",
+    final String errMsg = String.format("'%s' should be at least greater than '%s' plus merge reader memory (constant 10MB now)",
       FlinkOptions.WRITE_TASK_MAX_SIZE.key(), FlinkOptions.WRITE_MERGE_MAX_MEMORY.key());
     ValidationUtils.checkState(this.maxBufferSize > 0, errMsg);
   }
@@ -0,0 +1,58 @@ (new file: org/apache/hudi/sink/compact/CompactEventHandler.java)
package org.apache.hudi.sink.compact;

import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.model.HoodieCommitMetadata;

import java.io.Serializable;
import java.util.List;

/**
 * Callback invoked when compaction finishes.
 *
 * @author ZhangJiacheng
 * @date 2022-05-15
 */
public interface CompactEventHandler extends Serializable {
  /**
   * Called when a compaction commit fails.
   *
   * @param instant commit instant
   */
  void failure(String instant);

  /**
   * Called when a compaction commit succeeds.
   *
   * @param instant  commit instant
   * @param statuses write statuses
   * @param metadata commit metadata
   */
  void success(String instant, List<WriteStatus> statuses, HoodieCommitMetadata metadata);

  /**
   * Called when the compaction sink is closed.
   *
   * @param message an optional message
   * @param e       any exception raised during close
   */
  void closed(String message, Exception e);

  static CompactEventHandler defaultHandler() {
    return new CompactEventHandler() {
      @Override
      public void failure(String instant) {
      }

      @Override
      public void success(String instant, List<WriteStatus> statuses, HoodieCommitMetadata metadata) {
      }

      @Override
      public void closed(String message, Exception e) {
      }
    };
  }
}
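A hypothetical handler implementation (the class name and log sinks are illustrative) to show the contract:

import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.sink.compact.CompactEventHandler;

import java.util.List;

// Sketch only: logs each lifecycle event of the compaction sink.
public class LoggingCompactEventHandler implements CompactEventHandler {
  @Override
  public void failure(String instant) {
    System.err.println("Compaction commit failed for instant " + instant);
  }

  @Override
  public void success(String instant, List<WriteStatus> statuses, HoodieCommitMetadata metadata) {
    System.out.println("Compaction " + instant + " committed " + statuses.size() + " write statuses");
  }

  @Override
  public void closed(String message, Exception e) {
    System.out.println("Compaction sink closed: " + message);
  }
}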
@@ -24,6 +24,8 @@ import org.apache.hudi.common.data.HoodieListData;
 import org.apache.hudi.common.model.HoodieCommitMetadata;
 import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.ReflectionUtils;
+import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.configuration.FlinkOptions;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.sink.CleanFunction;
@@ -62,6 +64,11 @@ public class CompactionCommitSink extends CleanFunction<CompactionCommitEvent> {
    */
   private final Configuration conf;

+  /**
+   * Invoked when the sink commits, fails, or closes.
+   */
+  private CompactEventHandler eventHandler;
+
   /**
    * Buffer to collect the event from each compact task {@code CompactFunction}.
    *
@@ -85,6 +92,14 @@ public class CompactionCommitSink extends CleanFunction<CompactionCommitEvent> {
   public CompactionCommitSink(Configuration conf) {
     super(conf);
     this.conf = conf;
+    this.eventHandler = CompactEventHandler.defaultHandler();
   }

+  public CompactionCommitSink(Configuration conf, CompactEventHandler eventHandler) {
+    this(conf);
+    if (eventHandler != null) {
+      this.eventHandler = eventHandler;
+    }
+  }
+
   @Override
@@ -132,6 +147,7 @@ public class CompactionCommitSink extends CleanFunction<CompactionCommitEvent> {
     try {
       // handle failure case
       CompactionUtil.rollbackCompaction(table, instant);
+      eventHandler.failure(instant);
     } finally {
       // remove commitBuffer to avoid obsolete metadata commit
       reset(instant);
@@ -167,10 +183,23 @@ public class CompactionCommitSink extends CleanFunction<CompactionCommitEvent> {
     if (!conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED)) {
       this.writeClient.clean();
     }
+
+    eventHandler.success(instant, statuses, metadata);
   }

   private void reset(String instant) {
     this.commitBuffer.remove(instant);
     this.compactionPlanCache.remove(instant);
   }
+
+  @Override
+  public void close() throws Exception {
+    try {
+      super.close();
+      eventHandler.closed("", null);
+    } catch (Exception e) {
+      eventHandler.closed(e.getMessage(), e);
+      throw e;
+    }
+  }
 }
@@ -158,6 +158,11 @@ public class HoodieFlinkCompactor {
    */
   private final ExecutorService executor;

+  /**
+   * Handler notified when the compaction service closes.
+   */
+  private CompactEventHandler eventHandler;
+
   public AsyncCompactionService(FlinkCompactionConfig cfg, Configuration conf, StreamExecutionEnvironment env) throws Exception {
     this.cfg = cfg;
     this.conf = conf;
@@ -179,6 +184,14 @@ public class HoodieFlinkCompactor {
     this.writeClient = StreamerUtil.createWriteClient(conf);
     this.writeConfig = writeClient.getConfig();
     this.table = writeClient.getHoodieTable();
+    this.eventHandler = CompactEventHandler.defaultHandler();
   }

+  public AsyncCompactionService(FlinkCompactionConfig cfg, Configuration conf, StreamExecutionEnvironment env, CompactEventHandler eventHandler) throws Exception {
+    this(cfg, conf, env);
+    if (eventHandler != null) {
+      this.eventHandler = eventHandler;
+    }
+  }
+
   @Override
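Wiring a handler into the commit sink, as a fragment; the Flink Configuration setup is elided and the handler is the hypothetical one sketched earlier:

import org.apache.flink.configuration.Configuration;

Configuration conf = new Configuration();  // table-specific options would be set here
CompactionCommitSink sink = new CompactionCommitSink(conf, new LoggingCompactEventHandler());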
@@ -204,21 +217,23 @@ public class HoodieFlinkCompactor {
     }, executor), executor);
   }

+  private void compactClosed(String message, Exception e) {
+    eventHandler.closed(message, e);
+  }
+
   private void compact() throws Exception {
     table.getMetaClient().reloadActiveTimeline();

     // checks the compaction plan and do compaction.
     if (cfg.schedule) {
-      Option<String> compactionInstantTimeOption = CompactionUtil.getCompactionInstantTime(metaClient);
-      if (compactionInstantTimeOption.isPresent()) {
-        boolean scheduled = writeClient.scheduleCompactionAtInstant(compactionInstantTimeOption.get(), Option.empty());
-        if (!scheduled) {
-          // do nothing.
-          LOG.info("No compaction plan for this job ");
-          return;
-        }
-        table.getMetaClient().reloadActiveTimeline();
-      }
+      boolean scheduled = writeClient.scheduleCompaction(Option.empty()).isPresent();
+      if (!scheduled) {
+        // do nothing.
+        LOG.info("No compaction plan for this job ");
+        compactClosed("No compaction plan for this job ", null);
+        return;
+      }
+      table.getMetaClient().reloadActiveTimeline();
     }

     // fetch the instant based on the configured execution sequence
@@ -227,6 +242,7 @@ public class HoodieFlinkCompactor {
     if (requested.isEmpty()) {
       // do nothing.
       LOG.info("No compaction plan scheduled, turns on the compaction plan schedule with --schedule option");
+      compactClosed("No compaction plan scheduled", null);
       return;
     }
@@ -257,6 +273,7 @@ public class HoodieFlinkCompactor {
     if (compactionPlans.isEmpty()) {
       // No compaction plan, do nothing and return.
       LOG.info("No compaction plan for instant " + String.join(",", compactionInstantTimes));
+      compactClosed("No compaction plan for instant " + String.join(",", compactionInstantTimes), null);
       return;
     }
@@ -270,6 +287,8 @@ public class HoodieFlinkCompactor {
     LOG.warn("The compaction plan was fetched through the auxiliary path(.tmp) but not the meta path(.hoodie).\n"
         + "Clean the compaction plan in auxiliary path and cancels the compaction");
     CompactionUtil.cleanInstant(table.getMetaClient(), instant);
+    compactClosed("The compaction plan was fetched through the auxiliary path(.tmp) but not the meta path(.hoodie).\n"
+        + "Clean the compaction plan in auxiliary path and cancels the compaction", null);
     return;
   }
 }
@@ -295,7 +314,7 @@ public class HoodieFlinkCompactor {
     TypeInformation.of(CompactionCommitEvent.class),
     new ProcessOperator<>(new CompactFunction(conf)))
     .setParallelism(compactionParallelism)
-    .addSink(new CompactionCommitSink(conf))
+    .addSink(new CompactionCommitSink(conf, eventHandler))
     .name("compaction_commit")
     .uid("uid_compaction_commit")
     .setParallelism(1);
@@ -122,7 +122,7 @@ public class Pipelines {
   }
   return dataStream
     .transform(opIdentifier("bucket_bulk_insert", conf), TypeInformation.of(Object.class), operatorFactory)
-    .uid("uid_bucket_bulk_insert" + conf.getString(FlinkOptions.TABLE_NAME))
+    .uid("uid_bucket_bulk_insert" + conf.getString(FlinkOptions.PATH))
     .setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS))
     .addSink(DummySink.INSTANCE)
     .name("dummy");

@@ -198,7 +198,7 @@ public class Pipelines {

   return dataStream
     .transform(opIdentifier("hoodie_append_write", conf), TypeInformation.of(Object.class), operatorFactory)
-    .uid("uid_hoodie_stream_write" + conf.getString(FlinkOptions.TABLE_NAME))
+    .uid("uid_hoodie_stream_write" + conf.getString(FlinkOptions.PATH))
     .setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
 }

@@ -259,7 +259,7 @@ public class Pipelines {
     TypeInformation.of(HoodieRecord.class),
     new BootstrapOperator<>(conf))
     .setParallelism(conf.getOptional(FlinkOptions.INDEX_BOOTSTRAP_TASKS).orElse(defaultParallelism))
-    .uid("uid_index_bootstrap_" + conf.getString(FlinkOptions.TABLE_NAME));
+    .uid("uid_index_bootstrap_" + conf.getString(FlinkOptions.PATH));
 }

 return dataStream1;

@@ -286,7 +286,7 @@ public class Pipelines {
     TypeInformation.of(HoodieRecord.class),
     new BatchBootstrapOperator<>(conf))
     .setParallelism(conf.getOptional(FlinkOptions.INDEX_BOOTSTRAP_TASKS).orElse(defaultParallelism))
-    .uid("uid_batch_index_bootstrap_" + conf.getString(FlinkOptions.TABLE_NAME));
+    .uid("uid_batch_index_bootstrap_" + conf.getString(FlinkOptions.PATH));
 }

 /**

@@ -328,7 +328,7 @@ public class Pipelines {
   BucketIndexPartitioner<HoodieKey> partitioner = new BucketIndexPartitioner<>(bucketNum, indexKeyFields);
   return dataStream.partitionCustom(partitioner, HoodieRecord::getKey)
     .transform(opIdentifier("bucket_write", conf), TypeInformation.of(Object.class), operatorFactory)
-    .uid("uid_bucket_write" + conf.getString(FlinkOptions.TABLE_NAME))
+    .uid("uid_bucket_write" + conf.getString(FlinkOptions.PATH))
     .setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
 } else {
   WriteOperatorFactory<HoodieRecord> operatorFactory = StreamWriteOperator.getFactory(conf);

@@ -339,12 +339,12 @@ public class Pipelines {
   "bucket_assigner",
   TypeInformation.of(HoodieRecord.class),
   new KeyedProcessOperator<>(new BucketAssignFunction<>(conf)))
-  .uid("uid_bucket_assigner_" + conf.getString(FlinkOptions.TABLE_NAME))
+  .uid("uid_bucket_assigner_" + conf.getString(FlinkOptions.PATH))
   .setParallelism(conf.getOptional(FlinkOptions.BUCKET_ASSIGN_TASKS).orElse(defaultParallelism))
   // shuffle by fileId(bucket id)
   .keyBy(record -> record.getCurrentLocation().getFileId())
   .transform(opIdentifier("stream_write", conf), TypeInformation.of(Object.class), operatorFactory)
-  .uid("uid_stream_write" + conf.getString(FlinkOptions.TABLE_NAME))
+  .uid("uid_stream_write" + conf.getString(FlinkOptions.PATH))
   .setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
 }
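A plausible reading of this batch of uid changes: keying operator uids on the table path rather than the table name keeps them unique when one job writes several tables that share a name, and ties Flink operator state to the physical table location across restarts.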
@@ -127,10 +127,14 @@ public class CompactionUtil {
  * @param conf The configuration
  */
 public static void inferChangelogMode(Configuration conf, HoodieTableMetaClient metaClient) throws Exception {
-  TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
-  Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchemaFromDataFile();
-  if (tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null) {
-    conf.setBoolean(FlinkOptions.CHANGELOG_ENABLED, true);
+  try {
+    TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
+    Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchemaFromDataFile();
+    if (tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null) {
+      conf.setBoolean(FlinkOptions.CHANGELOG_ENABLED, true);
+    }
+  } catch (Exception e) {
+    LOG.warn("Could not get schema from data file, CHANGELOG_ENABLE is set to default", e);
   }
 }
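With the try/catch in place, a table that has no data file yet no longer aborts here; CHANGELOG_ENABLED simply stays at its default, as the warning message states.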
@@ -20,12 +20,12 @@
 <parent>
   <artifactId>hudi-flink-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink1.13.x</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>jar</packaging>

 <properties>

@@ -20,12 +20,12 @@
 <parent>
   <artifactId>hudi-flink-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink1.14.x</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>jar</packaging>

 <properties>

@@ -20,12 +20,12 @@
 <parent>
   <artifactId>hudi-flink-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink1.15.x</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>jar</packaging>

 <properties>
@@ -20,12 +20,12 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-flink-datasource</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>pom</packaging>

 <properties>
@@ -20,7 +20,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../pom.xml</relativePath>
 </parent>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../pom.xml</relativePath>
 </parent>
 <artifactId>hudi-integ-test</artifactId>

@@ -19,13 +19,13 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-kafka-connect</artifactId>
 <description>Kafka Connect Sink Connector for Hudi</description>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>
 <packaging>jar</packaging>

 <properties>
@@ -17,12 +17,12 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark-common_${scala.binary.version}</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark-common_${scala.binary.version}</name>
 <packaging>jar</packaging>

@@ -19,12 +19,12 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark_${scala.binary.version}</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark_${scala.binary.version}</name>
 <packaging>jar</packaging>
@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -17,12 +17,12 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark2_${scala.binary.version}</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark2_${scala.binary.version}</name>
 <packaging>jar</packaging>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>
@@ -17,12 +17,12 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark3.1.x_2.12</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark3.1.x_2.12</name>
 <packaging>jar</packaging>

@@ -17,12 +17,12 @@
 <parent>
   <artifactId>hudi-spark-datasource</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

 <artifactId>hudi-spark3.3.x_2.12</artifactId>
-<version>0.12.0-SNAPSHOT</version>
+<version>0.12.0-eshore-SNAPSHOT</version>

 <name>hudi-spark3.3.x_2.12</name>
 <packaging>jar</packaging>
@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>

@@ -24,7 +24,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -24,7 +24,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -21,7 +21,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -22,7 +22,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
   <relativePath>../../pom.xml</relativePath>
 </parent>
 <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,7 @@
 <parent>
   <artifactId>hudi</artifactId>
   <groupId>org.apache.hudi</groupId>
-  <version>0.12.0-SNAPSHOT</version>
+  <version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
<parent>
|
||||
<artifactId>hudi</artifactId>
|
||||
<groupId>org.apache.hudi</groupId>
|
||||
<version>0.12.0-SNAPSHOT</version>
|
||||
<version>0.12.0-eshore-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
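Many of the child hunks above also show <relativePath>../../pom.xml</relativePath> inside the parent block. This directs Maven to resolve the parent POM from the local checkout (two directories up) before consulting any repository, so the declared parent <version> must still match the rebranded value in that file for the checkout to build cleanly. A minimal sketch of the pattern as it appears in these hunks:

<parent>
  <groupId>org.apache.hudi</groupId>
  <artifactId>hudi</artifactId>
  <version>0.12.0-eshore-SNAPSHOT</version>
  <!-- resolve the parent from the source tree rather than a repository -->
  <relativePath>../../pom.xml</relativePath>
</parent>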
pom.xml
@@ -29,7 +29,7 @@
<groupId>org.apache.hudi</groupId>
<artifactId>hudi</artifactId>
<packaging>pom</packaging>
<version>0.12.0-SNAPSHOT</version>
<version>0.12.0-eshore-SNAPSHOT</version>
<description>Apache Hudi brings stream style processing on big data</description>
<url>https://github.com/apache/hudi</url>
<name>Hudi</name>

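Editing this many POMs by hand is error-prone; a common way to apply such a rebrand in one step is the Maven Versions Plugin, e.g. running "mvn versions:set -DnewVersion=0.12.0-eshore-SNAPSHOT" from the repository root, which rewrites the root <version>, the <parent> version in every reactor module, and matching child versions. The plugin does not appear in this diff, so the pinning below is only a sketch of how one might make that invocation reproducible:

<build>
  <pluginManagement>
    <plugins>
      <!-- hypothetical addition, not part of this diff: pin the plugin
           version so "mvn versions:set -DnewVersion=0.12.0-eshore-SNAPSHOT"
           behaves the same on every machine -->
      <plugin>
        <groupId>org.codehaus.mojo</groupId>
        <artifactId>versions-maven-plugin</artifactId>
        <version>2.11.0</version>
      </plugin>
    </plugins>
  </pluginManagement>
</build>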
@@ -110,7 +110,7 @@
<log4j2.version>2.17.2</log4j2.version>
<slf4j.version>1.7.30</slf4j.version>
<joda.version>2.9.9</joda.version>
<hadoop.version>2.10.1</hadoop.version>
<hadoop.version>3.1.2</hadoop.version>
<hive.groupid>org.apache.hive</hive.groupid>
<hive.version>2.3.1</hive.version>
<hive.exec.classifier>core</hive.exec.classifier>

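Separately from the version rebrand, this last hunk bumps the build's Hadoop baseline from 2.10.1 to 3.1.2. Since hadoop.version is an ordinary Maven property, the new value flows to every dependency declaration that interpolates it; the exact artifacts Hudi pins this way are not visible in the diff, so hadoop-common below is illustrative only:

<dependencyManagement>
  <dependencies>
    <!-- ${hadoop.version} now resolves to 3.1.2 wherever it is referenced -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
  </dependencies>
</dependencyManagement>

Note that Hadoop 2.x to 3.x is a major-line jump, so the property change alone does not guarantee compatibility; modules built against 2.10 client APIs should be re-verified against 3.1.2.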