1
0

[HUDI-379] Refactor the codes based on new JavadocStyle code style rule (#1079)

This commit is contained in:
lamber-ken
2019-12-06 12:59:28 +08:00
committed by leesf
parent c06d89b648
commit 2745b7552f
137 changed files with 434 additions and 433 deletions

View File

@@ -160,7 +160,7 @@ public class HiveSyncTool {
/**
* Syncs the list of storage partitions passed in (checks if the partition is in hive, if not adds it or if the
* partition path does not match, it updates the partition path)
* partition path does not match, it updates the partition path).
*/
private void syncPartitions(List<String> writtenPartitionsSince) {
try {

View File

@@ -132,7 +132,7 @@ public class HoodieHiveClient {
}
/**
* Add the (NEW) partitions to the table
* Add the (NEW) partitions to the table.
*/
void addPartitionsToTable(List<String> partitionsToAdd) {
if (partitionsToAdd.isEmpty()) {
@@ -145,7 +145,7 @@ public class HoodieHiveClient {
}
/**
* Partition path has changed - update the path for the following partitions
* Partition path has changed - update the path for the following partitions.
*/
void updatePartitionsToTable(List<String> changedPartitions) {
if (changedPartitions.isEmpty()) {
@@ -172,7 +172,7 @@ public class HoodieHiveClient {
}
/**
* Generate Hive Partition from partition values
* Generate Hive Partition from partition values.
*
* @param partition Partition path
* @return
@@ -241,7 +241,7 @@ public class HoodieHiveClient {
}
/**
* Scan table partitions
* Scan table partitions.
*/
public List<Partition> scanTablePartitions() throws TException {
return client.listPartitions(syncConfig.databaseName, syncConfig.tableName, (short) -1);
@@ -274,7 +274,7 @@ public class HoodieHiveClient {
}
/**
* Get the table schema
* Get the table schema.
*/
public Map<String, String> getTableSchema() {
if (syncConfig.useJdbc) {
@@ -428,7 +428,7 @@ public class HoodieHiveClient {
}
/**
* Read the schema from the log file on path
* Read the schema from the log file on path.
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
private MessageType readSchemaFromLogFile(Option<HoodieInstant> lastCompactionCommitOpt, Path path)
@@ -443,7 +443,7 @@ public class HoodieHiveClient {
}
/**
* Read the parquet schema from a parquet File
* Read the parquet schema from a parquet File.
*/
private MessageType readSchemaFromDataFile(Path parquetFilePath) throws IOException {
LOG.info("Reading schema from " + parquetFilePath);
@@ -468,7 +468,7 @@ public class HoodieHiveClient {
}
/**
* Execute a update in hive metastore with this SQL
* Execute a update in hive metastore with this SQL.
*
* @param s SQL to execute
*/
@@ -490,7 +490,7 @@ public class HoodieHiveClient {
}
/**
* Execute a update in hive using Hive Driver
* Execute a update in hive using Hive Driver.
*
* @param sql SQL statement to execute
*/
@@ -663,7 +663,7 @@ public class HoodieHiveClient {
}
/**
* Partition Event captures any partition that needs to be added or updated
* Partition Event captures any partition that needs to be added or updated.
*/
static class PartitionEvent {

View File

@@ -22,7 +22,7 @@ import java.util.ArrayList;
import java.util.List;
/**
* Extractor for Non-partitioned hive tables
* Extractor for Non-partitioned hive tables.
*/
public class NonPartitionedExtractor implements PartitionValueExtractor {

View File

@@ -29,7 +29,7 @@ import java.util.List;
import java.util.Map;
/**
* Represents the schema difference between the storage schema and hive table schema
* Represents the schema difference between the storage schema and hive table schema.
*/
public class SchemaDifference {

View File

@@ -49,14 +49,14 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
* Schema Utilities
* Schema Utilities.
*/
public class SchemaUtil {
private static final Logger LOG = LogManager.getLogger(SchemaUtil.class);
/**
* Get the schema difference between the storage schema and hive table schema
* Get the schema difference between the storage schema and hive table schema.
*/
public static SchemaDifference getSchemaDifference(MessageType storageSchema, Map<String, String> tableSchema,
List<String> partitionKeys) {
@@ -135,7 +135,7 @@ public class SchemaUtil {
}
/**
* Returns equivalent Hive table schema read from a parquet file
* Returns equivalent Hive table schema read from a parquet file.
*
* @param messageType : Parquet Schema
* @return : Hive Table schema read from parquet file MAP[String,String]
@@ -158,7 +158,7 @@ public class SchemaUtil {
}
/**
* Convert one field data type of parquet schema into an equivalent Hive schema
* Convert one field data type of parquet schema into an equivalent Hive schema.
*
* @param parquetType : Single parquet field
* @return : Equivalent Hive schema
@@ -272,7 +272,7 @@ public class SchemaUtil {
}
/**
* Return a 'struct' Hive schema from a list of Parquet fields
* Return a 'struct' Hive schema from a list of Parquet fields.
*
* @param parquetFields : list of parquet fields
* @return : Equivalent 'struct' Hive schema
@@ -324,14 +324,14 @@ public class SchemaUtil {
}
/**
* Create a 'Map' schema from Parquet map field
* Create a 'Map' schema from Parquet map field.
*/
private static String createHiveMap(String keyType, String valueType) {
return "MAP< " + keyType + ", " + valueType + ">";
}
/**
* Create an Array Hive schema from equivalent parquet list type
* Create an Array Hive schema from equivalent parquet list type.
*/
private static String createHiveArray(Type elementType, String elementName) {
StringBuilder array = new StringBuilder();
@@ -425,7 +425,7 @@ public class SchemaUtil {
}
/**
* Read the schema from the log file on path
* Read the schema from the log file on path.
*
* @return
*/

View File

@@ -74,7 +74,7 @@ public class TestHiveSyncTool {
}
/**
* Testing converting array types to Hive field declaration strings, according to the Parquet-113 spec:
* Testing converting array types to Hive field declaration strings. According to the Parquet-113 spec:
* https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists
*/
@Test

View File

@@ -64,7 +64,7 @@ public class HiveTestService {
private static final int CONNECTION_TIMEOUT = 30000;
/**
* Configuration settings
* Configuration settings.
*/
private Configuration hadoopConf;
private String workDir;