1
0

[HUDI-1719] Hive on Spark/MR: in an incremental query of the MOR table, the partition field is incorrect (#2720)

This commit is contained in:
xiarixiaoyao
2021-05-20 23:00:08 +08:00
committed by GitHub
parent 928b09ea0b
commit 081061e14b
5 changed files with 116 additions and 0 deletions

View File

@@ -147,4 +147,12 @@ public abstract class AbstractRealtimeRecordReader {
public Schema getHiveSchema() {
return hiveSchema;
}
public RealtimeSplit getSplit() {
return split;
}
public JobConf getJobConf() {
return jobConf;
}
}

View File

@@ -22,6 +22,7 @@ import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.hadoop.hive.HoodieCombineRealtimeFileSplit;
import org.apache.hudi.hadoop.utils.HoodieRealtimeRecordReaderUtils;
import org.apache.hadoop.hive.ql.io.IOContextMap;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputSplit;
@@ -71,6 +72,9 @@ public class HoodieCombineRealtimeRecordReader implements RecordReader<NullWrita
} else if (recordReaders.size() > 0) {
this.currentRecordReader.close();
this.currentRecordReader = recordReaders.remove(0);
AbstractRealtimeRecordReader reader = (AbstractRealtimeRecordReader)currentRecordReader.getReader();
// when switching readers, the IO context should be updated
IOContextMap.get(reader.getJobConf()).setInputPath(reader.getSplit().getPath());
return next(key, value);
} else {
return false;

View File

@@ -103,4 +103,8 @@ public class HoodieRealtimeRecordReader implements RecordReader<NullWritable, Ar
public float getProgress() throws IOException {
return this.reader.getProgress();
}
public RecordReader<NullWritable, ArrayWritable> getReader() {
return this.reader;
}
}