public class HoodieAppendHandle<T,I,K,O> extends HoodieWriteHandle<T,I,K,O>
| Modifier and Type | Field and Description |
|---|---|
protected long |
estimatedNumberOfBytesWritten |
protected String |
fileId |
protected Map<HoodieLogBlock.HeaderMetadataType,String> |
header |
protected long |
insertRecordsWritten |
protected Iterator<HoodieRecord<T>> |
recordItr |
protected long |
recordsDeleted |
protected long |
recordsWritten |
protected List<WriteStatus> |
statuses |
protected long |
updatedRecordsWritten |
protected HoodieLogFormat.Writer |
writer |
newRecordLocation, partitionPath, recordMerger, schemaOnReadEnabled, taskContextSupplier, timer, writeSchema, writeSchemaWithMetaFields, writeStatus, writeToken
config, fs, hoodieTable, instantTime
| Constructor and Description |
|---|
HoodieAppendHandle(HoodieWriteConfig config,
String instantTime,
HoodieTable<T,I,K,O> hoodieTable,
String partitionPath,
String fileId,
Iterator<HoodieRecord<T>> recordItr,
TaskContextSupplier taskContextSupplier) |
HoodieAppendHandle(HoodieWriteConfig config,
String instantTime,
HoodieTable<T,I,K,O> hoodieTable,
String partitionPath,
String fileId,
Iterator<HoodieRecord<T>> recordItr,
TaskContextSupplier taskContextSupplier,
Map<HoodieLogBlock.HeaderMetadataType,String> header)
This is used by log compaction only.
|
HoodieAppendHandle(HoodieWriteConfig config,
String instantTime,
HoodieTable<T,I,K,O> hoodieTable,
String partitionPath,
String fileId,
TaskContextSupplier sparkTaskContextSupplier) |
| Modifier and Type | Method and Description |
|---|---|
protected boolean |
addBlockIdentifier() |
protected void |
appendDataAndDeleteBlocks(Map<HoodieLogBlock.HeaderMetadataType,String> header,
boolean appendDeleteBlocks)
Appends data and delete blocks.
|
boolean |
canWrite(HoodieRecord record)
Determines whether we can accept the incoming records, into the current file.
|
List<WriteStatus> |
close() |
void |
doAppend() |
protected void |
doWrite(HoodieRecord record,
org.apache.avro.Schema schema,
TypedProperties props)
Perform the actual writing of the given record into the backing file.
|
IOType |
getIOType() |
protected boolean |
isUpdateRecord(HoodieRecord<T> hoodieRecord)
Returns whether the hoodie record is an UPDATE.
|
protected boolean |
needsUpdateLocation()
Whether there is need to update the record location.
|
void |
write(Map<String,HoodieRecord<T>> recordMap) |
List<WriteStatus> |
writeStatuses() |
createLogWriter, createLogWriter, createLogWriter, createMarkerFile, getAttemptId, getConfig, getFileId, getFileSystem, getHoodieTableMetaClient, getPartitionId, getPartitionPath, getStageId, getWriterSchema, getWriterSchemaWithMetaFields, isClosed, makeNewFilePath, makeNewPath, markClosed, toAvroRecord, write
protected final String fileId
protected Iterator<HoodieRecord<T>> recordItr
protected HoodieLogFormat.Writer writer
protected final List<WriteStatus> statuses
protected long recordsWritten
protected long recordsDeleted
protected long updatedRecordsWritten
protected long insertRecordsWritten
protected long estimatedNumberOfBytesWritten
protected final Map<HoodieLogBlock.HeaderMetadataType,String> header
public HoodieAppendHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T,I,K,O> hoodieTable, String partitionPath, String fileId, Iterator<HoodieRecord<T>> recordItr, TaskContextSupplier taskContextSupplier, Map<HoodieLogBlock.HeaderMetadataType,String> header)
public HoodieAppendHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T,I,K,O> hoodieTable, String partitionPath, String fileId, Iterator<HoodieRecord<T>> recordItr, TaskContextSupplier taskContextSupplier)
public HoodieAppendHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T,I,K,O> hoodieTable, String partitionPath, String fileId, TaskContextSupplier sparkTaskContextSupplier)
protected boolean isUpdateRecord(HoodieRecord<T> hoodieRecord)
public void doAppend()
protected void appendDataAndDeleteBlocks(Map<HoodieLogBlock.HeaderMetadataType,String> header, boolean appendDeleteBlocks)
public boolean canWrite(HoodieRecord record)
Overrides canWrite in class HoodieWriteHandle. Depends on:
- Whether it belongs to the same partitionPath as existing records
- Whether the current file's written bytes are less than the max file size
protected void doWrite(HoodieRecord record, org.apache.avro.Schema schema, TypedProperties props)
Overrides doWrite in class HoodieWriteHandle.
public List<WriteStatus> close()
public void write(Map<String,HoodieRecord<T>> recordMap)
public List<WriteStatus> writeStatuses()
Overrides writeStatuses in class HoodieWriteHandle<T,I,K,O>.
protected boolean needsUpdateLocation()
protected boolean addBlockIdentifier()
Copyright © 2023 The Apache Software Foundation. All rights reserved.