@InterfaceAudience.Private public class HRegion extends Object implements HeapSize, PropagatingConfigurationObserver, Region
A Region is defined by its table and its key extent.
Locking at the Region level serves only one purpose: preventing the region from being closed (and consequently split) while other operations are ongoing. Each row level operation obtains both a row lock and a region read lock for the duration of the operation. While a scanner is being constructed, getScanner holds a read lock. If the scanner is successfully constructed, it holds a read lock until it is closed. A close takes out a write lock and consequently will block for ongoing operations and will block new operations from starting while the close is in progress.
| Modifier and Type | Class and Description |
|---|---|
static interface |
HRegion.BulkLoadListener
Listener class to enable callers of bulkLoadHFile() to perform any necessary pre/post
processing of a given bulkload call
|
static interface |
HRegion.FlushResult |
static class |
HRegion.FlushResultImpl
Objects from this class are created when flushing to describe all the different states that
the flush method can end up in.
|
static class |
HRegion.RowLockImpl
Class used to represent a lock on a row.
|
Region.Operation, Region.RowLock| Modifier and Type | Field and Description |
|---|---|
static String |
CLOSE_WAIT_ABORT |
static String |
CLOSE_WAIT_INTERVAL |
static String |
CLOSE_WAIT_TIME |
static String |
COMPACTION_AFTER_BULKLOAD_ENABLE
Parameter name for compaction after bulkload
|
protected org.apache.hadoop.conf.Configuration |
conf |
static long |
DEEP_OVERHEAD |
static int |
DEFAULT_CACHE_FLUSH_INTERVAL
Default interval for the memstore flush
|
static boolean |
DEFAULT_CLOSE_WAIT_ABORT |
static long |
DEFAULT_CLOSE_WAIT_INTERVAL |
static long |
DEFAULT_CLOSE_WAIT_TIME |
static boolean |
DEFAULT_FAIR_REENTRANT_CLOSE_LOCK |
static long |
DEFAULT_FLUSH_PER_CHANGES |
static int |
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE |
static int |
DEFAULT_MAX_CELL_SIZE |
static String |
DEFAULT_REGION_STORAGE_POLICY |
static boolean |
DEFAULT_USE_META_CELL_COMPARATOR |
static boolean |
DEFAULT_WAL_HSYNC |
static String |
FAIR_REENTRANT_CLOSE_LOCK
Conf key for fair locking policy
|
static long |
FIXED_OVERHEAD |
static String |
HBASE_MAX_CELL_SIZE_KEY |
static String |
HBASE_REGIONSERVER_MINIBATCH_SIZE |
protected long |
lastReplayedCompactionSeqId |
protected long |
lastReplayedOpenRegionSeqId
The sequence id of the last replayed open region event from the primary region.
|
static String |
LOAD_CFS_ON_DEMAND_CONFIG_KEY |
static long |
MAX_FLUSH_PER_CHANGES
The following MAX_FLUSH_PER_CHANGES is large enough because each KeyValue has 20+ bytes
overhead.
|
static String |
MEMSTORE_FLUSH_PER_CHANGES
Conf key to force a flush if there are already enough changes for one region in memstore
|
static String |
MEMSTORE_PERIODIC_FLUSH_INTERVAL
Conf key for the periodic flush interval
|
static String |
REGION_STORAGE_POLICY_KEY |
protected RegionServerServices |
rsServices |
static String |
SPECIAL_RECOVERED_EDITS_DIR
This is for using HRegion as a local storage, where we may put the recovered edits in a
special place.
|
static String |
SPLIT_IGNORE_BLOCKING_ENABLED_KEY
Config for allowing a split when the file count is greater than the configured blocking file count
|
protected Map<byte[],HStore> |
stores |
static int |
SYSTEM_CACHE_FLUSH_INTERVAL
Default interval for System tables memstore flush
|
static String |
USE_META_CELL_COMPARATOR
Whether to use
MetaCellComparator even if we are not meta region. |
static String |
WAL_HSYNC_CONF_KEY |
| Constructor and Description |
|---|
HRegion(HRegionFileSystem fs,
WAL wal,
org.apache.hadoop.conf.Configuration confParam,
TableDescriptor htd,
RegionServerServices rsServices)
HRegion constructor.
|
HRegion(org.apache.hadoop.fs.Path tableDir,
WAL wal,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration confParam,
RegionInfo regionInfo,
TableDescriptor htd,
RegionServerServices rsServices)
Deprecated.
Use other constructors.
|
| Modifier and Type | Method and Description |
|---|---|
void |
addReadRequestsCount(long readRequestsCount) |
void |
addRegionToSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription desc,
ForeignExceptionSnare exnSnare)
Complete taking the snapshot on the region.
|
void |
addWriteRequestsCount(long writeRequestsCount) |
Result |
append(Append append)
Perform one or more append operations on a row.
|
Result |
append(Append append,
long nonceGroup,
long nonce) |
boolean |
areWritesEnabled() |
OperationStatus[] |
batchMutate(Mutation[] mutations)
Perform a batch of mutations.
|
OperationStatus[] |
batchMutate(Mutation[] mutations,
boolean atomic,
long nonceGroup,
long nonce) |
OperationStatus[] |
batchReplay(WALSplitUtil.MutationReplay[] mutations,
long replaySeqId) |
void |
blockUpdates() |
Map<byte[],List<org.apache.hadoop.fs.Path>> |
bulkLoadHFiles(Collection<Pair<byte[],String>> familyPaths,
boolean assignSeqId,
HRegion.BulkLoadListener bulkLoadListener)
Attempts to atomically load a group of hfiles.
|
Map<byte[],List<org.apache.hadoop.fs.Path>> |
bulkLoadHFiles(Collection<Pair<byte[],String>> familyPaths,
boolean assignSeqId,
HRegion.BulkLoadListener bulkLoadListener,
boolean copyFile,
List<String> clusterIds,
boolean replicate)
Attempts to atomically load a group of hfiles.
|
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
CompareOperator op,
ByteArrayComparable comparator,
TimeRange timeRange,
Mutation mutation)
Deprecated.
|
boolean |
checkAndMutate(byte[] row,
Filter filter,
TimeRange timeRange,
Mutation mutation)
Deprecated.
|
CheckAndMutateResult |
checkAndMutate(CheckAndMutate checkAndMutate)
Atomically checks if a row matches the conditions and if it does, it performs the actions.
|
CheckAndMutateResult |
checkAndMutate(CheckAndMutate checkAndMutate,
long nonceGroup,
long nonce) |
boolean |
checkAndRowMutate(byte[] row,
byte[] family,
byte[] qualifier,
CompareOperator op,
ByteArrayComparable comparator,
TimeRange timeRange,
RowMutations rm)
Deprecated.
|
boolean |
checkAndRowMutate(byte[] row,
Filter filter,
TimeRange timeRange,
RowMutations rm)
Deprecated.
|
void |
checkFamilies(Collection<byte[]> families)
Check the collection of families for validity.
|
Optional<byte[]> |
checkSplit() |
Optional<byte[]> |
checkSplit(boolean force)
Return the split point.
|
void |
checkTimestamps(Map<byte[],List<Cell>> familyMap,
long now)
Check the collection of families for valid timestamps
|
Map<byte[],List<HStoreFile>> |
close()
Close down this HRegion.
|
Map<byte[],List<HStoreFile>> |
close(boolean abort)
Close down this HRegion.
|
void |
closeRegionOperation()
Closes the region operation lock.
|
void |
closeRegionOperation(Region.Operation operation)
Closes the region operation lock.
|
void |
compact(boolean majorCompaction)
Synchronously compact all stores in the region.
|
boolean |
compact(CompactionContext compaction,
HStore store,
ThroughputController throughputController)
Called by compaction thread and after region is opened to compact the HStores if necessary.
|
boolean |
compact(CompactionContext compaction,
HStore store,
ThroughputController throughputController,
User user) |
void |
compactStores()
This is a helper function that compact all the stores synchronously.
|
static HDFSBlocksDistribution |
computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
TableDescriptor tableDescriptor,
RegionInfo regionInfo)
This is a helper function to compute HDFS block distribution on demand
|
static HDFSBlocksDistribution |
computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
TableDescriptor tableDescriptor,
RegionInfo regionInfo,
org.apache.hadoop.fs.Path tablePath)
This is a helper function to compute HDFS block distribution on demand
|
static HRegion |
createHRegion(org.apache.hadoop.conf.Configuration conf,
RegionInfo regionInfo,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir,
TableDescriptor tableDesc)
Create a region under the given table directory.
|
static HRegion |
createHRegion(RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
TableDescriptor hTableDescriptor,
WAL wal) |
static HRegion |
createHRegion(RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
TableDescriptor hTableDescriptor,
WAL wal,
boolean initialize)
Convenience method creating new HRegions.
|
static HRegion |
createHRegion(RegionInfo info,
org.apache.hadoop.fs.Path rootDir,
org.apache.hadoop.conf.Configuration conf,
TableDescriptor hTableDescriptor,
WAL wal,
boolean initialize,
RegionServerServices rsRpcServices)
Convenience method creating new HRegions.
|
static HRegionFileSystem |
createRegionDir(org.apache.hadoop.conf.Configuration configuration,
RegionInfo ri,
org.apache.hadoop.fs.Path rootDir)
Create the region directory in the filesystem.
|
void |
decrementCompactionsQueuedCount() |
protected void |
decrementFlushesQueuedCount() |
void |
delete(Delete delete)
Deletes the specified cells/row.
|
void |
deregisterChildren(ConfigurationManager manager) |
protected void |
doRegionCompactionPrep()
Do preparation for pending compaction.
|
boolean |
equals(Object o) |
com.google.protobuf.Message |
execService(com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall call)
Executes a single protocol buffer coprocessor endpoint
Service method using the
registered protocol handlers. |
HRegion.FlushResult |
flush(boolean flushAllStores)
Flush the cache.
|
HRegion.FlushResultImpl |
flushcache(List<byte[]> families,
boolean writeFlushRequestWalMarker,
FlushLifeCycleTracker tracker)
Flush the cache.
|
Result |
get(Get get)
Do a get based on the get parameter.
|
List<Cell> |
get(Get get,
boolean withCoprocessor)
Do a get based on the get parameter.
|
BlockCache |
getBlockCache() |
long |
getBlockedRequestsCount()
Returns the number of blocked requests
|
CellComparator |
getCellComparator()
The comparator to be used with the region
|
long |
getCheckAndMutateChecksFailed()
Returns the number of failed checkAndMutate guards
|
long |
getCheckAndMutateChecksPassed()
Returns the number of checkAndMutate guards that passed
|
CompactionState |
getCompactionState()
Returns if a given region is in compaction now.
|
int |
getCompactPriority()
Returns The priority that this region should have in the compaction queue
|
RegionCoprocessorHost |
getCoprocessorHost()
Returns the coprocessor host
|
long |
getDataInMemoryWithoutWAL()
Returns the size of data processed bypassing the WAL, in bytes
|
long |
getEarliestFlushTimeForAllStores() |
org.apache.hadoop.fs.FileSystem |
getFilesystem()
Returns
FileSystem being used by this region |
long |
getFilteredReadRequestsCount()
Returns filtered read requests count for this region
|
HDFSBlocksDistribution |
getHDFSBlocksDistribution() |
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionLoadStats |
getLoadStatistics()
Returns statistics about the current load of the region
|
ConcurrentHashMap<HashedBytes,org.apache.hadoop.hbase.regionserver.HRegion.RowLockContext> |
getLockedRows() |
long |
getMaxFlushedSeqId() |
Map<byte[],Long> |
getMaxStoreSeqId() |
long |
getMemStoreDataSize() |
long |
getMemStoreFlushSize() |
long |
getMemStoreHeapSize() |
long |
getMemStoreOffHeapSize() |
MetricsRegion |
getMetrics() |
MobFileCache |
getMobFileCache() |
MultiVersionConcurrencyControl |
getMVCC() |
protected long |
getNextSequenceId(WAL wal)
Method to safely get the next sequence number.
|
long |
getNumMutationsWithoutWAL()
Returns the number of mutations processed bypassing the WAL
|
long |
getOldestHfileTs(boolean majorCompactionOnly)
This can be used to determine the last time all files of this region were major compacted.
|
long |
getOldestSeqIdOfStore(byte[] familyName) |
long |
getOpenSeqNum()
Returns the latest sequence number that was read from storage when this region was opened
|
int |
getReadLockCount() |
org.apache.hadoop.conf.Configuration |
getReadOnlyConfiguration() |
long |
getReadPoint(IsolationLevel isolationLevel)
Returns readpoint considering given IsolationLevel.
|
long |
getReadRequestsCount()
Returns read requests count for this region
|
static org.apache.hadoop.fs.Path |
getRegionDir(org.apache.hadoop.fs.Path tabledir,
String name)
Deprecated.
For tests only; to be removed.
|
HRegionFileSystem |
getRegionFileSystem()
Returns the
HRegionFileSystem used by this region |
RegionInfo |
getRegionInfo()
Returns region information for this region
|
RegionServicesForStores |
getRegionServicesForStores()
Returns store services for this region, to access services required by store level needs
|
NavigableMap<byte[],Integer> |
getReplicationScope() |
Region.RowLock |
getRowLock(byte[] row)
Get an exclusive ( write lock ) lock on a given row.
|
Region.RowLock |
getRowLock(byte[] row,
boolean readLock)
Get a row lock for the specified row.
|
protected Region.RowLock |
getRowLockInternal(byte[] row,
boolean readLock,
Region.RowLock prevRowLock) |
org.apache.hadoop.hbase.regionserver.RegionScannerImpl |
getScanner(Scan scan)
Return an iterator that scans over the HRegion, returning the indicated columns and rows
specified by the
Scan. |
org.apache.hadoop.hbase.regionserver.RegionScannerImpl |
getScanner(Scan scan,
List<KeyValueScanner> additionalScanners)
Return an iterator that scans over the HRegion, returning the indicated columns and rows
specified by the
Scan. |
long |
getSmallestReadPoint() |
HStore |
getStore(byte[] column)
Return the Store for the given family
|
List<String> |
getStoreFileList(byte[][] columns)
Returns list of store file names for the given families
|
List<HStore> |
getStores()
Return the list of Stores managed by this region
|
TableDescriptor |
getTableDescriptor()
Returns table descriptor for this region
|
WAL |
getWAL()
Returns WAL in use for this region
|
org.apache.hadoop.fs.Path |
getWALRegionDir() |
long |
getWriteRequestsCount()
Returns write request count for this region
|
int |
hashCode() |
boolean |
hasReferences()
Returns True if this region has references.
|
long |
heapSize() |
Result |
increment(Increment increment)
Perform one or more increment operations on a row.
|
Result |
increment(Increment increment,
long nonceGroup,
long nonce) |
void |
incrementCompactionsQueuedCount() |
void |
incrementFlushesQueuedCount() |
long |
initialize()
Deprecated.
use HRegion.createHRegion() or HRegion.openHRegion()
|
protected HStore |
instantiateHStore(ColumnFamilyDescriptor family,
boolean warmup) |
protected org.apache.hadoop.hbase.regionserver.RegionScannerImpl |
instantiateRegionScanner(Scan scan,
List<KeyValueScanner> additionalScanners,
long nonceGroup,
long nonce) |
protected HRegion.FlushResultImpl |
internalFlushcache(WAL wal,
long myseqid,
Collection<HStore> storesToFlush,
MonitoredTask status,
boolean writeFlushWalMarker,
FlushLifeCycleTracker tracker)
Flush the memstore.
|
protected org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult |
internalPrepareFlushCache(WAL wal,
long myseqid,
Collection<HStore> storesToFlush,
MonitoredTask status,
boolean writeFlushWalMarker,
FlushLifeCycleTracker tracker) |
boolean |
isAvailable()
Returns true if region is available (not closed and not closing)
|
boolean |
isClosed()
Returns true if region is closed
|
boolean |
isClosing()
Returns True if closing process has started
|
boolean |
isLoadingCfsOnDemandDefault() |
boolean |
isMergeable()
Returns true if region is mergeable
|
boolean |
isReadOnly()
Returns True if region is read only
|
boolean |
isSplittable()
Returns true if region is splittable
|
Result |
mutateRow(RowMutations rm)
Performs multiple mutations atomically on a single row.
|
Result |
mutateRow(RowMutations rm,
long nonceGroup,
long nonce) |
void |
mutateRowsWithLocks(Collection<Mutation> mutations,
Collection<byte[]> rowsToLock,
long nonceGroup,
long nonce)
Perform atomic (all or none) mutations within the region.
|
static HRegion |
newHRegion(org.apache.hadoop.fs.Path tableDir,
WAL wal,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf,
RegionInfo regionInfo,
TableDescriptor htd,
RegionServerServices rsServices)
A utility method to create new instances of HRegion based on the
HConstants.REGION_IMPL
configuration property. |
void |
onConfigurationChange(org.apache.hadoop.conf.Configuration conf) |
static HRegion |
openHRegion(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootDir,
RegionInfo info,
TableDescriptor htd,
WAL wal)
Open a Region.
|
static HRegion |
openHRegion(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootDir,
RegionInfo info,
TableDescriptor htd,
WAL wal,
RegionServerServices rsServices,
CancelableProgressable reporter)
Open a Region.
|
static HRegion |
openHRegion(HRegion other,
CancelableProgressable reporter)
Useful when reopening a closed region (normally for unit tests)
|
static HRegion |
openHRegion(org.apache.hadoop.fs.Path rootDir,
RegionInfo info,
TableDescriptor htd,
WAL wal,
org.apache.hadoop.conf.Configuration conf)
Open a Region.
|
static HRegion |
openHRegion(org.apache.hadoop.fs.Path rootDir,
RegionInfo info,
TableDescriptor htd,
WAL wal,
org.apache.hadoop.conf.Configuration conf,
RegionServerServices rsServices,
CancelableProgressable reporter)
Open a Region.
|
static Region |
openHRegion(Region other,
CancelableProgressable reporter) |
static HRegion |
openHRegion(RegionInfo info,
TableDescriptor htd,
WAL wal,
org.apache.hadoop.conf.Configuration conf)
Open a Region.
|
static HRegion |
openHRegion(RegionInfo info,
TableDescriptor htd,
WAL wal,
org.apache.hadoop.conf.Configuration conf,
RegionServerServices rsServices,
CancelableProgressable reporter)
Open a Region.
|
static HRegion |
openHRegionFromTableDir(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir,
RegionInfo info,
TableDescriptor htd,
WAL wal,
RegionServerServices rsServices,
CancelableProgressable reporter)
Open a Region.
|
static HRegion |
openReadOnlyFileSystemHRegion(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir,
RegionInfo info,
TableDescriptor htd)
Open a Region on a read-only file-system (like hdfs snapshots)
|
void |
processRowsWithLocks(RowProcessor<?,?> processor)
Performs atomic multiple reads and writes on a given row.
|
void |
processRowsWithLocks(RowProcessor<?,?> processor,
long nonceGroup,
long nonce)
Performs atomic multiple reads and writes on a given row.
|
void |
processRowsWithLocks(RowProcessor<?,?> processor,
long timeout,
long nonceGroup,
long nonce)
Performs atomic multiple reads and writes on a given row.
|
void |
put(Put put)
Puts some data in the table.
|
boolean |
refreshStoreFiles()
Check the region's underlying store files, open the files that have not been opened yet, and
remove the store file readers for store files no longer available.
|
protected boolean |
refreshStoreFiles(boolean force) |
void |
registerChildren(ConfigurationManager manager) |
boolean |
registerService(com.google.protobuf.Service instance)
Registers a new protocol buffer
Service subclass as a coprocessor endpoint to be
available for handling Region#execService(com.google.protobuf.RpcController,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall) calls. |
void |
reportCompactionRequestEnd(boolean isMajor,
int numFiles,
long filesSizeCompacted) |
void |
reportCompactionRequestFailure() |
void |
reportCompactionRequestStart(boolean isMajor) |
void |
requestCompaction(byte[] family,
String why,
int priority,
boolean major,
CompactionLifeCycleTracker tracker)
Request compaction for the given family
|
void |
requestCompaction(String why,
int priority,
boolean major,
CompactionLifeCycleTracker tracker)
Request compaction on this region.
|
void |
requestFlush(FlushLifeCycleTracker tracker)
Request flush on this region.
|
protected void |
restoreEdit(HStore s,
Cell cell,
MemStoreSizing memstoreAccounting)
Used by tests
|
static boolean |
rowIsInRange(RegionInfo info,
byte[] row)
Determines whether the given row is within the row range of the specified RegionInfo
|
static boolean |
rowIsInRange(RegionInfo info,
byte[] row,
int offset,
short length) |
void |
setBlockCache(BlockCache blockCache)
Only used for unit test which doesn't start region server.
|
void |
setClosing(boolean closing)
Exposed for some very specific unit tests.
|
void |
setCoprocessorHost(RegionCoprocessorHost coprocessorHost) |
void |
setMobFileCache(MobFileCache mobFileCache)
Only used for unit test which doesn't start region server.
|
void |
setReadsEnabled(boolean readsEnabled) |
void |
setRestoredRegion(boolean restoredRegion) |
void |
setTableDescriptor(TableDescriptor desc) |
void |
setTimeoutForWriteLock(long timeoutForWriteLock)
The
doClose(boolean, org.apache.hadoop.hbase.monitoring.MonitoredTask) will block forever if someone tries to reproduce the deadlock via a
unit test.
void |
startRegionOperation()
This method needs to be called before any public call that reads or modifies data.
|
void |
startRegionOperation(Region.Operation op)
This method needs to be called before any public call that reads or modifies data.
|
String |
toString() |
void |
unblockUpdates() |
void |
waitForFlushes()
Wait for all current flushes of the region to complete
|
boolean |
waitForFlushes(long timeout)
Wait for all current flushes of the region to complete
|
void |
waitForFlushesAndCompactions()
Wait for all current flushes and compactions of the region to complete
|
static HRegion |
warmupHRegion(RegionInfo info,
TableDescriptor htd,
WAL wal,
org.apache.hadoop.conf.Configuration conf,
RegionServerServices rsServices,
CancelableProgressable reporter) |
protected void |
writeRegionOpenMarker(WAL wal,
long openSeqId) |
Methods inherited from class java.lang.Object: clone, finalize, getClass, notify, notifyAll, wait, wait, wait
Deprecated methods: checkAndMutate, checkAndMutate, checkAndRowMutate, checkAndRowMutate
public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY
public static final String HBASE_MAX_CELL_SIZE_KEY
public static final int DEFAULT_MAX_CELL_SIZE
public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE
public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
public static final String WAL_HSYNC_CONF_KEY
public static final boolean DEFAULT_WAL_HSYNC
public static final String COMPACTION_AFTER_BULKLOAD_ENABLE
public static final String SPLIT_IGNORE_BLOCKING_ENABLED_KEY
public static final String REGION_STORAGE_POLICY_KEY
public static final String DEFAULT_REGION_STORAGE_POLICY
public static final String SPECIAL_RECOVERED_EDITS_DIR
public static final String USE_META_CELL_COMPARATOR
MetaCellComparator even if we are not meta region. Used when creating
master local region.
public static final boolean DEFAULT_USE_META_CELL_COMPARATOR
protected volatile long lastReplayedOpenRegionSeqId
protected volatile long lastReplayedCompactionSeqId
protected final org.apache.hadoop.conf.Configuration conf
protected RegionServerServices rsServices
public static final String FAIR_REENTRANT_CLOSE_LOCK
public static final boolean DEFAULT_FAIR_REENTRANT_CLOSE_LOCK
public static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL
public static final int DEFAULT_CACHE_FLUSH_INTERVAL
public static final int SYSTEM_CACHE_FLUSH_INTERVAL
public static final String MEMSTORE_FLUSH_PER_CHANGES
public static final long DEFAULT_FLUSH_PER_CHANGES
public static final long MAX_FLUSH_PER_CHANGES
public static final String CLOSE_WAIT_ABORT
public static final boolean DEFAULT_CLOSE_WAIT_ABORT
public static final String CLOSE_WAIT_TIME
public static final long DEFAULT_CLOSE_WAIT_TIME
public static final String CLOSE_WAIT_INTERVAL
public static final long DEFAULT_CLOSE_WAIT_INTERVAL
public static final long FIXED_OVERHEAD
public static final long DEEP_OVERHEAD
@Deprecated public HRegion(org.apache.hadoop.fs.Path tableDir, WAL wal, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration confParam, RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices)
createHRegion(org.apache.hadoop.hbase.client.RegionInfo, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.client.TableDescriptor, org.apache.hadoop.hbase.wal.WAL, boolean) or
openHRegion(org.apache.hadoop.hbase.client.RegionInfo, org.apache.hadoop.hbase.client.TableDescriptor, org.apache.hadoop.hbase.wal.WAL, org.apache.hadoop.conf.Configuration) method.tableDir - qualified path of directory where region should be located, usually the table
directory.wal - The WAL is the outbound log for any updates to the HRegion The wal file is a
logfile from the previous execution that's custom-computed for this HRegion.
The HRegionServer computes and sorts the appropriate wal info for this
HRegion. If there is a previous wal file (implying that the HRegion has been
written-to before), then read it from the supplied path.fs - is the filesystem.confParam - is global configuration settings.regionInfo - - RegionInfo that describes the region is new), then read them from the
supplied path.htd - the table descriptorrsServices - reference to RegionServerServices or nullpublic HRegion(HRegionFileSystem fs, WAL wal, org.apache.hadoop.conf.Configuration confParam, TableDescriptor htd, RegionServerServices rsServices)
createHRegion(org.apache.hadoop.hbase.client.RegionInfo, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.client.TableDescriptor, org.apache.hadoop.hbase.wal.WAL, boolean) or
openHRegion(org.apache.hadoop.hbase.client.RegionInfo, org.apache.hadoop.hbase.client.TableDescriptor, org.apache.hadoop.hbase.wal.WAL, org.apache.hadoop.conf.Configuration) method.fs - is the filesystem.wal - The WAL is the outbound log for any updates to the HRegion The wal file is a
logfile from the previous execution that's custom-computed for this HRegion.
The HRegionServer computes and sorts the appropriate wal info for this
HRegion. If there is a previous wal file (implying that the HRegion has been
written-to before), then read it from the supplied path.confParam - is global configuration settings.htd - the table descriptorrsServices - reference to RegionServerServices or nullpublic void setRestoredRegion(boolean restoredRegion)
public long getSmallestReadPoint()
@Deprecated public long initialize() throws IOException
IOException - e
protected void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException
IOException
public boolean hasReferences()
public void blockUpdates()
public void unblockUpdates()
public HDFSBlocksDistribution getHDFSBlocksDistribution()
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf, TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException
conf - configurationtableDescriptor - TableDescriptor of the tableregionInfo - encoded name of the regionIOExceptionpublic static HDFSBlocksDistribution computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf, TableDescriptor tableDescriptor, RegionInfo regionInfo, org.apache.hadoop.fs.Path tablePath) throws IOException
conf - configurationtableDescriptor - TableDescriptor of the tableregionInfo - encoded name of the regiontablePath - the table directoryIOExceptionpublic RegionInfo getRegionInfo()
RegiongetRegionInfo in interface Regionpublic long getReadRequestsCount()
RegiongetReadRequestsCount in interface Regionpublic long getFilteredReadRequestsCount()
RegiongetFilteredReadRequestsCount in interface Regionpublic long getWriteRequestsCount()
RegiongetWriteRequestsCount in interface Regionpublic long getMemStoreDataSize()
getMemStoreDataSize in interface Regionpublic long getMemStoreHeapSize()
getMemStoreHeapSize in interface Regionpublic long getMemStoreOffHeapSize()
getMemStoreOffHeapSize in interface Regionpublic RegionServicesForStores getRegionServicesForStores()
public long getNumMutationsWithoutWAL()
RegiongetNumMutationsWithoutWAL in interface Regionpublic long getDataInMemoryWithoutWAL()
RegiongetDataInMemoryWithoutWAL in interface Regionpublic long getBlockedRequestsCount()
RegiongetBlockedRequestsCount in interface Regionpublic long getCheckAndMutateChecksPassed()
RegiongetCheckAndMutateChecksPassed in interface Regionpublic long getCheckAndMutateChecksFailed()
RegiongetCheckAndMutateChecksFailed in interface Regionpublic MetricsRegion getMetrics()
public boolean isClosed()
Regionpublic boolean isClosing()
Regionpublic boolean isReadOnly()
RegionisReadOnly in interface Regionpublic boolean isAvailable()
RegionisAvailable in interface Regionpublic boolean isSplittable()
RegionisSplittable in interface Regionpublic boolean isMergeable()
RegionisMergeable in interface Regionpublic boolean areWritesEnabled()
public MultiVersionConcurrencyControl getMVCC()
public long getMaxFlushedSeqId()
getMaxFlushedSeqId in interface Regionpublic long getReadPoint(IsolationLevel isolationLevel)
null for defaultpublic boolean isLoadingCfsOnDemandDefault()
public Map<byte[],List<HStoreFile>> close() throws IOException
This method could take some time to execute, so don't call it from a time-sensitive thread.
IOException - eDroppedSnapshotException - Thrown when replay of wal is required because a Snapshot was
not properly persisted. The region is put in closing mode, and
the caller MUST abort after this.public Map<byte[],List<HStoreFile>> close(boolean abort) throws IOException
abort - true if server is aborting (only during testing)IOException - eDroppedSnapshotException - Thrown when replay of wal is required because a Snapshot was
not properly persisted. The region is put in closing mode, and
the caller MUST abort after this.public void setClosing(boolean closing)
public void setTimeoutForWriteLock(long timeoutForWriteLock)
doClose(boolean, org.apache.hadoop.hbase.monitoring.MonitoredTask) will block forever if someone tries to reproduce the deadlock via a
unit test. Instead of blocking, the doClose(boolean, org.apache.hadoop.hbase.monitoring.MonitoredTask) will throw an exception if you set the
timeout.
timeoutForWriteLock - the time, in seconds, to wait for the write lock in
doClose(boolean, org.apache.hadoop.hbase.monitoring.MonitoredTask)
public void waitForFlushesAndCompactions()
public void waitForFlushes()
public boolean waitForFlushes(long timeout)
RegionwaitForFlushes in interface Regiontimeout - The maximum time to wait in milliseconds.public org.apache.hadoop.conf.Configuration getReadOnlyConfiguration()
getReadOnlyConfiguration in interface RegionUnsupportedOperationException
if you try to set a configuration.public TableDescriptor getTableDescriptor()
RegiongetTableDescriptor in interface Regionpublic void setTableDescriptor(TableDescriptor desc)
public WAL getWAL()
public BlockCache getBlockCache()
public void setBlockCache(BlockCache blockCache)
public MobFileCache getMobFileCache()
public void setMobFileCache(MobFileCache mobFileCache)
public org.apache.hadoop.fs.FileSystem getFilesystem()
FileSystem being used by this regionpublic HRegionFileSystem getRegionFileSystem()
HRegionFileSystem used by this regionpublic org.apache.hadoop.fs.Path getWALRegionDir()
throws IOException
IOException - if there is an error getting WALRootDirpublic long getEarliestFlushTimeForAllStores()
getEarliestFlushTimeForAllStores in interface Regionpublic long getOldestHfileTs(boolean majorCompactionOnly)
throws IOException
RegiongetOldestHfileTs in interface RegionmajorCompactionOnly - Only consider HFile that are the result of major compactionIOExceptionprotected void doRegionCompactionPrep()
throws IOException
IOExceptionpublic void compact(boolean majorCompaction)
throws IOException
This operation could block for a long time, so don't call it from a time-sensitive thread.
Note that no locks are taken to prevent possible conflicts between compaction and splitting activities. The regionserver does not normally compact and split in parallel. However by calling this method you may introduce unexpected and unhandled concurrency. Don't do this unless you know what you are doing.
majorCompaction - True to force a major compaction regardless of thresholdsIOExceptionpublic void compactStores()
throws IOException
It is used by utilities and testing
IOExceptionpublic boolean compact(CompactionContext compaction, HStore store, ThroughputController throughputController) throws IOException
This operation could block for a long time, so don't call it from a time-sensitive thread. Note that no locking is necessary at this level because compaction only conflicts with a region split, and that cannot happen because the region server does them sequentially and not in parallel.
compaction - Compaction details, obtained by requestCompaction()IOExceptionpublic boolean compact(CompactionContext compaction, HStore store, ThroughputController throughputController, User user) throws IOException
IOExceptionpublic HRegion.FlushResult flush(boolean flushAllStores) throws IOException
When this method is called the cache will be flushed unless:
This method may block for some time, so it should not be called from a time-sensitive thread.
flushAllStores - whether we want to force a flush of all storesIOException - general io exceptions because a snapshot was not properly persisted.public HRegion.FlushResultImpl flushcache(List<byte[]> families, boolean writeFlushRequestWalMarker, FlushLifeCycleTracker tracker) throws IOException
This method may block for some time, so it should not be called from a time-sensitive thread.
families - stores of region to flush.writeFlushRequestWalMarker - whether to write the flush request marker to WALtracker - used to track the life cycle of this flushIOException - general io exceptionsDroppedSnapshotException - Thrown when replay of wal is required because a Snapshot was
not properly persisted. The region is put in closing mode, and
the caller MUST abort after this.protected HRegion.FlushResultImpl internalFlushcache(WAL wal, long myseqid, Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException
This method may block for some time. Every time you call it, we up the regions sequence id even if we don't flush; i.e. the returned region id will be at least one larger than the last edit applied to this region. The returned id does not refer to an actual edit. The returned id can be used for say installing a bulk loaded file just ahead of the last hfile that was the result of this flush, etc.
wal - Null if we're NOT to go via wal.myseqid - The seqid to use if wal is null writing out flush file.storesToFlush - The list of stores to flush.IOException - general io exceptionsDroppedSnapshotException - Thrown when replay of WAL is required.protected org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult internalPrepareFlushCache(WAL wal, long myseqid, Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException
IOExceptionprotected long getNextSequenceId(WAL wal) throws IOException
IOExceptionpublic org.apache.hadoop.hbase.regionserver.RegionScannerImpl getScanner(Scan scan) throws IOException
RegionScan.
This Iterator must be closed by the caller.
getScanner in interface Regionscan - configured ScanIOException - read exceptionspublic org.apache.hadoop.hbase.regionserver.RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException
RegionScan. The scanner will also include the additional scanners passed
along with the scanners for the specified Scan instance. Should be careful with the usage to
pass additional scanners only within this Region
This Iterator must be closed by the caller.
getScanner in interface Regionscan - configured ScanadditionalScanners - Any additional scanners to be usedIOException - read exceptionsprotected org.apache.hadoop.hbase.regionserver.RegionScannerImpl instantiateRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException
IOExceptionpublic void delete(Delete delete) throws IOException
Regiondelete in interface RegionIOExceptionpublic void put(Put put) throws IOException
Regionput in interface RegionIOExceptionpublic OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup, long nonce) throws IOException
IOExceptionpublic OperationStatus[] batchMutate(Mutation[] mutations) throws IOException
RegionPlease do not operate on a same column of a single row in a batch, we will not consider the previous operation in the same batch when performing the operations in the batch.
batchMutate in interface Regionmutations - the list of mutationsIOExceptionpublic OperationStatus[] batchReplay(WALSplitUtil.MutationReplay[] mutations, long replaySeqId) throws IOException
IOException@Deprecated public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, TimeRange timeRange, Mutation mutation) throws IOException
RegioncheckAndMutate in interface Regionrow - to checkfamily - column family to checkqualifier - column qualifier to checkop - the comparison operatorcomparator - the expected valuetimeRange - time range to checkmutation - data to put if check succeedsIOException@Deprecated public boolean checkAndMutate(byte[] row, Filter filter, TimeRange timeRange, Mutation mutation) throws IOException
RegioncheckAndMutate in interface Regionrow - to checkfilter - the filtertimeRange - time range to checkmutation - data to put if check succeedsIOException@Deprecated public boolean checkAndRowMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, TimeRange timeRange, RowMutations rm) throws IOException
RegioncheckAndRowMutate in interface Regionrow - to checkfamily - column family to checkqualifier - column qualifier to checkop - the comparison operatorcomparator - the expected valuetimeRange - time range to checkrm - data to put if check succeedsIOException@Deprecated public boolean checkAndRowMutate(byte[] row, Filter filter, TimeRange timeRange, RowMutations rm) throws IOException
RegioncheckAndRowMutate in interface Regionrow - to checkfilter - the filtertimeRange - time range to checkrm - data to put if check succeedsIOExceptionpublic CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException
RegioncheckAndMutate in interface RegioncheckAndMutate - the CheckAndMutate objectIOException - if an error occurred in this methodpublic CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate, long nonceGroup, long nonce) throws IOException
IOExceptionpublic void addRegionToSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription desc, ForeignExceptionSnare exnSnare) throws IOException
ForeignExceptionSnare arg. (In the future other cancellable HRegion methods could
eventually add a ForeignExceptionSnare, or we could do something fancier).desc - snapshot description objectexnSnare - ForeignExceptionSnare that captures external exceptions in case we need to bail
out. This is allowed to be null and will just be ignored in that case.IOException - if there is an external or internal error causing the snapshot to failpublic void setReadsEnabled(boolean readsEnabled)
public void checkFamilies(Collection<byte[]> families) throws NoSuchColumnFamilyException
NoSuchColumnFamilyExceptionpublic void checkTimestamps(Map<byte[],List<Cell>> familyMap, long now) throws FailedSanityCheckException
now - current timestampFailedSanityCheckExceptionpublic boolean refreshStoreFiles()
throws IOException
RegionrefreshStoreFiles in interface RegionIOExceptionprotected boolean refreshStoreFiles(boolean force)
throws IOException
IOExceptionprotected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreAccounting)
s - Store to add edit too.cell - Cell to add.protected HStore instantiateHStore(ColumnFamilyDescriptor family, boolean warmup) throws IOException
IOExceptionpublic HStore getStore(byte[] column)
RegionUse with caution. Exposed for use of fixup utilities.
public List<HStore> getStores()
RegionUse with caution. Exposed for use of fixup utilities.
public List<String> getStoreFileList(byte[][] columns) throws IllegalArgumentException
RegiongetStoreFileList in interface RegionIllegalArgumentExceptionpublic Region.RowLock getRowLock(byte[] row) throws IOException
row - Which row to lock.IOExceptionpublic Region.RowLock getRowLock(byte[] row, boolean readLock) throws IOException
Region
The obtained locks should be released after use by Region.RowLock.release()
NOTE: the boolean passed here has changed. It used to be a boolean that stated whether or not to wait on the lock. Now it is whether an exclusive lock is requested.
getRowLock in interface Regionrow - The row actions will be performed againstreadLock - is the lock reader or writer. True indicates that a non-exclusive lock is
requestedIOExceptionRegion.startRegionOperation(),
Region.startRegionOperation(Operation)protected Region.RowLock getRowLockInternal(byte[] row, boolean readLock, Region.RowLock prevRowLock) throws IOException
IOExceptionpublic int getReadLockCount()
public ConcurrentHashMap<HashedBytes,org.apache.hadoop.hbase.regionserver.HRegion.RowLockContext> getLockedRows()
public Map<byte[],List<org.apache.hadoop.fs.Path>> bulkLoadHFiles(Collection<Pair<byte[],String>> familyPaths, boolean assignSeqId, HRegion.BulkLoadListener bulkLoadListener) throws IOException
familyPaths - List of Pair<byte[] column family, String hfilePath>bulkLoadListener - Internal hooks enabling massaging/preparation of a file about to be
bulk loadedIOException - if failed unrecoverably.public Map<byte[],List<org.apache.hadoop.fs.Path>> bulkLoadHFiles(Collection<Pair<byte[],String>> familyPaths, boolean assignSeqId, HRegion.BulkLoadListener bulkLoadListener, boolean copyFile, List<String> clusterIds, boolean replicate) throws IOException
familyPaths - List of Pair<byte[] column family, String hfilePath>bulkLoadListener - Internal hooks enabling massaging/preparation of a file about to be
bulk loadedcopyFile - always copy hfiles if trueclusterIds - ids from clusters that had already handled the given bulkload event.IOException - if failed unrecoverably.public static HRegion newHRegion(org.apache.hadoop.fs.Path tableDir, WAL wal, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.conf.Configuration conf, RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices)
HConstants.REGION_IMPL
configuration property.tableDir - qualified path of directory where region should be located, usually the table
directory.wal - The WAL is the outbound log for any updates to the HRegion The wal file is a
logfile from the previous execution that's custom-computed for this HRegion.
The HRegionServer computes and sorts the appropriate wal info for this
HRegion. If there is a previous file (implying that the HRegion has been
written-to before), then read it from the supplied path.fs - is the filesystem.conf - is global configuration settings.regionInfo - RegionInfo that describes the region.htd - the table descriptorpublic static HRegion createHRegion(RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, TableDescriptor hTableDescriptor, WAL wal, boolean initialize) throws IOException
info - Info for region to create.rootDir - Root directory for HBase instancewal - shared WALinitialize - - true to initialize the regionIOExceptionpublic static HRegion createHRegion(RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, TableDescriptor hTableDescriptor, WAL wal, boolean initialize, RegionServerServices rsRpcServices) throws IOException
info - Info for region to create.rootDir - Root directory for HBase instancewal - shared WALinitialize - - true to initialize the regionrsRpcServices - An interface we can request flushes against.IOExceptionpublic static HRegion createHRegion(org.apache.hadoop.conf.Configuration conf, RegionInfo regionInfo, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tableDir, TableDescriptor tableDesc) throws IOException
IOExceptionpublic static HRegionFileSystem createRegionDir(org.apache.hadoop.conf.Configuration configuration, RegionInfo ri, org.apache.hadoop.fs.Path rootDir) throws IOException
IOExceptionpublic static HRegion createHRegion(RegionInfo info, org.apache.hadoop.fs.Path rootDir, org.apache.hadoop.conf.Configuration conf, TableDescriptor hTableDescriptor, WAL wal) throws IOException
IOExceptionpublic static HRegion openHRegion(RegionInfo info, TableDescriptor htd, WAL wal, org.apache.hadoop.conf.Configuration conf) throws IOException
info - Info for region to be opened.wal - WAL for region to use. This method will call WAL#setSequenceNumber(long) passing
the result of the call to HRegion#getMinSequenceId() to ensure the wal id is
properly kept up. HRegionStore does this every time it opens a new region.IOExceptionpublic static HRegion openHRegion(RegionInfo info, TableDescriptor htd, WAL wal, org.apache.hadoop.conf.Configuration conf, RegionServerServices rsServices, CancelableProgressable reporter) throws IOException
info - Info for region to be openedhtd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long)
passing the result of the call to HRegion#getMinSequenceId() to ensure the
wal id is properly kept up. HRegionStore does this every time it opens a new
region.conf - The Configuration object to use.rsServices - An interface we can request flushes against.reporter - An interface we can report progress against.IOExceptionpublic static HRegion openHRegion(org.apache.hadoop.fs.Path rootDir, RegionInfo info, TableDescriptor htd, WAL wal, org.apache.hadoop.conf.Configuration conf) throws IOException
rootDir - Root directory for HBase instanceinfo - Info for region to be opened.htd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long) passing
the result of the call to HRegion#getMinSequenceId() to ensure the wal id is
properly kept up. HRegionStore does this every time it opens a new region.conf - The Configuration object to use.IOExceptionpublic static HRegion openHRegion(org.apache.hadoop.fs.Path rootDir, RegionInfo info, TableDescriptor htd, WAL wal, org.apache.hadoop.conf.Configuration conf, RegionServerServices rsServices, CancelableProgressable reporter) throws IOException
rootDir - Root directory for HBase instanceinfo - Info for region to be opened.htd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long)
passing the result of the call to HRegion#getMinSequenceId() to ensure the
wal id is properly kept up. HRegionStore does this every time it opens a new
region.conf - The Configuration object to use.rsServices - An interface we can request flushes against.reporter - An interface we can report progress against.IOExceptionpublic static HRegion openHRegion(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootDir, RegionInfo info, TableDescriptor htd, WAL wal) throws IOException
conf - The Configuration object to use.fs - Filesystem to userootDir - Root directory for HBase instanceinfo - Info for region to be opened.htd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long) passing
the result of the call to HRegion#getMinSequenceId() to ensure the wal id is
properly kept up. HRegionStore does this every time it opens a new region.IOExceptionpublic static HRegion openHRegion(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path rootDir, RegionInfo info, TableDescriptor htd, WAL wal, RegionServerServices rsServices, CancelableProgressable reporter) throws IOException
conf - The Configuration object to use.fs - Filesystem to userootDir - Root directory for HBase instanceinfo - Info for region to be opened.htd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long)
passing the result of the call to HRegion#getMinSequenceId() to ensure the
wal id is properly kept up. HRegionStore does this every time it opens a new
region.rsServices - An interface we can request flushes against.reporter - An interface we can report progress against.IOExceptionpublic static HRegion openHRegionFromTableDir(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tableDir, RegionInfo info, TableDescriptor htd, WAL wal, RegionServerServices rsServices, CancelableProgressable reporter) throws IOException
conf - The Configuration object to use.fs - Filesystem to useinfo - Info for region to be opened.htd - the table descriptorwal - WAL for region to use. This method will call WAL#setSequenceNumber(long)
passing the result of the call to HRegion#getMinSequenceId() to ensure the
wal id is properly kept up. HRegionStore does this every time it opens a new
region.rsServices - An interface we can request flushes against.reporter - An interface we can report progress against.IOExceptionpublic NavigableMap<byte[],Integer> getReplicationScope()
public static HRegion openHRegion(HRegion other, CancelableProgressable reporter) throws IOException
other - original objectreporter - An interface we can report progress against.IOExceptionpublic static Region openHRegion(Region other, CancelableProgressable reporter) throws IOException
IOExceptionpublic static HRegion openReadOnlyFileSystemHRegion(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tableDir, RegionInfo info, TableDescriptor htd) throws IOException
conf - The Configuration object to use.fs - Filesystem to useinfo - Info for region to be opened.htd - the table descriptorIOExceptionpublic static HRegion warmupHRegion(RegionInfo info, TableDescriptor htd, WAL wal, org.apache.hadoop.conf.Configuration conf, RegionServerServices rsServices, CancelableProgressable reporter) throws IOException
IOException@Deprecated public static org.apache.hadoop.fs.Path getRegionDir(org.apache.hadoop.fs.Path tabledir, String name)
tabledir - qualified path for tablename - ENCODED region namepublic static boolean rowIsInRange(RegionInfo info, byte[] row)
info - RegionInfo that specifies the row rangerow - row to be checkedpublic static boolean rowIsInRange(RegionInfo info, byte[] row, int offset, short length)
public Result get(Get get) throws IOException
Regionget in interface Regionget - query parametersIOExceptionpublic List<Cell> get(Get get, boolean withCoprocessor) throws IOException
Regionget in interface Regionget - query parameterswithCoprocessor - invoke coprocessor or not. We don't want to always invoke cp.IOExceptionpublic Result mutateRow(RowMutations rm) throws IOException
RegionmutateRow in interface Regionrm - object that specifies the set of mutations to perform atomicallyIOExceptionpublic Result mutateRow(RowMutations rm, long nonceGroup, long nonce) throws IOException
IOExceptionpublic void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock, long nonceGroup, long nonce) throws IOException
mutateRowsWithLocks in interface Regionmutations - The list of mutations to perform. mutations can contain
operations for multiple rows. Caller has to ensure that all rows are
contained in this region.rowsToLock - Rows to locknonceGroup - Optional nonce group of the operation (client Id)nonce - Optional nonce of the operation (unique random id to ensure "more
idempotence") If multiple rows are locked care should be taken that
rowsToLock is sorted in order to avoid deadlocks.IOExceptionpublic org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionLoadStats getLoadStatistics()
public void processRowsWithLocks(RowProcessor<?,?> processor) throws IOException
RegionprocessRowsWithLocks in interface Regionprocessor - The object defines the reads and writes to a row.IOExceptionpublic void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce) throws IOException
RegionprocessRowsWithLocks in interface Regionprocessor - The object defines the reads and writes to a row.nonceGroup - Optional nonce group of the operation (client Id)nonce - Optional nonce of the operation (unique random id to ensure "more
idempotence")IOExceptionpublic void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce) throws IOException
RegionprocessRowsWithLocks in interface Regionprocessor - The object defines the reads and writes to a row.timeout - The timeout of the processor.process() execution Use a negative number to
switch off the time boundnonceGroup - Optional nonce group of the operation (client Id)nonce - Optional nonce of the operation (unique random id to ensure "more
idempotence")IOExceptionpublic Result append(Append append) throws IOException
Regionappend in interface RegionIOExceptionpublic Result append(Append append, long nonceGroup, long nonce) throws IOException
IOExceptionpublic Result increment(Increment increment) throws IOException
Regionincrement in interface RegionIOExceptionpublic Result increment(Increment increment, long nonceGroup, long nonce) throws IOException
IOExceptionpublic boolean registerService(com.google.protobuf.Service instance)
Service subclass as a coprocessor endpoint to be
available for handling Region#execService(com.google.protobuf.RpcController,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall) calls.
Only a single instance may be registered per region for a given Service subclass (the
instances are keyed on Descriptors.ServiceDescriptor.getFullName()).
After the first registration, subsequent calls with the same service name will fail with a
return value of false.
instance - the Service subclass instance to expose as a coprocessor endpointtrue if the registration was successful, false otherwisepublic com.google.protobuf.Message execService(com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall call)
throws IOException
Service method using the
registered protocol handlers. Service implementations must be registered via the
registerService(com.google.protobuf.Service) method before they are available.controller - an RpcController implementation to pass to the invoked servicecall - a CoprocessorServiceCall instance identifying the service, method,
and parameters for the method invocationMessage instance containing the method's resultIOException - if no registered service handler is found or an error occurs during the
invocationregisterService(com.google.protobuf.Service)public Optional<byte[]> checkSplit()
public Optional<byte[]> checkSplit(boolean force)
public int getCompactPriority()
public RegionCoprocessorHost getCoprocessorHost()
public void setCoprocessorHost(RegionCoprocessorHost coprocessorHost)
coprocessorHost - the new coprocessor hostpublic void startRegionOperation()
throws IOException
Region
Region.closeRegionOperation() MUST then always be called after the operation has completed,
whether it succeeded or failed.
startRegionOperation in interface RegionIOExceptionpublic void startRegionOperation(Region.Operation op) throws IOException
Region
Region.closeRegionOperation() MUST then always be called after the operation has completed,
whether it succeeded or failed.
startRegionOperation in interface Regionop - The operation is about to be taken on the regionIOExceptionpublic void closeRegionOperation()
throws IOException
RegioncloseRegionOperation in interface RegionIOExceptionpublic void closeRegionOperation(Region.Operation operation) throws IOException
RegionRegion.startRegionOperation(Operation)closeRegionOperation in interface RegionIOExceptionpublic long getOpenSeqNum()
public Map<byte[],Long> getMaxStoreSeqId()
getMaxStoreSeqId in interface Regionpublic long getOldestSeqIdOfStore(byte[] familyName)
public CompactionState getCompactionState()
RegiongetCompactionState in interface Regionpublic void reportCompactionRequestStart(boolean isMajor)
public void reportCompactionRequestEnd(boolean isMajor,
int numFiles,
long filesSizeCompacted)
public void reportCompactionRequestFailure()
public void incrementCompactionsQueuedCount()
public void decrementCompactionsQueuedCount()
public void incrementFlushesQueuedCount()
protected void decrementFlushesQueuedCount()
public void onConfigurationChange(org.apache.hadoop.conf.Configuration conf)
onConfigurationChange in interface ConfigurationObserverpublic void registerChildren(ConfigurationManager manager)
registerChildren in interface PropagatingConfigurationObserverpublic void deregisterChildren(ConfigurationManager manager)
deregisterChildren in interface PropagatingConfigurationObserverpublic CellComparator getCellComparator()
RegiongetCellComparator in interface Regionpublic long getMemStoreFlushSize()
public void requestCompaction(String why, int priority, boolean major, CompactionLifeCycleTracker tracker) throws IOException
RegionrequestCompaction in interface RegionIOExceptionpublic void requestCompaction(byte[] family,
String why,
int priority,
boolean major,
CompactionLifeCycleTracker tracker)
throws IOException
RegionrequestCompaction in interface RegionIOExceptionpublic void requestFlush(FlushLifeCycleTracker tracker) throws IOException
RegionrequestFlush in interface RegionIOExceptionpublic void addReadRequestsCount(long readRequestsCount)
public void addWriteRequestsCount(long writeRequestsCount)
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.