| org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(Configuration, Class<?>...)
|
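The entry above names no replacement; a common substitute is the Job-based overload, which infers the jars to ship from the classes already configured on the job rather than an explicit `Class<?>...` list. A minimal sketch (the job name is a placeholder):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-example"); // placeholder job name
    // Infers dependency jars from the mapper/reducer/key/value classes
    // already set on the job, instead of an explicit class list.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```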
| org.apache.hadoop.hbase.client.HTableWrapper.batch(List<? extends Row>)
|
| org.apache.hadoop.hbase.client.HTableWrapper.batchCallback(List<? extends Row>, Batch.Callback)
|
| org.apache.hadoop.hbase.regionserver.HRegion.bulkLoadHFiles(Collection<Pair<byte[], String>>, boolean, Region.BulkLoadListener) |
| org.apache.hadoop.hbase.regionserver.Region.bulkLoadHFiles(Collection<Pair<byte[], String>>, boolean, Region.BulkLoadListener)
Do not use; see HBASE-22380.
|
| org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.calculateRebalancedSplits(List, JobContext, long) |
| org.apache.hadoop.hbase.ipc.RpcServerInterface.call(BlockingService, Descriptors.MethodDescriptor, Message, CellScanner, long, MonitoredRPCHandler)
As of release 1.3, this will be removed in HBase 3.0
|
| org.apache.hadoop.hbase.regionserver.Store.compact(CompactionContext, ThroughputController)
see compact(CompactionContext, ThroughputController, User)
|
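A minimal sketch of the User-aware overload above. This is server-internal API; the class locations for CompactionContext and ThroughputController assume the 1.3-era packages, and User.getCurrent() supplies the caller identity explicitly:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;

public final class CompactWithUser {
  // Runs the compaction under an explicit caller identity rather than
  // the implicit server context.
  static List<StoreFile> compactAs(Store store, CompactionContext ctx,
      ThroughputController controller) throws IOException {
    return store.compact(ctx, controller, User.getCurrent());
  }
}
```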
| org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(Job, HTable)
|
| org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String) |
| org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.create(Configuration, PriorityFunction)
since 1.0.0.
|
| org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.create(Configuration, PriorityFunction)
since 1.0.0.
|
| org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory.create(Configuration, PriorityFunction) |
| org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.doBulkLoad(Path, HTable)
|
| org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.execute(Server, RegionServerServices)
use execute(Server, RegionServerServices, User)
|
| org.apache.hadoop.hbase.regionserver.SplitTransaction.execute(Server, RegionServerServices)
use execute(Server, RegionServerServices, User)
|
| org.apache.hadoop.hbase.client.HTableWrapper.exists(List) |
| org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.getAuths(byte[], boolean) |
| org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.getAuths(byte[], boolean)
|
| org.apache.hadoop.hbase.wal.WAL.getEarliestMemstoreSeqNum(byte[])
|
| org.apache.hadoop.hbase.TagRewriteCell.getFamily() |
| org.apache.hadoop.hbase.mapred.TableInputFormatBase.getHTable()
|
| org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getHTable()
|
| org.apache.hadoop.hbase.TagRewriteCell.getMvccVersion() |
| org.apache.hadoop.hbase.regionserver.Region.getOldestSeqIdOfStore(byte[])
Since version 1.2.0. Exposes too much about our internals; shutting it down.
Do not use.
|
| org.apache.hadoop.hbase.http.HttpServer.getPort() |
| org.apache.hadoop.hbase.http.InfoServer.getPort() |
| org.apache.hadoop.hbase.TagRewriteCell.getQualifier() |
| org.apache.hadoop.hbase.TagRewriteCell.getRow() |
| org.apache.hadoop.hbase.client.HTableWrapper.getRowOrBefore(byte[], byte[]) |
| org.apache.hadoop.hbase.client.HTableWrapper.getRpcTimeout() |
| org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean)
Do not write further code that depends on this call. Use getStoreFileScanner() instead; the StoreFileScanner class/interface is the preferred way to scan a store with higher-level concepts.
|
| org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean)
Do not write further code that depends on this call. Use getStoreFileScanner() instead; the StoreFileScanner class/interface is the preferred way to scan a store with higher-level concepts.
|
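Both getScanner overloads above point at getStoreFileScanner(). A hedged sketch, assuming the branch-1 two-argument form of that method:

```java
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

public final class StoreFileScanExample {
  // Prefer the StoreFileScanner abstraction over the raw HFileScanner.
  static StoreFileScanner openScanner(StoreFile.Reader reader) {
    boolean cacheBlocks = false; // avoid polluting the block cache
    boolean pread = true;        // positional reads for random access
    return reader.getStoreFileScanner(cacheBlocks, pread);
  }
}
```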
| org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplitKey(byte[], byte[], boolean) |
| org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[]) |
| org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[]) |
| org.apache.hadoop.hbase.TagRewriteCell.getValue() |
| org.apache.hadoop.hbase.util.FSUtils.getWrongWALRegionDir(Configuration, TableName, String)
For compatibility, will be removed in 4.0.0.
|
| org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.havingSystemAuth(byte[]) |
| org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.havingSystemAuth(byte[])
|
| org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job, String)
|
| org.apache.hadoop.hbase.regionserver.HRegion.initialize()
use HRegion.createHRegion() or HRegion.openHRegion()
|
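A sketch of opening an existing region through the factory method instead of constructing an HRegion and calling initialize() directly; this follows the branch-1 openHRegion signature:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.wal.WAL;

public final class OpenRegionExample {
  // The factory method initializes the region internally, so callers
  // never need to invoke initialize() themselves.
  static HRegion open(Configuration conf, FileSystem fs, Path rootDir,
      HRegionInfo info, HTableDescriptor htd, WAL wal) throws IOException {
    return HRegion.openHRegion(conf, fs, rootDir, info, htd, wal);
  }
}
```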
| org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor.initializeQueues(int, Class<? extends BlockingQueue>, Object...) |
| org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus) |
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext, Store, StoreFile)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext)
|
| org.apache.hadoop.hbase.master.MasterCoprocessorHost.postGetTableDescriptors(List) |
| org.apache.hadoop.hbase.coprocessor.MasterObserver.postGetTableDescriptors(ObserverContext, List)
Use postGetTableDescriptors with regex instead.
|
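A sketch of the regex-taking replacement hook; the preGetTableDescriptors row further down follows the same pattern. The observer class name is a placeholder, and extending BaseMasterObserver is one common way to inherit default implementations:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class TableDescriptorAuditObserver extends BaseMasterObserver {
  @Override
  public void postGetTableDescriptors(
      ObserverContext<MasterCoprocessorEnvironment> ctx,
      List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
      String regex) throws IOException {
    // Inspect or filter the descriptors the client will receive;
    // the regex the client supplied is now available to the hook.
  }
}
```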
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean, long)
This hook is no longer called by the RegionServer
|
| org.apache.hadoop.hbase.regionserver.RegionServerServices.postOpenDeployTasks(Region)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext, Region, Region)
Use postCompleteSplit() instead
|
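A sketch of the replacement hook. Note that postCompleteSplit receives only the context, not the daughter regions the deprecated hook was given; the class name is a placeholder:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class SplitAuditObserver extends BaseRegionObserver {
  @Override
  public void postCompleteSplit(
      ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
    // Runs after the split transaction has fully completed.
  }
}
```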
| org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postWALRestore(HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.postWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.coprocessor.WALObserver.postWALWrite(ObserverContext, HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner, CompactionRequest)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext, Store, List)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner)
|
| org.apache.hadoop.hbase.master.MasterCoprocessorHost.preGetTableDescriptors(List, List) |
| org.apache.hadoop.hbase.coprocessor.MasterObserver.preGetTableDescriptors(ObserverContext, List, List)
Use preGetTableDescriptors with regex instead.
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean)
This hook is no longer called by the RegionServer
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext)
Use preSplit(ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow) instead
|
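A sketch of the replacement hook, which also receives the proposed split point; the class name is a placeholder:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class SplitPointObserver extends BaseRegionObserver {
  @Override
  public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
      byte[] splitRow) throws IOException {
    // splitRow is the proposed split point, which the deprecated
    // no-argument hook never exposed.
  }
}
```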
| org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preWALRestore(HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.coprocessor.RegionObserver.preWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.coprocessor.WALObserver.preWALWrite(ObserverContext, HRegionInfo, HLogKey, WALEdit)
|
| org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
Writables are going away. Use the pb serialization methods instead.
Remove in a release after 0.96 goes out. This is here only to migrate
old Reference files written with Writables before 0.96.
|
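A hedged sketch of the pb-aware read path: Reference.read(FileSystem, Path) is intended to understand both the protobuf format and, for migration, the legacy Writable format, so callers never need readFields(DataInput):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.Reference;

public final class ReferenceReadExample {
  // Reads a Reference file through the pb-aware factory method
  // instead of the Writable readFields(DataInput) path.
  static Reference load(FileSystem fs, Path referenceFile) throws IOException {
    return Reference.read(fs, referenceFile);
  }
}
```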
| org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, HRegionInfo...)
|
| org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, long, HRegionInfo...)
|
| org.apache.hadoop.hbase.regionserver.Store.requestCompaction(int, CompactionRequest)
see requestCompaction(int, CompactionRequest, User)
|
| org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[]) |
| org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[], int, int) |
| org.apache.hadoop.hbase.master.snapshot.SnapshotManager.restoreSnapshot(HBaseProtos.SnapshotDescription) |
| org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS(InetAddress)
Mistakenly made public in 0.98.7. Scope will change to package-private.
|
| org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.rollback(Server, RegionServerServices)
use rollback(Server, RegionServerServices, User)
|
| org.apache.hadoop.hbase.regionserver.SplitTransaction.rollback(Server, RegionServerServices)
use rollback(Server, RegionServerServices, User)
|
| org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[]) |
| org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[], int, int) |
| org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[]) |
| org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[], int, int) |
| org.apache.hadoop.hbase.http.HttpServer.Builder.setBindAddress(String) |
| org.apache.hadoop.hbase.mapred.TableInputFormatBase.setHTable(HTable)
|
| org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.setHTable(HTable)
|
| org.apache.hadoop.hbase.mapreduce.TableRecordReader.setHTable(Table)
Use setTable() instead.
|
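A sketch of the setTable() replacement, handing the reader a Table obtained from a Connection rather than a concrete HTable; the table name is a placeholder:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.mapreduce.TableRecordReader;

public final class RecordReaderSetup {
  // The Table comes from a shared Connection, which the caller owns
  // and must keep open for the reader's lifetime.
  static TableRecordReader makeReader(Connection conn) throws IOException {
    TableRecordReader reader = new TableRecordReader();
    reader.setTable(conn.getTable(TableName.valueOf("example"))); // placeholder name
    return reader;
  }
}
```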
| org.apache.hadoop.hbase.http.HttpServer.Builder.setName(String) |
| org.apache.hadoop.hbase.http.HttpServer.Builder.setPort(int) |
| org.apache.hadoop.hbase.client.HTableWrapper.setRpcTimeout(int) |
| org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.skipStoreFileRangeCheck()
|
| org.apache.hadoop.hbase.regionserver.RegionMergeTransactionImpl.stepsAfterPONR(Server, RegionServerServices, HRegion) |
| org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.stepsAfterPONR(Server, RegionServerServices, PairOfSameType) |
| org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection)
|
| org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput)
Don't use these Writables methods. Use PB instead.
|
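A hedged sketch of the PB direction for the entry above, assuming the 1.x generated WALProtos classes; the builder setters follow the field names in WAL.proto:

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;

public final class WalKeyPbExample {
  // Builds the protobuf form of a WAL key instead of serializing
  // HLogKey with the deprecated Writables methods.
  static WALProtos.WALKey toPb(byte[] encodedRegionName, byte[] tableName,
      long sequenceId) {
    return WALProtos.WALKey.newBuilder()
        .setEncodedRegionName(ByteString.copyFrom(encodedRegionName))
        .setTableName(ByteString.copyFrom(tableName))
        .setLogSequenceNumber(sequenceId)
        .setWriteTime(System.currentTimeMillis())
        .build();
  }
}
```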