public class OHTable extends Object implements org.apache.hadoop.hbase.client.Table
| 限定符和类型 | 类和说明 |
|---|---|
| static class | OHTable.OHOpType |
| 构造器和说明 |
|---|
OHTable(byte[] tableName,
com.alipay.oceanbase.rpc.ObTableClient obTableClient,
ExecutorService executePool)
Creates an object to access a HBase table.
|
OHTable(org.apache.hadoop.conf.Configuration configuration,
byte[] tableName)
Creates an object to access a HBase table.
|
OHTable(org.apache.hadoop.conf.Configuration configuration,
byte[] tableName,
ExecutorService executePool)
Creates an object to access a HBase table.
|
OHTable(org.apache.hadoop.conf.Configuration configuration,
String tableName)
Creates an object to access a HBase table.
|
OHTable(org.apache.hadoop.hbase.client.Connection connection,
ObTableBuilderBase builder,
OHConnectionConfiguration connectionConfig,
ExecutorService executePool) |
OHTable(org.apache.hadoop.hbase.TableName tableName,
org.apache.hadoop.hbase.client.Connection connection,
OHConnectionConfiguration connectionConfig,
ExecutorService executePool) |
| 限定符和类型 | 方法和说明 |
|---|---|
org.apache.hadoop.hbase.client.Result |
append(org.apache.hadoop.hbase.client.Append append)
例如 key = "key", c1="a",在c1后面append,使c1="aaa"
原子操作
|
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results) |
<R> void |
batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperation |
buildObTableBatchOperation(List<org.apache.hadoop.hbase.Cell> keyValueList,
boolean putToAppend,
List<byte[]> qualifiers) |
static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperationRequest |
buildObTableBatchOperationRequest(com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperation obTableBatchOperation,
String targetTableName) |
static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableOperation |
buildObTableOperation(org.apache.hadoop.hbase.Cell kv,
boolean putToAppend) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
例如当 key="key001", family = "family", c1="a" 时,才执行 delete 操作,该命令是原子的
|
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder |
checkAndMutate(byte[] row,
byte[] family) |
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations rowMutations) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
例如当 key="key001", family = "family", c1="a" 时,才执行 put 操作,该命令是原子的
|
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
static void |
checkFamilyViolation(Collection<byte[]> families,
boolean check_empty_family) |
static void |
checkFamilyViolationForOneFamily(Collection<byte[]> families) |
void |
close() |
static int |
compareByteArray(byte[] bt1,
byte[] bt2) |
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel |
coprocessorService(byte[] row) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
static ThreadPoolExecutor |
createDefaultThreadPoolExecutor(int coreSize,
int maxThreads,
long keepAliveTime)
创建默认的线程池
Using the "direct handoff" approach, new threads will only be created
if it is necessary and will grow unbounded.
|
void |
delete(org.apache.hadoop.hbase.client.Delete delete) |
void |
delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
boolean |
exists(org.apache.hadoop.hbase.client.Get get)
Test for the existence of columns in the table, as specified in the Get.
|
boolean[] |
exists(List<org.apache.hadoop.hbase.client.Get> gets) |
boolean[] |
existsAll(List<org.apache.hadoop.hbase.client.Get> gets) |
void |
flushCommits() |
org.apache.hadoop.hbase.client.Result |
get(org.apache.hadoop.hbase.client.Get get) |
org.apache.hadoop.hbase.client.Result[] |
get(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.conf.Configuration |
getConfiguration() |
static OHTable.OHOpType |
getDeleteType(org.apache.hadoop.hbase.Cell.Type type) |
org.apache.hadoop.hbase.client.TableDescriptor |
getDescriptor() |
byte[][] |
getEndKeys() |
org.apache.hadoop.hbase.TableName |
getName() |
int |
getOperationTimeout() |
int |
getReadRpcTimeout() |
int |
getRpcTimeout() |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family,
byte[] qualifier) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(org.apache.hadoop.hbase.client.Scan scan) |
List<org.apache.hadoop.hbase.client.ResultScanner> |
getScanners(org.apache.hadoop.hbase.client.Scan scan) |
org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> |
getStartEndKeys() |
byte[][] |
getStartKeys() |
org.apache.hadoop.hbase.HTableDescriptor |
getTableDescriptor() |
static String |
getTargetTableName(String tableNameString,
String familyString,
org.apache.hadoop.conf.Configuration conf) |
int |
getWriteRpcTimeout() |
org.apache.hadoop.hbase.client.Result |
increment(org.apache.hadoop.hbase.client.Increment increment)
例如 key = "key", c2=1,在c2后面increment,在c2后面加2,变成 c2=3
原子操作
|
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount)
直接通过 column 名进行 increment 操作
|
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability) |
void |
mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
void |
put(List<org.apache.hadoop.hbase.client.Put> puts) |
void |
put(org.apache.hadoop.hbase.client.Put put) |
void |
refreshTableEntry(String familyString,
boolean hasTestLoad) |
void |
setOperationTimeout(int operationTimeout) |
void |
setRpcTimeout(int rpcTimeout) |
void |
setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor) |
public OHTable(org.apache.hadoop.conf.Configuration configuration,
String tableName)
throws IOException
configuration instance. Uses already-populated
region cache if one is available, populated by any other OHTable instances
sharing this configuration instance. Recommended.
参数:
configuration - Configuration object to use.
tableName - Name of the table.
抛出:
IllegalArgumentException - if a parameter is invalid
IOException - if a remote or network exception occurs

public OHTable(org.apache.hadoop.conf.Configuration configuration,
byte[] tableName)
throws IOException
configuration instance. Uses already-populated
region cache if one is available, populated by any other OHTable instances
sharing this configuration instance. Recommended.
参数:
configuration - Configuration object to use.
tableName - Name of the table.
抛出:
IOException - if a remote or network exception occurs
IllegalArgumentException - if a parameter is invalid

public OHTable(org.apache.hadoop.conf.Configuration configuration,
byte[] tableName,
ExecutorService executePool)
throws IOException
configuration instance. Uses already-populated
region cache if one is available, populated by any other OHTable instances
sharing this configuration instance.
Use this constructor when the ExecutorService is externally managed.
参数:
configuration - Configuration object to use.
tableName - Name of the table.
executePool - ExecutorService to be used.
抛出:
IOException - if a remote or network exception occurs
IllegalArgumentException - if a parameter is invalid

@InterfaceAudience.Private
public OHTable(byte[] tableName,
com.alipay.oceanbase.rpc.ObTableClient obTableClient,
ExecutorService executePool)
connection instance.
Use this constructor when the ExecutorService and HConnection instance are
externally managed.
参数:
tableName - Name of the table.
obTableClient - OceanBase obTableClient to be used.
executePool - ExecutorService to be used.
抛出:
IllegalArgumentException - if a parameter is invalid

public OHTable(org.apache.hadoop.hbase.TableName tableName,
org.apache.hadoop.hbase.client.Connection connection,
OHConnectionConfiguration connectionConfig,
ExecutorService executePool)
throws IOException
抛出: IOException

public OHTable(org.apache.hadoop.hbase.client.Connection connection,
ObTableBuilderBase builder,
OHConnectionConfiguration connectionConfig,
ExecutorService executePool)
throws IOException
抛出: IOException

@InterfaceAudience.Private
public static ThreadPoolExecutor createDefaultThreadPoolExecutor(int coreSize, int maxThreads, long keepAliveTime)
参数:
coreSize - core size
maxThreads - max threads
keepAliveTime - keep alive time

public org.apache.hadoop.hbase.TableName getName()
getName 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.conf.Configuration getConfiguration()
getConfiguration 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor()
getTableDescriptor 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.hbase.client.TableDescriptor getDescriptor()
    throws IOException
getDescriptor 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean exists(org.apache.hadoop.hbase.client.Get get)
    throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.Table
参数: get - the Get
抛出: IOException - e

public boolean[] existsAll(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
existsAll 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean[] exists(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException
batch 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public <R> void batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
batchCallback 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException, InterruptedException

public static int compareByteArray(byte[] bt1,
byte[] bt2)
public org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get)
throws IOException
get 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
get 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public List<org.apache.hadoop.hbase.client.ResultScanner> getScanners(org.apache.hadoop.hbase.client.Scan scan) throws IOException
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family,
    byte[] qualifier)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void put(org.apache.hadoop.hbase.client.Put put)
    throws IOException
put 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException
put 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.Table
参数:
row - row
family - family
qualifier - qualifier
value - value
put - put
抛出:
IOException - if failed

public boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void delete(org.apache.hadoop.hbase.client.Delete delete)
    throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.Table
参数:
row - row
family - family
qualifier - qualifier
value - value
delete - delete
抛出:
IOException - if failed

public boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public boolean checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations rowMutations)
throws IOException
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder checkAndMutate(byte[] row,
byte[] family)
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.Table

public void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm)
mutateRow 在接口中 org.apache.hadoop.hbase.client.Table

public org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append)
throws IOException
append 在接口中 org.apache.hadoop.hbase.client.Table
参数: append - append
抛出: IOException - if failed

public org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment)
throws IOException
increment 在接口中 org.apache.hadoop.hbase.client.Table
参数: increment - increment
抛出: IOException - if failed

public long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.Table
参数:
row - row
family - family
qualifier - qualifier
amount - amount
抛出:
IOException - if failed

public long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public void flushCommits()
    throws IOException
抛出: IOException

public void close()
    throws IOException
close 在接口中 Closeable
close 在接口中 AutoCloseable
close 在接口中 org.apache.hadoop.hbase.client.Table
抛出: IOException

public org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel coprocessorService(byte[] row)
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table

public <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public <R extends com.google.protobuf.Message> Map<byte[],R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback)
throws com.google.protobuf.ServiceException,
Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Table
抛出: com.google.protobuf.ServiceException, Throwable

public void setOperationTimeout(int operationTimeout)
setOperationTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public int getOperationTimeout()
getOperationTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public void setRpcTimeout(int rpcTimeout)
setRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public int getRpcTimeout()
getRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public int getReadRpcTimeout()
getReadRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public int getWriteRpcTimeout()
getWriteRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Table

public void setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor)
public static String getTargetTableName(String tableNameString, String familyString, org.apache.hadoop.conf.Configuration conf)
public static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperation buildObTableBatchOperation(List<org.apache.hadoop.hbase.Cell> keyValueList, boolean putToAppend, List<byte[]> qualifiers)
public static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableOperation buildObTableOperation(org.apache.hadoop.hbase.Cell kv,
boolean putToAppend)
public static com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperationRequest buildObTableBatchOperationRequest(com.alipay.oceanbase.rpc.protocol.payload.impl.execute.ObTableBatchOperation obTableBatchOperation,
String targetTableName)
public static void checkFamilyViolation(Collection<byte[]> families, boolean check_empty_family)
public static void checkFamilyViolationForOneFamily(Collection<byte[]> families)
public void refreshTableEntry(String familyString, boolean hasTestLoad) throws Exception
抛出: Exception

public byte[][] getStartKeys()
    throws IOException
抛出: IOException

public byte[][] getEndKeys()
    throws IOException
抛出: IOException

public org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> getStartEndKeys()
    throws IOException
抛出: IOException

public static OHTable.OHOpType getDeleteType(org.apache.hadoop.hbase.Cell.Type type)
Copyright © 2025. All rights reserved.