public class OHTableClient extends Object implements org.apache.hadoop.hbase.client.Table, Lifecycle
| Constructor and Description |
|---|
OHTableClient(String tableNameString,
org.apache.hadoop.conf.Configuration conf)
The constructor.
|
| Modifier and Type | Method and Description |
|---|---|
org.apache.hadoop.hbase.client.Result |
append(org.apache.hadoop.hbase.client.Append append) |
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results) |
<R> void |
batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder |
checkAndMutate(byte[] row,
byte[] family) |
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutations) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
void |
close()
Closes this resource, relinquishing any underlying resources.
|
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel |
coprocessorService(byte[] row) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
void |
delete(org.apache.hadoop.hbase.client.Delete delete) |
void |
delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
boolean |
exists(org.apache.hadoop.hbase.client.Get get) |
boolean[] |
exists(List<org.apache.hadoop.hbase.client.Get> gets) |
boolean[] |
existsAll(List<org.apache.hadoop.hbase.client.Get> list) |
org.apache.hadoop.hbase.client.Result |
get(org.apache.hadoop.hbase.client.Get get) |
org.apache.hadoop.hbase.client.Result[] |
get(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.conf.Configuration |
getConfiguration() |
org.apache.hadoop.hbase.client.TableDescriptor |
getDescriptor() |
byte[][] |
getEndKeys() |
org.apache.hadoop.hbase.TableName |
getName() |
int |
getOperationTimeout() |
int |
getRpcTimeout() |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family,
byte[] qualifier) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(org.apache.hadoop.hbase.client.Scan scan) |
List<org.apache.hadoop.hbase.client.ResultScanner> |
getScanners(org.apache.hadoop.hbase.client.Scan scan) |
org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> |
getStartEndKeys() |
byte[][] |
getStartKeys() |
org.apache.hadoop.hbase.HTableDescriptor |
getTableDescriptor() |
String |
getTableNameString() |
org.apache.hadoop.hbase.client.Result |
increment(org.apache.hadoop.hbase.client.Increment increment) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability) |
void |
init()
Initializes the OHTableClient; it must be initialized before use.
|
void |
mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
void |
put(List<org.apache.hadoop.hbase.client.Put> puts) |
void |
put(org.apache.hadoop.hbase.client.Put put) |
void |
refreshTableEntry(String familyString,
boolean hasTestLoad) |
void |
setOperationTimeout(int i) |
void |
setRpcTimeout(int i) |
void |
setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor) |
void |
setTableNameString(String tableNameString) |
public OHTableClient(String tableNameString, org.apache.hadoop.conf.Configuration conf)
NOTE: Required parameters in conf:
Configuration conf = new Configuration();
conf.set(HBASE_OCEANBASE_PARAM_URL, "http://param_url.com?database=test");
conf.set(HBASE_OCEANBASE_FULL_USER_NAME, "username");
conf.set(HBASE_OCEANBASE_PASSWORD, "password");
conf.set(HBASE_OCEANBASE_SYS_USER_NAME, "sys_user_name");
conf.set(HBASE_OCEANBASE_SYS_PASSWORD, "sys_password");
Parameters:
tableNameString - the table name
conf - the configuration
public void setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor)
public void init()
throws Exception
public void close()
throws IOException
Specified by: close in interface Lifecycle.
While this interface method is declared to throw Exception, implementers are strongly encouraged to
declare concrete implementations of the close method to
throw more specific exceptions, or to throw no exception at all
if the close operation cannot fail.
Cases where the close operation may fail require careful
attention by implementers. It is strongly advised to relinquish
the underlying resources and to internally mark the
resource as closed, prior to throwing the exception. The close method is unlikely to be invoked more than once and so
this ensures that the resources are released in a timely manner.
Furthermore it reduces problems that could arise when the resource
wraps, or is wrapped, by another resource.
Implementers of this interface are also strongly advised
to not have the close method throw InterruptedException.
This exception interacts with a thread's interrupted status,
and runtime misbehavior is likely to occur if an InterruptedException is suppressed.
More generally, if it would cause problems for an
exception to be suppressed, the AutoCloseable.close
method should not throw it.
Note that unlike the close
method of Closeable, this close method
is not required to be idempotent. In other words,
calling this close method more than once may have some
visible side effect, unlike Closeable.close which is
required to have no effect if called more than once.
However, implementers of this interface are strongly encouraged
to make their close methods idempotent.
Specified by: close in interfaces Lifecycle, Closeable, AutoCloseable, and org.apache.hadoop.hbase.client.Table.
Throws: IOException
public org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel coprocessorService(byte[] row)
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Tablepublic <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Tablecom.google.protobuf.ServiceExceptionThrowablepublic <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.Tablecom.google.protobuf.ServiceExceptionThrowablepublic <R extends com.google.protobuf.Message> Map<byte[],R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Tablecom.google.protobuf.ServiceExceptionThrowablepublic <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback)
throws com.google.protobuf.ServiceException,
Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.Tablecom.google.protobuf.ServiceExceptionThrowablepublic boolean checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutations)
throws IOException
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder checkAndMutate(byte[] row,
byte[] family)
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.Tablepublic void setOperationTimeout(int i)
setOperationTimeout 在接口中 org.apache.hadoop.hbase.client.Tablepublic int getOperationTimeout()
getOperationTimeout 在接口中 org.apache.hadoop.hbase.client.Tablepublic void setRpcTimeout(int i)
setRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Tablepublic int getRpcTimeout()
getRpcTimeout 在接口中 org.apache.hadoop.hbase.client.Tablepublic org.apache.hadoop.hbase.TableName getName()
getName 在接口中 org.apache.hadoop.hbase.client.Tablepublic org.apache.hadoop.conf.Configuration getConfiguration()
getConfiguration 在接口中 org.apache.hadoop.hbase.client.Tablepublic org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor()
throws IOException
getTableDescriptor 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.TableDescriptor getDescriptor()
throws IOException
getDescriptor 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean exists(org.apache.hadoop.hbase.client.Get get)
throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean[] existsAll(List<org.apache.hadoop.hbase.client.Get> list) throws IOException
existsAll 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean[] exists(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException, InterruptedException
batch 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionInterruptedExceptionpublic <R> void batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
batchCallback 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionInterruptedExceptionpublic org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get)
throws IOException
get 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
get 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family,
byte[] qualifier)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic List<org.apache.hadoop.hbase.client.ResultScanner> getScanners(org.apache.hadoop.hbase.client.Scan scan) throws IOException
IOExceptionpublic void put(org.apache.hadoop.hbase.client.Put put)
throws IOException
put 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException
put 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void delete(org.apache.hadoop.hbase.client.Delete delete)
throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm)
throws IOException
mutateRow 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append)
throws IOException
append 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment)
throws IOException
increment 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic String getTableNameString()
public void setTableNameString(String tableNameString)
public void refreshTableEntry(String familyString, boolean hasTestLoad) throws Exception
Exceptionpublic byte[][] getStartKeys()
throws IOException
IOExceptionpublic byte[][] getEndKeys()
throws IOException
IOExceptionpublic org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> getStartEndKeys()
throws IOException
Throws: IOException
Copyright © 2025. All rights reserved.