public class OHTableClient extends Object implements org.apache.hadoop.hbase.client.HTableInterface, Lifecycle
| 构造器和说明 |
|---|
OHTableClient(String tableNameString,
org.apache.hadoop.conf.Configuration conf)
The constructor.
|
| 限定符和类型 | 方法和说明 |
|---|---|
org.apache.hadoop.hbase.client.Result |
append(org.apache.hadoop.hbase.client.Append append) |
Object[] |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions) |
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
void |
close()
Closes this resource, relinquishing any underlying resources.
|
<T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol,R> Map<byte[],R> |
coprocessorExec(Class<T> protocol,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) |
<T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol,R> void |
coprocessorExec(Class<T> protocol,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol> T |
coprocessorProxy(Class<T> protocol,
byte[] row) |
void |
delete(org.apache.hadoop.hbase.client.Delete delete) |
void |
delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
boolean |
exists(org.apache.hadoop.hbase.client.Get get) |
void |
flushCommits() |
org.apache.hadoop.hbase.client.Result |
get(org.apache.hadoop.hbase.client.Get get) |
org.apache.hadoop.hbase.client.Result[] |
get(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.conf.Configuration |
getConfiguration() |
byte[][] |
getEndKeys() |
org.apache.hadoop.hbase.client.Result |
getRowOrBefore(byte[] row,
byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family,
byte[] qualifier) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(org.apache.hadoop.hbase.client.Scan scan) |
org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> |
getStartEndKeys() |
byte[][] |
getStartKeys() |
org.apache.hadoop.hbase.HTableDescriptor |
getTableDescriptor() |
byte[] |
getTableName() |
String |
getTableNameString() |
long |
getWriteBufferSize() |
org.apache.hadoop.hbase.client.Result |
increment(org.apache.hadoop.hbase.client.Increment increment) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
boolean writeToWAL) |
void |
init()
Initial the OHTableClient, must be init before use.
|
boolean |
isAutoFlush() |
org.apache.hadoop.hbase.client.RowLock |
lockRow(byte[] row) |
void |
mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
void |
put(List<org.apache.hadoop.hbase.client.Put> puts) |
void |
put(org.apache.hadoop.hbase.client.Put put) |
void |
refreshTableEntry(String familyString,
boolean hasTestLoad) |
void |
setAutoFlush(boolean autoFlush) |
void |
setAutoFlush(boolean autoFlush,
boolean clearBufferOnFail) |
void |
setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor) |
void |
setTableNameString(String tableNameString) |
void |
setWriteBufferSize(long writeBufferSize) |
void |
unlockRow(org.apache.hadoop.hbase.client.RowLock rl) |
public OHTableClient(String tableNameString, org.apache.hadoop.conf.Configuration conf)
NOTE: Required parameters in conf:
Configuration conf = new Configuration();
conf.set(HBASE_OCEANBASE_PARAM_URL, "http://param_url.com?database=test");
conf.set(HBASE_OCEANBASE_FULL_USER_NAME, "username");
conf.set(HBASE_OCEANBASE_PASSWORD, "password");
conf.set(HBASE_OCEANBASE_SYS_USER_NAME, "sys_user_name");
conf.set(HBASE_OCEANBASE_SYS_PASSWORD, "sys_password");
tableNameString - table name
conf - configure

public void setRuntimeBatchExecutor(ExecutorService runtimeBatchExecutor)
public void init()
throws Exception
public void close()
throws IOException
close 在接口中 Lifecycle
While this interface method is declared to throw Exception, implementers are strongly encouraged to
declare concrete implementations of the close method to
throw more specific exceptions, or to throw no exception at all
if the close operation cannot fail.
Cases where the close operation may fail require careful
attention by implementers. It is strongly advised to relinquish
the underlying resources and to internally mark the
resource as closed, prior to throwing the exception. The close method is unlikely to be invoked more than once and so
this ensures that the resources are released in a timely manner.
Furthermore it reduces problems that could arise when the resource
wraps, or is wrapped, by another resource.
Implementers of this interface are also strongly advised
to not have the close method throw InterruptedException.
This exception interacts with a thread's interrupted status,
and runtime misbehavior is likely to occur if an InterruptedException is suppressed.
More generally, if it would cause problems for an
exception to be suppressed, the AutoCloseable.close
method should not throw it.
Note that unlike the close
method of Closeable, this close method
is not required to be idempotent. In other words,
calling this close method more than once may have some
visible side effect, unlike Closeable.close which is
required to have no effect if called more than once.
However, implementers of this interface are strongly encouraged
to make their close methods idempotent.
close 在接口中 Lifecycle
close 在接口中 Closeable
close 在接口中 AutoCloseable
close 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.RowLock lockRow(byte[] row)
    throws IOException
lockRow 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void unlockRow(org.apache.hadoop.hbase.client.RowLock rl)
    throws IOException
unlockRow 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public <T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol> T coprocessorProxy(Class<T> protocol, byte[] row)
coprocessorProxy 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public <T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol,R> Map<byte[],R> coprocessorExec(Class<T> protocol, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws IOException, Throwable
coprocessorExec 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException, Throwable

public <T extends org.apache.hadoop.hbase.ipc.CoprocessorProtocol,R> void coprocessorExec(Class<T> protocol, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, Throwable
coprocessorExec 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException, Throwable

public void setAutoFlush(boolean autoFlush)
setAutoFlush 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public void setAutoFlush(boolean autoFlush,
    boolean clearBufferOnFail)
setAutoFlush 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public long getWriteBufferSize()
getWriteBufferSize 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public void setWriteBufferSize(long writeBufferSize)
    throws IOException
setWriteBufferSize 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public byte[] getTableName()
getTableName 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public org.apache.hadoop.conf.Configuration getConfiguration()
getConfiguration 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor()
    throws IOException
getTableDescriptor 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public boolean exists(org.apache.hadoop.hbase.client.Get get)
    throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException, InterruptedException
batch 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException, InterruptedException

public Object[] batch(List<? extends org.apache.hadoop.hbase.client.Row> actions) throws IOException, InterruptedException
batch 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException, InterruptedException

public org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get)
    throws IOException
get 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
get 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.Result getRowOrBefore(byte[] row,
byte[] family)
throws IOException
getRowOrBefore 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family,
    byte[] qualifier)
    throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void put(org.apache.hadoop.hbase.client.Put put)
    throws IOException
put 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException
put 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public boolean checkAndPut(byte[] row,
    byte[] family,
    byte[] qualifier,
    byte[] value,
    org.apache.hadoop.hbase.client.Put put)
    throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void delete(org.apache.hadoop.hbase.client.Delete delete)
    throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public boolean checkAndDelete(byte[] row,
    byte[] family,
    byte[] qualifier,
    byte[] value,
    org.apache.hadoop.hbase.client.Delete delete)
    throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm)
    throws IOException
mutateRow 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append)
    throws IOException
append 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment)
    throws IOException
increment 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public long incrementColumnValue(byte[] row,
    byte[] family,
    byte[] qualifier,
    long amount)
    throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public long incrementColumnValue(byte[] row,
    byte[] family,
    byte[] qualifier,
    long amount,
    boolean writeToWAL)
    throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public boolean isAutoFlush()
isAutoFlush 在接口中 org.apache.hadoop.hbase.client.HTableInterface

public void flushCommits()
    throws IOException
flushCommits 在接口中 org.apache.hadoop.hbase.client.HTableInterface
抛出: IOException

public String getTableNameString()

public void setTableNameString(String tableNameString)

public void refreshTableEntry(String familyString, boolean hasTestLoad) throws Exception
抛出: Exception

public byte[][] getStartKeys()
    throws IOException
抛出: IOException

public byte[][] getEndKeys()
    throws IOException
抛出: IOException

public org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> getStartEndKeys()
    throws IOException
抛出: IOException

Copyright © 2024. All rights reserved.