public class AliHBaseUETable extends org.apache.hadoop.hbase.client.HTable implements AliHBaseTableInterface, org.apache.hadoop.hbase.client.RegionLocator
| 构造器和说明 |
|---|
AliHBaseUETable(org.apache.hadoop.hbase.TableName tableName,
AliHBaseUEConnection connection) |
| 限定符和类型 | 方法和说明 |
|---|---|
void |
addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexConfig config,
com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields) |
void |
addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexConfig config,
List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields) |
void |
addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields) |
void |
addExternalIndex(List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields) |
void |
addExternalIndex(String targetIndexName,
com.alibaba.lindorm.client.core.meta.ExternalIndexType type,
com.alibaba.lindorm.client.core.meta.ExternalIndexRowFormatterType rowFormatterType,
com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields) |
void |
addExternalIndex(String targetIndexName,
com.alibaba.lindorm.client.core.meta.ExternalIndexType type,
com.alibaba.lindorm.client.core.meta.ExternalIndexRowFormatterType rowFormatterType,
List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields)
Add the fields to index to the TableDescriptor
|
org.apache.hadoop.hbase.client.Result |
append(org.apache.hadoop.hbase.client.Append append) |
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results) |
void |
batch(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results,
int rpcTimeout) |
<R> void |
batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions,
Object[] results,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype) |
<R extends com.google.protobuf.Message> |
batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
void |
buildExternalIndex()
build index for data in the table
|
void |
cancelBuildExternalIndex()
Cancel the currently building external index job
|
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
boolean |
checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete) |
org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder |
checkAndMutate(byte[] row,
byte[] family) |
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation) |
boolean |
checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
boolean |
checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.Put put) |
void |
clearRegionCache() |
void |
close() |
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel |
coprocessorService(byte[] row) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) |
<T extends com.google.protobuf.Service,R> |
coprocessorService(Class<T> service,
byte[] startKey,
byte[] endKey,
org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
void |
delete(org.apache.hadoop.hbase.client.Delete delete) |
void |
delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
boolean |
exists(org.apache.hadoop.hbase.client.Get get) |
boolean[] |
exists(List<org.apache.hadoop.hbase.client.Get> gets) |
boolean[] |
existsAll(List<org.apache.hadoop.hbase.client.Get> gets) |
org.apache.hadoop.hbase.client.Result |
get(org.apache.hadoop.hbase.client.Get get) |
org.apache.hadoop.hbase.client.Result[] |
get(List<org.apache.hadoop.hbase.client.Get> gets) |
List<org.apache.hadoop.hbase.HRegionLocation> |
getAllRegionLocations() |
org.apache.hadoop.conf.Configuration |
getConfiguration() |
protected org.apache.hadoop.hbase.client.Connection |
getConnection() |
org.apache.hadoop.hbase.client.TableDescriptor |
getDescriptor() |
byte[][] |
getEndKeys() |
org.apache.hadoop.hbase.TableName |
getName() |
int |
getOperationTimeout() |
long |
getOperationTimeout(TimeUnit unit) |
int |
getReadRpcTimeout() |
long |
getReadRpcTimeout(TimeUnit unit) |
org.apache.hadoop.hbase.HRegionLocation |
getRegionLocation(byte[] row) |
org.apache.hadoop.hbase.HRegionLocation |
getRegionLocation(byte[] row,
boolean b) |
org.apache.hadoop.hbase.client.RegionLocator |
getRegionLocator() |
int |
getRpcTimeout() |
long |
getRpcTimeout(TimeUnit unit) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(byte[] family,
byte[] qualifier) |
org.apache.hadoop.hbase.client.ResultScanner |
getScanner(org.apache.hadoop.hbase.client.Scan scan) |
org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> |
getStartEndKeys() |
byte[][] |
getStartKeys() |
org.apache.hadoop.hbase.HTableDescriptor |
getTableDescriptor() |
int |
getWriteRpcTimeout() |
long |
getWriteRpcTimeout(TimeUnit unit) |
org.apache.hadoop.hbase.client.Result |
increment(org.apache.hadoop.hbase.client.Increment increment) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount) |
long |
incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability) |
void |
mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
<R> void |
processBatchCallback(List<? extends org.apache.hadoop.hbase.client.Row> list,
Object[] results,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
void |
put(List<org.apache.hadoop.hbase.client.Put> puts) |
void |
put(org.apache.hadoop.hbase.client.Put put) |
void |
removeExternalIndex(List<String> fields)
remove indexed field
|
void |
removeExternalIndex(String... fields) |
void |
setOperationTimeout(int operationTimeout) |
void |
setReadRpcTimeout(int readRpcTimeout) |
void |
setRpcTimeout(int rpcTimeout) |
void |
setWriteRpcTimeout(int writeRpcTimeout) |
void |
validatePut(org.apache.hadoop.hbase.client.Put put) |
public AliHBaseUETable(org.apache.hadoop.hbase.TableName tableName,
AliHBaseUEConnection connection)
throws IOException
IOExceptionpublic org.apache.hadoop.hbase.HRegionLocation getRegionLocation(byte[] row)
throws IOException
getRegionLocation 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic org.apache.hadoop.hbase.HRegionLocation getRegionLocation(byte[] row,
boolean b)
throws IOException
getRegionLocation 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic List<org.apache.hadoop.hbase.HRegionLocation> getAllRegionLocations() throws IOException
getAllRegionLocations 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic byte[][] getStartKeys()
throws IOException
getStartKeys 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic byte[][] getEndKeys()
throws IOException
getEndKeys 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic org.apache.hadoop.hbase.util.Pair<byte[][],byte[][]> getStartEndKeys()
throws IOException
getStartEndKeys 在接口中 org.apache.hadoop.hbase.client.RegionLocatorIOExceptionpublic org.apache.hadoop.hbase.TableName getName()
getName 在接口中 org.apache.hadoop.hbase.client.RegionLocatorgetName 在接口中 org.apache.hadoop.hbase.client.TablegetName 在类中 org.apache.hadoop.hbase.client.HTablepublic org.apache.hadoop.conf.Configuration getConfiguration()
getConfiguration 在接口中 org.apache.hadoop.hbase.client.TablegetConfiguration 在类中 org.apache.hadoop.hbase.client.HTablepublic org.apache.hadoop.hbase.client.TableDescriptor getDescriptor()
throws IOException
getDescriptor 在接口中 org.apache.hadoop.hbase.client.TablegetDescriptor 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean exists(org.apache.hadoop.hbase.client.Get get)
throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.Tableexists 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean[] exists(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
exists 在接口中 org.apache.hadoop.hbase.client.Tableexists 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException
batch 在接口中 org.apache.hadoop.hbase.client.Tablebatch 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic <R> void batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException
batchCallback 在接口中 org.apache.hadoop.hbase.client.TablebatchCallback 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get)
throws IOException
get 在接口中 org.apache.hadoop.hbase.client.Tableget 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
get 在接口中 org.apache.hadoop.hbase.client.Tableget 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TablegetScanner 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TablegetScanner 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family,
byte[] qualifier)
throws IOException
getScanner 在接口中 org.apache.hadoop.hbase.client.TablegetScanner 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void put(org.apache.hadoop.hbase.client.Put put)
throws IOException
put 在接口中 org.apache.hadoop.hbase.client.Tableput 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException
put 在接口中 org.apache.hadoop.hbase.client.Tableput 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void delete(org.apache.hadoop.hbase.client.Delete delete)
throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.Tabledelete 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
delete 在接口中 org.apache.hadoop.hbase.client.Tabledelete 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder checkAndMutate(byte[] row,
byte[] family)
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.TablecheckAndMutate 在类中 org.apache.hadoop.hbase.client.HTablepublic boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.TablecheckAndPut 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.TablecheckAndPut 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.Put put)
throws IOException
checkAndPut 在接口中 org.apache.hadoop.hbase.client.TablecheckAndPut 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.TablecheckAndDelete 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.TablecheckAndDelete 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndDelete(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.Delete delete)
throws IOException
checkAndDelete 在接口中 org.apache.hadoop.hbase.client.TablecheckAndDelete 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp compareOp,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation)
throws IOException
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.TablecheckAndMutate 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic boolean checkAndMutate(byte[] row,
byte[] family,
byte[] qualifier,
org.apache.hadoop.hbase.CompareOperator op,
byte[] value,
org.apache.hadoop.hbase.client.RowMutations mutation)
throws IOException
checkAndMutate 在接口中 org.apache.hadoop.hbase.client.TablecheckAndMutate 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm)
throws IOException
mutateRow 在接口中 org.apache.hadoop.hbase.client.TablemutateRow 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append)
throws IOException
append 在接口中 org.apache.hadoop.hbase.client.Tableappend 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment)
throws IOException
increment 在接口中 org.apache.hadoop.hbase.client.Tableincrement 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void close()
throws IOException
close 在接口中 Closeableclose 在接口中 AutoCloseableclose 在接口中 org.apache.hadoop.hbase.client.Tableclose 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic long getRpcTimeout(TimeUnit unit)
getRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic long getReadRpcTimeout(TimeUnit unit)
getReadRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetReadRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic long getWriteRpcTimeout(TimeUnit unit)
getWriteRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetWriteRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic long getOperationTimeout(TimeUnit unit)
getOperationTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetOperationTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel coprocessorService(byte[] row)
coprocessorService 在接口中 org.apache.hadoop.hbase.client.TablecoprocessorService 在类中 org.apache.hadoop.hbase.client.HTablepublic <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.TablecoprocessorService 在类中 org.apache.hadoop.hbase.client.HTablecom.google.protobuf.ServiceExceptionThrowablepublic <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
coprocessorService 在接口中 org.apache.hadoop.hbase.client.TablecoprocessorService 在类中 org.apache.hadoop.hbase.client.HTablecom.google.protobuf.ServiceExceptionThrowablepublic void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, int rpcTimeout) throws InterruptedException, IOException
batch 在类中 org.apache.hadoop.hbase.client.HTableInterruptedExceptionIOExceptionpublic <R> void processBatchCallback(List<? extends org.apache.hadoop.hbase.client.Row> list, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
processBatchCallback 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionInterruptedExceptionpublic org.apache.hadoop.hbase.client.RegionLocator getRegionLocator()
getRegionLocator 在类中 org.apache.hadoop.hbase.client.HTableprotected org.apache.hadoop.hbase.client.Connection getConnection()
getConnection 在类中 org.apache.hadoop.hbase.client.HTablepublic org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor()
throws IOException
getTableDescriptor 在接口中 org.apache.hadoop.hbase.client.TablegetTableDescriptor 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.TableincrementColumnValue 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic long incrementColumnValue(byte[] row,
byte[] family,
byte[] qualifier,
long amount,
org.apache.hadoop.hbase.client.Durability durability)
throws IOException
incrementColumnValue 在接口中 org.apache.hadoop.hbase.client.TableincrementColumnValue 在类中 org.apache.hadoop.hbase.client.HTableIOExceptionpublic void validatePut(org.apache.hadoop.hbase.client.Put put)
throws IllegalArgumentException
validatePut 在类中 org.apache.hadoop.hbase.client.HTableIllegalArgumentExceptionpublic void clearRegionCache()
clearRegionCache 在类中 org.apache.hadoop.hbase.client.HTablepublic int getRpcTimeout()
getRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic void setRpcTimeout(int rpcTimeout)
setRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablesetRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic int getReadRpcTimeout()
getReadRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetReadRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic void setReadRpcTimeout(int readRpcTimeout)
setReadRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablesetReadRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic int getWriteRpcTimeout()
getWriteRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetWriteRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic void setWriteRpcTimeout(int writeRpcTimeout)
setWriteRpcTimeout 在接口中 org.apache.hadoop.hbase.client.TablesetWriteRpcTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic int getOperationTimeout()
getOperationTimeout 在接口中 org.apache.hadoop.hbase.client.TablegetOperationTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic void setOperationTimeout(int operationTimeout)
setOperationTimeout 在接口中 org.apache.hadoop.hbase.client.TablesetOperationTimeout 在类中 org.apache.hadoop.hbase.client.HTablepublic <R extends com.google.protobuf.Message> Map<byte[],R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.TablebatchCoprocessorService 在类中 org.apache.hadoop.hbase.client.HTablecom.google.protobuf.ServiceExceptionThrowablepublic <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback)
throws com.google.protobuf.ServiceException,
Throwable
batchCoprocessorService 在接口中 org.apache.hadoop.hbase.client.TablebatchCoprocessorService 在类中 org.apache.hadoop.hbase.client.HTablecom.google.protobuf.ServiceExceptionThrowablepublic boolean[] existsAll(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
existsAll 在接口中 org.apache.hadoop.hbase.client.TableIOExceptionpublic void addExternalIndex(String targetIndexName, com.alibaba.lindorm.client.core.meta.ExternalIndexType type, com.alibaba.lindorm.client.core.meta.ExternalIndexRowFormatterType rowFormatterType, List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields) throws IOException
AliHBaseTableInterfaceaddExternalIndex 在接口中 AliHBaseTableInterfacetargetIndexName - the target index name; for example, if the indexer is Solr, then it will
be the collection name; if the indexer is ES, then it will be the index name. The
targetIndexName will be set to TableIndexConfig and stored in TableDescriptor.
TargetIndexName will be compared each time if index field changed. The fields in
one table can only index to the same target. Set targetIndexName to null to use
the stored targetIndexName directly.type - the indexer type, only SOLR is supported for now.
All fields in one table can only be indexed to one indexer, so the type will be
compared with the previous stored. Set type to null to use the stored type directly.rowFormatterType - Rowkey of HBase KeyValue will be indexed to a unique field named 'id' in the external index.
User may use this 'id' field to read back the whole row from HBase. The 'id' field is String type
and rowkey of HBase is byte[]. RowFormatterType is used to tell the system how to index
rowkey of HBase KeyValue to a unique id in the external indexer.
If your rowkey can convert to a string, then use ExternalIndexRowFormatterType.STRING.
It will use Bytes.toString(byte[]) to convert rowkey to a unique "id" string in the external indexer.
Otherwise use HEX, which will use org.apache.commons.codec.binary.Hex.encodeAsString(byte[]) to
convert rowkey to a unique "id" string in the external indexfields - the field to add. To define a column to index, an ExternalIndexField class needs to be
constructed. For example, if we want to index f:q1 in Solr, the field name in Solr
for this field is q1_s, and the data stored in f:q1 is string, type will be STRING.
Then the code will be like this:
ExternalIndexField field = new ExternalIndexField("f", "q1", "q1_s", ValueType.STRING)
When indexing this field, Bytes.toString(f:q1) will be used to convert it to a String;
when type=LONG, then Bytes.toLong(f:q1) will be used. It is important to set the field type right,
otherwise the conversion will be wrong and end up in data corruption.
Type supported:INT, LONG, STRING, BOOLEAN, FLOAT, DOUBLE, SHORT("short"), BIGDECIMALIOExceptionpublic void addExternalIndex(String targetIndexName, com.alibaba.lindorm.client.core.meta.ExternalIndexType type, com.alibaba.lindorm.client.core.meta.ExternalIndexRowFormatterType rowFormatterType, com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields) throws IOException
public void addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexConfig config,
List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields)
throws IOException
addExternalIndex 在接口中 AliHBaseTableInterfaceconfig - see ExternalIndexConfig for more details, config target index name, index type and rowformatter type inside config.IOExceptionAliHBaseTableInterface.addExternalIndex(String, ExternalIndexType, ExternalIndexRowFormatterType, List)public void addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexConfig config,
com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields)
throws IOException
addExternalIndex 在接口中 AliHBaseTableInterfaceconfig - see ExternalIndexConfig for more details, config target index name, index type and rowformatter type inside config.IOExceptionAliHBaseTableInterface.addExternalIndex(String, ExternalIndexType, ExternalIndexRowFormatterType, List)public void addExternalIndex(List<com.alibaba.lindorm.client.core.meta.ExternalIndexField> fields) throws IOException
public void addExternalIndex(com.alibaba.lindorm.client.core.meta.ExternalIndexField... fields)
throws IOException
addExternalIndex 在接口中 AliHBaseTableInterfaceIOExceptionfor detailspublic void removeExternalIndex(List<String> fields) throws IOException
AliHBaseTableInterfaceremoveExternalIndex 在接口中 AliHBaseTableInterfacefields - the fields to remove; each field name should include the family name and qualifier name.
For example, if we want to stop indexing a column with family 'f' and qualifier name 'q1',
then the field name will be 'f:q1'IOExceptionpublic void removeExternalIndex(String... fields) throws IOException
removeExternalIndex 在接口中 AliHBaseTableInterfaceIOExceptionpublic void buildExternalIndex()
throws IOException
AliHBaseTableInterfacebuildExternalIndex 在接口中 AliHBaseTableInterfaceIOExceptionpublic void cancelBuildExternalIndex()
throws IOException
AliHBaseTableInterfacecancelBuildExternalIndex 在接口中 AliHBaseTableInterfaceIOExceptionCopyright © 2019. All rights reserved.