public class MetadataTableUtil extends Object
| Modifier and Type | Method and Description |
|---|---|
static void |
addBulkLoadInProgressFlag(AccumuloServerContext context,
String path) |
static void |
addDeleteEntries(KeyExtent extent,
Set<FileRef> datafilesToDelete,
ClientContext context) |
static void |
addDeleteEntry(AccumuloServerContext context,
String tableId,
String path) |
static void |
addLogEntry(ClientContext context,
LogEntry entry,
ZooLock zooLock) |
static void |
addTablet(KeyExtent extent,
String path,
ClientContext context,
char timeType,
ZooLock lock) |
static void |
chopped(AccumuloServerContext context,
KeyExtent extent,
ZooLock zooLock) |
static void |
cloneTable(ClientContext context,
String srcTableId,
String tableId,
VolumeManager volumeManager) |
static Mutation |
createDeleteMutation(String tableId,
String pathToRemove) |
static void |
createReplicationTable(ClientContext context)
During an upgrade from 1.6 to 1.7, we need to add the replication table
|
static void |
deleteTable(String tableId,
boolean insertDeletes,
ClientContext context,
ZooLock lock) |
static void |
finishSplit(KeyExtent extent,
Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove,
ClientContext context,
ZooLock zooLock) |
static void |
finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove,
ClientContext context,
ZooLock zooLock) |
static Map<FileRef,Long> |
getBulkFilesLoaded(ClientContext context,
KeyExtent extent) |
static List<FileRef> |
getBulkFilesLoaded(Connector conn,
KeyExtent extent,
long tid) |
static SortedMap<FileRef,DataFileValue> |
getDataFileSizes(KeyExtent extent,
ClientContext context) |
static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> |
getFileAndLogEntries(ClientContext context,
KeyExtent extent) |
static Iterator<LogEntry> |
getLogEntries(ClientContext context) |
static List<LogEntry> |
getLogEntries(ClientContext context,
KeyExtent extent) |
static Writer |
getMetadataTable(ClientContext context) |
static String |
getRootTabletDir() |
static SortedMap<org.apache.hadoop.io.Text,SortedMap<ColumnFQ,Value>> |
getTabletEntries(SortedMap<Key,Value> tabletKeyValues,
List<ColumnFQ> columns) |
static void |
moveMetaDeleteMarkers(ClientContext context)
During an upgrade we need to move deletion requests for files under the !METADATA table to the root tablet.
|
static void |
moveMetaDeleteMarkersFrom14(ClientContext context) |
static void |
removeBulkLoadEntries(Connector conn,
String tableId,
long tid) |
static void |
removeBulkLoadInProgressFlag(AccumuloServerContext context,
String path) |
static void |
removeScanFiles(KeyExtent extent,
Set<FileRef> scanFiles,
ClientContext context,
ZooLock zooLock) |
static void |
removeUnusedWALEntries(AccumuloServerContext context,
KeyExtent extent,
List<LogEntry> logEntries,
ZooLock zooLock) |
static void |
rollBackSplit(org.apache.hadoop.io.Text metadataEntry,
org.apache.hadoop.io.Text oldPrevEndRow,
ClientContext context,
ZooLock zooLock) |
static void |
setRootTabletDir(String dir) |
static void |
splitDatafiles(org.apache.hadoop.io.Text table,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
SortedMap<FileRef,DataFileValue> datafiles,
SortedMap<FileRef,DataFileValue> lowDatafileSizes,
SortedMap<FileRef,DataFileValue> highDatafileSizes,
List<FileRef> highDatafilesToRemove) |
static void |
splitTablet(KeyExtent extent,
org.apache.hadoop.io.Text oldPrevEndRow,
double splitRatio,
ClientContext context,
ZooLock zooLock) |
static void |
update(ClientContext context,
ZooLock zooLock,
Mutation m,
KeyExtent extent) |
static void |
update(Writer t,
ZooLock zooLock,
Mutation m) |
static void |
updateTabletCompactID(KeyExtent extent,
long compactID,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletDataFile(long tid,
KeyExtent extent,
Map<FileRef,DataFileValue> estSizes,
String time,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletDir(KeyExtent extent,
String newDir,
ClientContext context,
ZooLock lock) |
static void |
updateTabletFlushID(KeyExtent extent,
long flushID,
ClientContext context,
ZooLock zooLock) |
static void |
updateTabletVolumes(KeyExtent extent,
List<LogEntry> logsToRemove,
List<LogEntry> logsToAdd,
List<FileRef> filesToRemove,
SortedMap<FileRef,DataFileValue> filesToAdd,
String newDir,
ZooLock zooLock,
AccumuloServerContext context) |
public static Writer getMetadataTable(ClientContext context)
public static void update(ClientContext context, ZooLock zooLock, Mutation m, KeyExtent extent)
public static void updateTabletFlushID(KeyExtent extent, long flushID, ClientContext context, ZooLock zooLock)
public static void updateTabletCompactID(KeyExtent extent, long compactID, ClientContext context, ZooLock zooLock)
public static void updateTabletDataFile(long tid,
KeyExtent extent,
Map<FileRef,DataFileValue> estSizes,
String time,
ClientContext context,
ZooLock zooLock)
public static void updateTabletDir(KeyExtent extent, String newDir, ClientContext context, ZooLock lock)
public static void addTablet(KeyExtent extent, String path, ClientContext context, char timeType, ZooLock lock)
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove, SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, AccumuloServerContext context)
public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException
Throws: IOException

public static void rollBackSplit(org.apache.hadoop.io.Text metadataEntry,
org.apache.hadoop.io.Text oldPrevEndRow,
ClientContext context,
ZooLock zooLock)
public static void splitTablet(KeyExtent extent, org.apache.hadoop.io.Text oldPrevEndRow, double splitRatio, ClientContext context, ZooLock zooLock)
public static void finishSplit(org.apache.hadoop.io.Text metadataEntry,
Map<FileRef,DataFileValue> datafileSizes,
List<FileRef> highDatafilesToRemove,
ClientContext context,
ZooLock zooLock)
public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, ClientContext context, ZooLock zooLock)
public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, ClientContext context) throws IOException
Throws: IOException

public static void addDeleteEntry(AccumuloServerContext context, String tableId, String path) throws IOException
Throws: IOException

public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException
Throws: IOException

public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, ClientContext context, ZooLock zooLock)
public static void splitDatafiles(org.apache.hadoop.io.Text table,
org.apache.hadoop.io.Text midRow,
double splitRatio,
Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
SortedMap<FileRef,DataFileValue> datafiles,
SortedMap<FileRef,DataFileValue> lowDatafileSizes,
SortedMap<FileRef,DataFileValue> highDatafileSizes,
List<FileRef> highDatafilesToRemove)
public static void deleteTable(String tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException
Throws: AccumuloException, IOException

public static void addLogEntry(ClientContext context, LogEntry entry, ZooLock zooLock)
public static void setRootTabletDir(String dir) throws IOException
Throws: IOException

public static String getRootTabletDir() throws IOException
Throws: IOException

public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws org.apache.zookeeper.KeeperException, InterruptedException, IOException
Throws: org.apache.zookeeper.KeeperException, InterruptedException, IOException

public static List<LogEntry> getLogEntries(ClientContext context, KeyExtent extent) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
Throws: IOException, org.apache.zookeeper.KeeperException, InterruptedException

public static Iterator<LogEntry> getLogEntries(ClientContext context) throws IOException, org.apache.zookeeper.KeeperException, InterruptedException
Throws: IOException, org.apache.zookeeper.KeeperException, InterruptedException

public static void removeUnusedWALEntries(AccumuloServerContext context, KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock)
public static void cloneTable(ClientContext context, String srcTableId, String tableId, VolumeManager volumeManager) throws Exception
Throws: Exception

public static void chopped(AccumuloServerContext context, KeyExtent extent, ZooLock zooLock)
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception
Throws: Exception

public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException
Throws: IOException

public static Map<FileRef,Long> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException
Throws: IOException

public static void addBulkLoadInProgressFlag(AccumuloServerContext context, String path)
public static void removeBulkLoadInProgressFlag(AccumuloServerContext context, String path)
public static void createReplicationTable(ClientContext context) throws IOException
Throws: IOException

public static void moveMetaDeleteMarkers(ClientContext context)
public static void moveMetaDeleteMarkersFrom14(ClientContext context)
Copyright © 2015 Apache Accumulo Project. All rights reserved.