public class GridGgfsHadoopFileSystem extends org.apache.hadoop.fs.AbstractFileSystem implements Closeable
GGFS Hadoop 2.x file system driver over file system API. To use
GGFS as Hadoop file system, you should configure this class
in Hadoop's core-site.xml as follows:
<property>
<name>fs.default.name</name>
<value>ggfs://ipc</value>
</property>
<property>
<name>fs.ggfs.impl</name>
<value>org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopFileSystem</value>
</property>
You should also add GridGain JAR and all libraries to Hadoop classpath. To
do this, add following lines to conf/hadoop-env.sh script in Hadoop
distribution:
export GRIDGAIN_HOME=/path/to/GridGain/distribution
export HADOOP_CLASSPATH=$GRIDGAIN_HOME/gridgain*.jar
for f in $GRIDGAIN_HOME/libs/*.jar; do export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f; done
GGFS client nodes are usually started together with Hadoop
job-submitter or job-scheduler processes, while data nodes are usually
started together with Hadoop task-tracker processes.
For sample client and data node configuration refer to config/hadoop/default-config-client.xml
and config/hadoop/default-config.xml configuration files in GridGain installation.
| Constructor and Description |
|---|
GridGgfsHadoopFileSystem(URI name,
org.apache.hadoop.conf.Configuration cfg) |
| Modifier and Type | Method and Description |
|---|---|
void |
checkPath(org.apache.hadoop.fs.Path path) |
void |
close() |
void |
colocateFileWrites(boolean colocateFileWrites)
Public setter that can be used by direct users of FS or Visor.
|
org.apache.hadoop.fs.FSDataOutputStream |
createInternal(org.apache.hadoop.fs.Path f,
EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
org.apache.hadoop.fs.permission.FsPermission perm,
int bufSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt,
boolean createParent) |
boolean |
delete(org.apache.hadoop.fs.Path f,
boolean recursive) |
protected void |
finalize() |
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.Path path,
long start,
long len) |
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f) |
org.apache.hadoop.fs.FileStatus |
getFileStatus(org.apache.hadoop.fs.Path f) |
org.apache.hadoop.fs.FsStatus |
getFsStatus() |
org.apache.hadoop.fs.FsServerDefaults |
getServerDefaults() |
URI |
getUri() |
int |
getUriDefaultPort() |
org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.Path f) |
void |
mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission perm,
boolean createParent) |
org.gridgain.grid.ggfs.GridGgfsMode |
mode(org.apache.hadoop.fs.Path path)
Resolve path mode.
|
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.Path f,
int bufSize) |
void |
renameInternal(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst) |
void |
setOwner(org.apache.hadoop.fs.Path p,
String usr,
String grp) |
void |
setPermission(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.permission.FsPermission perm) |
boolean |
setReplication(org.apache.hadoop.fs.Path f,
short replication) |
void |
setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime) |
void |
setVerifyChecksum(boolean verifyChecksum) |
boolean |
supportsSymlinks() |
String |
toString() |
checkScheme, clearStatistics, create, createFileSystem, createSymlink, equals, get, getAclStatus, getAllStatistics, getCanonicalServiceName, getDelegationTokens, getFileLinkStatus, getFsStatus, getHomeDirectory, getInitialWorkingDirectory, getLinkTarget, getStatistics, getStatistics, getUriPath, hashCode, isValidName, listCorruptFileBlocks, listLocatedStatus, listStatusIterator, makeQualified, modifyAclEntries, open, printStatistics, removeAcl, removeAclEntries, removeDefaultAcl, rename, renameInternal, resolvePath, setAcl
public GridGgfsHadoopFileSystem(URI name, org.apache.hadoop.conf.Configuration cfg) throws URISyntaxException, IOException
Parameters: name - URI for file system. cfg - Configuration.
Throws: URISyntaxException - if name has invalid syntax. IOException - If initialization failed.
public void checkPath(org.apache.hadoop.fs.Path path)
Overrides: checkPath in class org.apache.hadoop.fs.AbstractFileSystem
public void colocateFileWrites(boolean colocateFileWrites)
Parameters: colocateFileWrites - Whether all ongoing file writes should be colocated.
public void close()
throws IOException
Specified by: close in interface Closeable; close in interface AutoCloseable
Throws: IOException
protected void finalize()
throws Throwable
public URI getUri()
Overrides: getUri in class org.apache.hadoop.fs.AbstractFileSystem
public int getUriDefaultPort()
Overrides: getUriDefaultPort in class org.apache.hadoop.fs.AbstractFileSystem
public org.apache.hadoop.fs.FsServerDefaults getServerDefaults()
throws IOException
Overrides: getServerDefaults in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public boolean setReplication(org.apache.hadoop.fs.Path f,
short replication)
throws IOException
Overrides: setReplication in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public void setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime)
throws IOException
Overrides: setTimes in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FsStatus getFsStatus()
throws IOException
Overrides: getFsStatus in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public void setPermission(org.apache.hadoop.fs.Path p,
org.apache.hadoop.fs.permission.FsPermission perm)
throws IOException
Overrides: setPermission in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public void setOwner(org.apache.hadoop.fs.Path p,
String usr,
String grp)
throws IOException
Overrides: setOwner in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path f,
int bufSize)
throws IOException
Overrides: open in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FSDataOutputStream createInternal(org.apache.hadoop.fs.Path f,
EnumSet<org.apache.hadoop.fs.CreateFlag> flag,
org.apache.hadoop.fs.permission.FsPermission perm,
int bufSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
org.apache.hadoop.fs.Options.ChecksumOpt checksumOpt,
boolean createParent)
throws IOException
Overrides: createInternal in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public boolean supportsSymlinks()
Overrides: supportsSymlinks in class org.apache.hadoop.fs.AbstractFileSystem
public void renameInternal(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst)
throws IOException
Overrides: renameInternal in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public boolean delete(org.apache.hadoop.fs.Path f,
boolean recursive)
throws IOException
Overrides: delete in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public void setVerifyChecksum(boolean verifyChecksum)
throws IOException
Overrides: setVerifyChecksum in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f)
throws IOException
Overrides: getFileChecksum in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.Path f)
throws IOException
Overrides: listStatus in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public void mkdir(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission perm,
boolean createParent)
throws IOException
Overrides: mkdir in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.FileStatus getFileStatus(org.apache.hadoop.fs.Path f)
throws IOException
Overrides: getFileStatus in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.Path path,
long start,
long len)
throws IOException
Overrides: getFileBlockLocations in class org.apache.hadoop.fs.AbstractFileSystem
Throws: IOException
public org.gridgain.grid.ggfs.GridGgfsMode mode(org.apache.hadoop.fs.Path path)
Parameters: path - HDFS path.
Copyright © 2014. All rights reserved.