public class GoogleHadoopFS
extends AbstractFileSystem
| Constructor and Description |
|---|
GoogleHadoopFS(GoogleHadoopFileSystem ghfs,
java.net.URI uri,
org.apache.hadoop.conf.Configuration conf) |
GoogleHadoopFS(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf) |
| Modifier and Type | Method and Description |
|---|---|
void |
checkPath(org.apache.hadoop.fs.Path path)
Only accept valid AbstractFileSystem and GoogleHadoopFileSystem Paths.
|
org.apache.hadoop.fs.FSDataOutputStream |
createInternal(org.apache.hadoop.fs.Path file,
java.util.EnumSet<CreateFlag> flag,
org.apache.hadoop.fs.permission.FsPermission absolutePermission,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
ChecksumOpt checksumOpt,
boolean createParent) |
boolean |
delete(org.apache.hadoop.fs.Path f,
boolean recursive) |
org.apache.hadoop.fs.BlockLocation[] |
getFileBlockLocations(org.apache.hadoop.fs.Path f,
long start,
long len) |
org.apache.hadoop.fs.FileChecksum |
getFileChecksum(org.apache.hadoop.fs.Path f) |
org.apache.hadoop.fs.FileStatus |
getFileStatus(org.apache.hadoop.fs.Path f) |
FsStatus |
getFsStatus() |
FsServerDefaults |
getServerDefaults() |
java.net.URI |
getUri()
This is overridden to use GoogleHadoopFileSystem's URI, because AbstractFileSystem appends the
default port to the authority.
|
int |
getUriDefaultPort() |
boolean |
isValidName(java.lang.String src)
Follow HDFS conventions except allow for ':' in paths.
|
org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.Path f) |
void |
mkdir(org.apache.hadoop.fs.Path dir,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean createParent) |
org.apache.hadoop.fs.FSDataInputStream |
open(org.apache.hadoop.fs.Path f,
int bufferSize) |
void |
renameInternal(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst) |
void |
setOwner(org.apache.hadoop.fs.Path f,
java.lang.String username,
java.lang.String groupname) |
void |
setPermission(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission) |
boolean |
setReplication(org.apache.hadoop.fs.Path f,
short replication) |
void |
setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime) |
void |
setVerifyChecksum(boolean verifyChecksum) |
public GoogleHadoopFS(java.net.URI uri,
org.apache.hadoop.conf.Configuration conf)
throws java.net.URISyntaxException,
java.io.IOException
Throws: java.net.URISyntaxException, java.io.IOException

public GoogleHadoopFS(GoogleHadoopFileSystem ghfs, java.net.URI uri, org.apache.hadoop.conf.Configuration conf) throws java.net.URISyntaxException, java.io.IOException
Throws: java.net.URISyntaxException, java.io.IOException

public org.apache.hadoop.fs.FSDataOutputStream createInternal(org.apache.hadoop.fs.Path file,
java.util.EnumSet<CreateFlag> flag,
org.apache.hadoop.fs.permission.FsPermission absolutePermission,
int bufferSize,
short replication,
long blockSize,
org.apache.hadoop.util.Progressable progress,
ChecksumOpt checksumOpt,
boolean createParent)
throws java.io.IOException
Throws: java.io.IOException

public int getUriDefaultPort()
public java.net.URI getUri()
public boolean isValidName(java.lang.String src)
public void checkPath(org.apache.hadoop.fs.Path path)
public FsServerDefaults getServerDefaults()
throws java.io.IOException
Throws: java.io.IOException

public void mkdir(org.apache.hadoop.fs.Path dir,
org.apache.hadoop.fs.permission.FsPermission permission,
boolean createParent)
throws java.io.IOException
Throws: java.io.IOException

public boolean delete(org.apache.hadoop.fs.Path f,
boolean recursive)
throws java.io.IOException
Throws: java.io.IOException

public org.apache.hadoop.fs.FSDataInputStream open(org.apache.hadoop.fs.Path f,
int bufferSize)
throws java.io.IOException
Throws: java.io.IOException

public boolean setReplication(org.apache.hadoop.fs.Path f,
short replication)
throws java.io.IOException
Throws: java.io.IOException

public void renameInternal(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dst)
throws java.io.IOException
Throws: java.io.IOException

public void setPermission(org.apache.hadoop.fs.Path f,
org.apache.hadoop.fs.permission.FsPermission permission)
throws java.io.IOException
Throws: java.io.IOException

public void setOwner(org.apache.hadoop.fs.Path f,
java.lang.String username,
java.lang.String groupname)
throws java.io.IOException
Throws: java.io.IOException

public void setTimes(org.apache.hadoop.fs.Path f,
long mtime,
long atime)
throws java.io.IOException
Throws: java.io.IOException

public org.apache.hadoop.fs.FileChecksum getFileChecksum(org.apache.hadoop.fs.Path f)
throws java.io.IOException
Throws: java.io.IOException

public org.apache.hadoop.fs.FileStatus getFileStatus(org.apache.hadoop.fs.Path f)
throws java.io.IOException
Throws: java.io.IOException

public org.apache.hadoop.fs.BlockLocation[] getFileBlockLocations(org.apache.hadoop.fs.Path f,
long start,
long len)
throws java.io.IOException
Throws: java.io.IOException

public FsStatus getFsStatus()
throws java.io.IOException
Throws: java.io.IOException

public org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.Path f)
throws java.io.IOException
Throws: java.io.IOException

public void setVerifyChecksum(boolean verifyChecksum)
Copyright © 2019. All rights reserved.