001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements. See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership. The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License. You may obtain a copy of the License at
009 *
010 * http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018 package org.apache.hadoop.hdfs;
019
020 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
021 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
022 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
023 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
024 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
025 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
026 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
027 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
028 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
029 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
030 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
031 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
032 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
033 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
034 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
035 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
036 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
037 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
038 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
039 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
040 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
041 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
042 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
043 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
044 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
045 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
046 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
047 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
048 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
049 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
050 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL;
051 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT;
052 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
053 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
054 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
055 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
056 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
057
058 import java.io.BufferedOutputStream;
059 import java.io.DataInputStream;
060 import java.io.DataOutputStream;
061 import java.io.FileNotFoundException;
062 import java.io.IOException;
063 import java.io.InputStream;
064 import java.io.OutputStream;
065 import java.net.InetAddress;
066 import java.net.InetSocketAddress;
067 import java.net.NetworkInterface;
068 import java.net.Socket;
069 import java.net.SocketException;
070 import java.net.SocketAddress;
071 import java.net.URI;
072 import java.net.UnknownHostException;
073 import java.util.ArrayList;
074 import java.util.Collections;
075 import java.util.EnumSet;
076 import java.util.HashMap;
077 import java.util.LinkedHashMap;
078 import java.util.List;
079 import java.util.Map;
080 import java.util.Random;
081
082 import javax.net.SocketFactory;
083
084 import org.apache.commons.logging.Log;
085 import org.apache.commons.logging.LogFactory;
086 import org.apache.hadoop.classification.InterfaceAudience;
087 import org.apache.hadoop.conf.Configuration;
088 import org.apache.hadoop.fs.BlockLocation;
089 import org.apache.hadoop.fs.BlockStorageLocation;
090 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
091 import org.apache.hadoop.fs.ContentSummary;
092 import org.apache.hadoop.fs.CreateFlag;
093 import org.apache.hadoop.fs.FileAlreadyExistsException;
094 import org.apache.hadoop.fs.FileSystem;
095 import org.apache.hadoop.fs.FsServerDefaults;
096 import org.apache.hadoop.fs.FsStatus;
097 import org.apache.hadoop.fs.HdfsBlockLocation;
098 import org.apache.hadoop.fs.InvalidPathException;
099 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
100 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
101 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
102 import org.apache.hadoop.fs.Options;
103 import org.apache.hadoop.fs.Options.ChecksumOpt;
104 import org.apache.hadoop.fs.ParentNotDirectoryException;
105 import org.apache.hadoop.fs.Path;
106 import org.apache.hadoop.fs.UnresolvedLinkException;
107 import org.apache.hadoop.fs.VolumeId;
108 import org.apache.hadoop.fs.permission.FsPermission;
109 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
110 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
111 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
112 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
113 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
114 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
115 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
116 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
117 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
118 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
119 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
120 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
121 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
122 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
123 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
124 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
125 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
126 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
127 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
128 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
129 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
130 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
131 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
132 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
133 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
134 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
135 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
136 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
137 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
138 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
139 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
140 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
141 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
142 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
143 import org.apache.hadoop.hdfs.server.namenode.NameNode;
144 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
145 import org.apache.hadoop.io.DataOutputBuffer;
146 import org.apache.hadoop.io.EnumSetWritable;
147 import org.apache.hadoop.io.IOUtils;
148 import org.apache.hadoop.io.MD5Hash;
149 import org.apache.hadoop.io.Text;
150 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
151 import org.apache.hadoop.ipc.Client;
152 import org.apache.hadoop.ipc.RPC;
153 import org.apache.hadoop.ipc.RemoteException;
154 import org.apache.hadoop.net.DNS;
155 import org.apache.hadoop.net.NetUtils;
156 import org.apache.hadoop.security.AccessControlException;
157 import org.apache.hadoop.security.UserGroupInformation;
158 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
159 import org.apache.hadoop.security.token.Token;
160 import org.apache.hadoop.security.token.TokenRenewer;
161 import org.apache.hadoop.util.DataChecksum;
162 import org.apache.hadoop.util.DataChecksum.Type;
163 import org.apache.hadoop.util.Progressable;
164 import org.apache.hadoop.util.Time;
165
166 import com.google.common.annotations.VisibleForTesting;
167 import com.google.common.base.Joiner;
168 import com.google.common.base.Preconditions;
169 import com.google.common.net.InetAddresses;
170
171 /********************************************************
172 * DFSClient can connect to a Hadoop Filesystem and
173 * perform basic file tasks. It uses the ClientProtocol
174 * to communicate with a NameNode daemon, and connects
175 * directly to DataNodes to read/write block data.
176 *
177 * Hadoop DFS users should obtain an instance of
178 * DistributedFileSystem, which uses DFSClient to handle
179 * filesystem tasks.
180 *
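 * A minimal usage sketch (the URI, host, and port below are illustrative
 * assumptions, not required values):
 * <pre>{@code
 * Configuration conf = new HdfsConfiguration();
 * FileSystem fs = FileSystem.get(URI.create("hdfs://namenode.example.com:8020"), conf);
 * // For hdfs:// URIs this returns a DistributedFileSystem, which delegates
 * // its file operations to an internal DFSClient.
 * }</pre>
 *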
181 ********************************************************/
182 @InterfaceAudience.Private
183 public class DFSClient implements java.io.Closeable {
184 public static final Log LOG = LogFactory.getLog(DFSClient.class);
185 public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
186 static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
187
188 private final Configuration conf;
189 private final Conf dfsClientConf;
190 final ClientProtocol namenode;
191 /* The service used for delegation tokens */
192 private Text dtService;
193
194 final UserGroupInformation ugi;
195 volatile boolean clientRunning = true;
196 volatile long lastLeaseRenewal;
197 private volatile FsServerDefaults serverDefaults;
198 private volatile long serverDefaultsLastUpdate;
199 final String clientName;
200 SocketFactory socketFactory;
201 final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
202 final FileSystem.Statistics stats;
203 private final String authority;
204 final PeerCache peerCache;
205 private Random r = new Random();
206 private SocketAddress[] localInterfaceAddrs;
207 private DataEncryptionKey encryptionKey;
208 private boolean shouldUseLegacyBlockReaderLocal;
209 private final CachingStrategy defaultReadCachingStrategy;
210 private final CachingStrategy defaultWriteCachingStrategy;
211
212 /**
213 * DFSClient configuration
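   *
   * <p>Values are read once from the {@link Configuration} when the client is
   * constructed. For example, the write packet size could be tuned before the
   * client is created (the value below is an illustrative assumption):
   * <pre>{@code
   * conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 128 * 1024);
   * }</pre>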
214 */
215 public static class Conf {
216 final int hdfsTimeout; // timeout value for a DFS operation.
217 final int maxFailoverAttempts;
218 final int failoverSleepBaseMillis;
219 final int failoverSleepMaxMillis;
220 final int maxBlockAcquireFailures;
221 final int confTime;
222 final int ioBufferSize;
223 final ChecksumOpt defaultChecksumOpt;
224 final int writePacketSize;
225 final int socketTimeout;
226 final int socketCacheCapacity;
227 final long socketCacheExpiry;
228 final long excludedNodesCacheExpiry;
229 /** Wait time window (in msec) if BlockMissingException is caught */
230 final int timeWindow;
231 final int nCachedConnRetry;
232 final int nBlockWriteRetry;
233 final int nBlockWriteLocateFollowingRetry;
234 final long defaultBlockSize;
235 final long prefetchSize;
236 final short defaultReplication;
237 final String taskId;
238 final FsPermission uMask;
239 final boolean connectToDnViaHostname;
240 final boolean getHdfsBlocksMetadataEnabled;
241 final int getFileBlockStorageLocationsNumThreads;
242 final int getFileBlockStorageLocationsTimeout;
243
244 final boolean useLegacyBlockReader;
245 final boolean useLegacyBlockReaderLocal;
246 final String domainSocketPath;
247 final boolean skipShortCircuitChecksums;
248 final int shortCircuitBufferSize;
249 final boolean shortCircuitLocalReads;
250 final boolean domainSocketDataTraffic;
251 final int shortCircuitStreamsCacheSize;
252 final long shortCircuitStreamsCacheExpiryMs;
253
254 public Conf(Configuration conf) {
255 // The hdfsTimeout is currently the same as the ipc timeout
256 hdfsTimeout = Client.getTimeout(conf);
257
258 maxFailoverAttempts = conf.getInt(
259 DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
260 DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
261 failoverSleepBaseMillis = conf.getInt(
262 DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
263 DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
264 failoverSleepMaxMillis = conf.getInt(
265 DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
266 DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
267
268 maxBlockAcquireFailures = conf.getInt(
269 DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
270 DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
271 confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
272 HdfsServerConstants.WRITE_TIMEOUT);
273 ioBufferSize = conf.getInt(
274 CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
275 CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
276 defaultChecksumOpt = getChecksumOptFromConf(conf);
277 socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
278 HdfsServerConstants.READ_TIMEOUT);
      // dfs.write.packet.size is an internal config variable
280 writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
281 DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
282 defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
283 DFS_BLOCK_SIZE_DEFAULT);
284 defaultReplication = (short) conf.getInt(
285 DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
286 taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
287 socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
288 DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
289 socketCacheExpiry = conf.getLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
290 DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
291 excludedNodesCacheExpiry = conf.getLong(
292 DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
293 DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
294 prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
295 10 * defaultBlockSize);
296 timeWindow = conf.getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
297 nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
298 DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
299 nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
300 DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
301 nBlockWriteLocateFollowingRetry = conf.getInt(
302 DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
303 DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
304 uMask = FsPermission.getUMask(conf);
305 connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
306 DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
307 getHdfsBlocksMetadataEnabled = conf.getBoolean(
308 DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
309 DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
310 getFileBlockStorageLocationsNumThreads = conf.getInt(
311 DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
312 DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
313 getFileBlockStorageLocationsTimeout = conf.getInt(
314 DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT,
315 DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_DEFAULT);
316
317 useLegacyBlockReader = conf.getBoolean(
318 DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
319 DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
320 useLegacyBlockReaderLocal = conf.getBoolean(
321 DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
322 DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
323 shortCircuitLocalReads = conf.getBoolean(
324 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
325 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
326 domainSocketDataTraffic = conf.getBoolean(
327 DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
328 DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
329 domainSocketPath = conf.getTrimmed(
330 DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
331 DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
332
333 if (BlockReaderLocal.LOG.isDebugEnabled()) {
334 BlockReaderLocal.LOG.debug(
335 DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
336 + " = " + useLegacyBlockReaderLocal);
337 BlockReaderLocal.LOG.debug(
338 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY
339 + " = " + shortCircuitLocalReads);
340 BlockReaderLocal.LOG.debug(
341 DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
342 + " = " + domainSocketDataTraffic);
343 BlockReaderLocal.LOG.debug(
344 DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
345 + " = " + domainSocketPath);
346 }
347
348 skipShortCircuitChecksums = conf.getBoolean(
349 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
350 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
351 shortCircuitBufferSize = conf.getInt(
352 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY,
353 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT);
354 shortCircuitStreamsCacheSize = conf.getInt(
355 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY,
356 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT);
357 shortCircuitStreamsCacheExpiryMs = conf.getLong(
358 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
359 DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT);
360 }
361
362 private DataChecksum.Type getChecksumType(Configuration conf) {
363 final String checksum = conf.get(
364 DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
365 DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
366 try {
367 return DataChecksum.Type.valueOf(checksum);
368 } catch(IllegalArgumentException iae) {
369 LOG.warn("Bad checksum type: " + checksum + ". Using default "
370 + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
371 return DataChecksum.Type.valueOf(
372 DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
373 }
374 }
375
376 // Construct a checksum option from conf
377 private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
378 DataChecksum.Type type = getChecksumType(conf);
379 int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
380 DFS_BYTES_PER_CHECKSUM_DEFAULT);
381 return new ChecksumOpt(type, bytesPerChecksum);
382 }
383
384 // create a DataChecksum with the default option.
385 private DataChecksum createChecksum() throws IOException {
386 return createChecksum(null);
387 }
388
389 private DataChecksum createChecksum(ChecksumOpt userOpt)
390 throws IOException {
391 // Fill in any missing field with the default.
392 ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
393 defaultChecksumOpt, userOpt);
394 DataChecksum dataChecksum = DataChecksum.newDataChecksum(
395 myOpt.getChecksumType(),
396 myOpt.getBytesPerChecksum());
397 if (dataChecksum == null) {
398 throw new IOException("Invalid checksum type specified: "
399 + myOpt.getChecksumType().name());
400 }
401 return dataChecksum;
402 }
403 }
404
405 public Conf getConf() {
406 return dfsClientConf;
407 }
408
409 Configuration getConfiguration() {
410 return conf;
411 }
412
413 /**
414 * A map from file names to {@link DFSOutputStream} objects
415 * that are currently being written by this client.
416 * Note that a file can only be written by a single client.
417 */
418 private final Map<String, DFSOutputStream> filesBeingWritten
419 = new HashMap<String, DFSOutputStream>();
420
421 private final DomainSocketFactory domainSocketFactory;
422
423 /**
424 * Same as this(NameNode.getAddress(conf), conf);
425 * @see #DFSClient(InetSocketAddress, Configuration)
426 * @deprecated Deprecated at 0.21
427 */
428 @Deprecated
429 public DFSClient(Configuration conf) throws IOException {
430 this(NameNode.getAddress(conf), conf);
431 }
432
433 public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
434 this(NameNode.getUri(address), conf);
435 }
436
437 /**
438 * Same as this(nameNodeUri, conf, null);
439 * @see #DFSClient(URI, Configuration, FileSystem.Statistics)
440 */
441 public DFSClient(URI nameNodeUri, Configuration conf
442 ) throws IOException {
443 this(nameNodeUri, conf, null);
444 }
445
446 /**
447 * Same as this(nameNodeUri, null, conf, stats);
448 * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics)
449 */
450 public DFSClient(URI nameNodeUri, Configuration conf,
451 FileSystem.Statistics stats)
452 throws IOException {
453 this(nameNodeUri, null, conf, stats);
454 }
455
456 /**
457 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
458 * If HA is enabled and a positive value is set for
459 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
460 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
461 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
462 * must be null.
463 */
464 @VisibleForTesting
465 public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
466 Configuration conf, FileSystem.Statistics stats)
467 throws IOException {
468 // Copy only the required DFSClient configuration
469 this.dfsClientConf = new Conf(conf);
470 this.shouldUseLegacyBlockReaderLocal =
471 this.dfsClientConf.useLegacyBlockReaderLocal;
472 if (this.dfsClientConf.useLegacyBlockReaderLocal) {
473 LOG.debug("Using legacy short-circuit local reads.");
474 }
475 this.conf = conf;
476 this.stats = stats;
477 this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
478 this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
479
480 this.ugi = UserGroupInformation.getCurrentUser();
481
482 this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
483 this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
484 DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
485
486 int numResponseToDrop = conf.getInt(
487 DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
488 DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
489 NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
490 if (numResponseToDrop > 0) {
491 // This case is used for testing.
492 LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
493 + " is set to " + numResponseToDrop
494 + ", this hacked client will proactively drop responses");
495 proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
496 nameNodeUri, ClientProtocol.class, numResponseToDrop);
497 }
498
499 if (proxyInfo != null) {
500 this.dtService = proxyInfo.getDelegationTokenService();
501 this.namenode = proxyInfo.getProxy();
502 } else if (rpcNamenode != null) {
503 // This case is used for testing.
504 Preconditions.checkArgument(nameNodeUri == null);
505 this.namenode = rpcNamenode;
506 dtService = null;
507 } else {
508 Preconditions.checkArgument(nameNodeUri != null,
509 "null URI");
510 proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
511 ClientProtocol.class);
512 this.dtService = proxyInfo.getDelegationTokenService();
513 this.namenode = proxyInfo.getProxy();
514 }
515
516 // read directly from the block file if configured.
517 this.domainSocketFactory = new DomainSocketFactory(dfsClientConf);
518
519 String localInterfaces[] =
520 conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
521 localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
522 if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
523 LOG.debug("Using local interfaces [" +
524 Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
525 Joiner.on(',').join(localInterfaceAddrs) + "]");
526 }
527
528 this.peerCache = PeerCache.getInstance(dfsClientConf.socketCacheCapacity, dfsClientConf.socketCacheExpiry);
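    // For each caching key below, null means the key was not set explicitly,
    // so the corresponding caching behavior is left at its default rather
    // than being forced on or off.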
529 Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
530 null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
531 Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
532 null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
533 Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
534 null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
535 this.defaultReadCachingStrategy =
536 new CachingStrategy(readDropBehind, readahead);
537 this.defaultWriteCachingStrategy =
538 new CachingStrategy(writeDropBehind, readahead);
539 }
540
541 /**
542 * Return the socket addresses to use with each configured
543 * local interface. Local interfaces may be specified by IP
544 * address, IP address range using CIDR notation, interface
545 * name (e.g. eth0) or sub-interface name (e.g. eth0:0).
546 * The socket addresses consist of the IPs for the interfaces
547 * and the ephemeral port (port 0). If an IP, IP range, or
548 * interface name matches an interface with sub-interfaces
549 * only the IP of the interface is used. Sub-interfaces can
550 * be used by specifying them explicitly (by IP or name).
551 *
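   * For example, the interfaces might be configured as a mix of the supported
   * forms (illustrative values only):
   * <pre>{@code
   * conf.setStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES,
   *     "eth0", "eth0:0", "10.2.0.0/16");
   * }</pre>
   *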
552 * @return SocketAddresses for the configured local interfaces,
553 * or an empty array if none are configured
554 * @throws UnknownHostException if a given interface name is invalid
555 */
556 private static SocketAddress[] getLocalInterfaceAddrs(
557 String interfaceNames[]) throws UnknownHostException {
558 List<SocketAddress> localAddrs = new ArrayList<SocketAddress>();
559 for (String interfaceName : interfaceNames) {
560 if (InetAddresses.isInetAddress(interfaceName)) {
561 localAddrs.add(new InetSocketAddress(interfaceName, 0));
562 } else if (NetUtils.isValidSubnet(interfaceName)) {
563 for (InetAddress addr : NetUtils.getIPs(interfaceName, false)) {
564 localAddrs.add(new InetSocketAddress(addr, 0));
565 }
566 } else {
567 for (String ip : DNS.getIPs(interfaceName, false)) {
568 localAddrs.add(new InetSocketAddress(ip, 0));
569 }
570 }
571 }
572 return localAddrs.toArray(new SocketAddress[localAddrs.size()]);
573 }
574
575 /**
576 * Select one of the configured local interfaces at random. We use a random
577 * interface because other policies like round-robin are less effective
578 * given that we cache connections to datanodes.
579 *
580 * @return one of the local interface addresses at random, or null if no
581 * local interfaces are configured
582 */
583 SocketAddress getRandomLocalInterfaceAddr() {
584 if (localInterfaceAddrs.length == 0) {
585 return null;
586 }
587 final int idx = r.nextInt(localInterfaceAddrs.length);
588 final SocketAddress addr = localInterfaceAddrs[idx];
589 if (LOG.isDebugEnabled()) {
590 LOG.debug("Using local interface " + addr);
591 }
592 return addr;
593 }
594
595 /**
596 * Return the number of times the client should go back to the namenode
597 * to retrieve block locations when reading.
598 */
599 int getMaxBlockAcquireFailures() {
600 return dfsClientConf.maxBlockAcquireFailures;
601 }
602
603 /**
604 * Return the timeout that clients should use when writing to datanodes.
605 * @param numNodes the number of nodes in the pipeline.
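   * @return the write timeout in milliseconds: the configured base timeout
   *         plus a fixed extension per pipeline node, or 0 if no base timeout
   *         is configured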
606 */
607 int getDatanodeWriteTimeout(int numNodes) {
608 return (dfsClientConf.confTime > 0) ?
609 (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
610 }
611
612 int getDatanodeReadTimeout(int numNodes) {
613 return dfsClientConf.socketTimeout > 0 ?
614 (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
615 dfsClientConf.socketTimeout) : 0;
616 }
617
618 int getHdfsTimeout() {
619 return dfsClientConf.hdfsTimeout;
620 }
621
622 @VisibleForTesting
623 public String getClientName() {
624 return clientName;
625 }
626
627 void checkOpen() throws IOException {
628 if (!clientRunning) {
      throw new IOException("Filesystem closed");
631 }
632 }
633
634 /** Return the lease renewer instance. The renewer thread won't start
635 * until the first output stream is created. The same instance will
636 * be returned until all output streams are closed.
637 */
638 public LeaseRenewer getLeaseRenewer() throws IOException {
639 return LeaseRenewer.getInstance(authority, ugi, this);
640 }
641
642 /** Get a lease and start automatic renewal */
643 private void beginFileLease(final String src, final DFSOutputStream out)
644 throws IOException {
645 getLeaseRenewer().put(src, out, this);
646 }
647
648 /** Stop renewal of lease for the file. */
649 void endFileLease(final String src) throws IOException {
650 getLeaseRenewer().closeFile(src, this);
651 }
652
653
  /** Record a file that is being written. Only called from LeaseRenewer, where proper locking is
655 * enforced to consistently update its local dfsclients array and
656 * client's filesBeingWritten map.
657 */
658 void putFileBeingWritten(final String src, final DFSOutputStream out) {
659 synchronized(filesBeingWritten) {
660 filesBeingWritten.put(src, out);
      // update the last lease renewal time only when there were no
      // writes. Once there is one write stream open, the lease renewer
      // thread keeps it updated well within anyone's expiration time.
664 if (lastLeaseRenewal == 0) {
665 updateLastLeaseRenewal();
666 }
667 }
668 }
669
670 /** Remove a file. Only called from LeaseRenewer. */
671 void removeFileBeingWritten(final String src) {
672 synchronized(filesBeingWritten) {
673 filesBeingWritten.remove(src);
674 if (filesBeingWritten.isEmpty()) {
675 lastLeaseRenewal = 0;
676 }
677 }
678 }
679
680 /** Is file-being-written map empty? */
681 boolean isFilesBeingWrittenEmpty() {
682 synchronized(filesBeingWritten) {
683 return filesBeingWritten.isEmpty();
684 }
685 }
686
687 /** @return true if the client is running */
688 boolean isClientRunning() {
689 return clientRunning;
690 }
691
692 long getLastLeaseRenewal() {
693 return lastLeaseRenewal;
694 }
695
696 void updateLastLeaseRenewal() {
697 synchronized(filesBeingWritten) {
698 if (filesBeingWritten.isEmpty()) {
699 return;
700 }
701 lastLeaseRenewal = Time.now();
702 }
703 }
704
705 /**
706 * Renew leases.
707 * @return true if lease was renewed. May return false if this
708 * client has been closed or has no files open.
709 **/
710 boolean renewLease() throws IOException {
711 if (clientRunning && !isFilesBeingWrittenEmpty()) {
712 try {
713 namenode.renewLease(clientName);
714 updateLastLeaseRenewal();
715 return true;
716 } catch (IOException e) {
717 // Abort if the lease has already expired.
718 final long elapsed = Time.now() - getLastLeaseRenewal();
719 if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
720 LOG.warn("Failed to renew lease for " + clientName + " for "
            + (elapsed/1000) + " seconds (>= hard-limit ="
722 + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
723 + "Closing all files being written ...", e);
724 closeAllFilesBeingWritten(true);
725 } else {
726 // Let the lease renewer handle it and retry.
727 throw e;
728 }
729 }
730 }
731 return false;
732 }
733
734 /**
   * Close connections to the Namenode.
736 */
737 void closeConnectionToNamenode() {
738 RPC.stopProxy(namenode);
739 }
740
741 /** Abort and release resources held. Ignore all errors. */
742 void abort() {
743 clientRunning = false;
744 closeAllFilesBeingWritten(true);
745
746 try {
747 // remove reference to this client and stop the renewer,
      // if there are no more clients under the renewer.
749 getLeaseRenewer().closeClient(this);
750 } catch (IOException ioe) {
      LOG.info("Exception occurred while aborting the client: " + ioe);
752 }
753 closeConnectionToNamenode();
754 }
755
756 /** Close/abort all files being written. */
757 private void closeAllFilesBeingWritten(final boolean abort) {
758 for(;;) {
759 final String src;
760 final DFSOutputStream out;
761 synchronized(filesBeingWritten) {
762 if (filesBeingWritten.isEmpty()) {
763 return;
764 }
765 src = filesBeingWritten.keySet().iterator().next();
766 out = filesBeingWritten.remove(src);
767 }
768 if (out != null) {
769 try {
770 if (abort) {
771 out.abort();
772 } else {
773 out.close();
774 }
775 } catch(IOException ie) {
776 LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
777 ie);
778 }
779 }
780 }
781 }
782
783 /**
   * Close the file system, abandoning all of the leases and files being
   * created, and close the connection to the namenode.
786 */
787 @Override
788 public synchronized void close() throws IOException {
789 if(clientRunning) {
790 closeAllFilesBeingWritten(false);
791 clientRunning = false;
792 getLeaseRenewer().closeClient(this);
793 // close connections to the namenode
794 closeConnectionToNamenode();
795 }
796 }
797
798 /**
799 * Get the default block size for this cluster
800 * @return the default block size in bytes
801 */
802 public long getDefaultBlockSize() {
803 return dfsClientConf.defaultBlockSize;
804 }
805
806 /**
807 * @see ClientProtocol#getPreferredBlockSize(String)
808 */
809 public long getBlockSize(String f) throws IOException {
810 try {
811 return namenode.getPreferredBlockSize(f);
812 } catch (IOException ie) {
813 LOG.warn("Problem getting block size", ie);
814 throw ie;
815 }
816 }
817
818 /**
819 * Get server default values for a number of configuration params.
820 * @see ClientProtocol#getServerDefaults()
821 */
822 public FsServerDefaults getServerDefaults() throws IOException {
823 long now = Time.now();
824 if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
825 serverDefaults = namenode.getServerDefaults();
826 serverDefaultsLastUpdate = now;
827 }
828 return serverDefaults;
829 }
830
831 /**
832 * Get a canonical token service name for this client's tokens. Null should
833 * be returned if the client is not using tokens.
834 * @return the token service for the client
835 */
836 @InterfaceAudience.LimitedPrivate( { "HDFS" })
837 public String getCanonicalServiceName() {
838 return (dtService != null) ? dtService.toString() : null;
839 }
840
841 /**
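   * Get a new delegation token from the namenode for the given renewer.
   * <p>A minimal sketch (the renewer name below is an illustrative assumption):
   * <pre>{@code
   * Token<DelegationTokenIdentifier> token =
   *     dfsClient.getDelegationToken(new Text("yarn"));
   * }</pre>
   *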
842 * @see ClientProtocol#getDelegationToken(Text)
843 */
844 public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
845 throws IOException {
846 assert dtService != null;
847 Token<DelegationTokenIdentifier> token =
848 namenode.getDelegationToken(renewer);
849
850 if (token != null) {
851 token.setService(this.dtService);
852 LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
853 } else {
      LOG.info("Cannot get delegation token for " + renewer);
855 }
856 return token;
857
858 }
859
860 /**
861 * Renew a delegation token
862 * @param token the token to renew
863 * @return the new expiration time
864 * @throws InvalidToken
865 * @throws IOException
866 * @deprecated Use Token.renew instead.
867 */
868 @Deprecated
869 public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
870 throws InvalidToken, IOException {
871 LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
872 try {
873 return token.renew(conf);
874 } catch (InterruptedException ie) {
875 throw new RuntimeException("caught interrupted", ie);
876 } catch (RemoteException re) {
877 throw re.unwrapRemoteException(InvalidToken.class,
878 AccessControlException.class);
879 }
880 }
881
882 private static Map<String, Boolean> localAddrMap = Collections
883 .synchronizedMap(new HashMap<String, Boolean>());
884
885 static boolean isLocalAddress(InetSocketAddress targetAddr) {
886 InetAddress addr = targetAddr.getAddress();
887 Boolean cached = localAddrMap.get(addr.getHostAddress());
888 if (cached != null) {
889 if (LOG.isTraceEnabled()) {
890 LOG.trace("Address " + targetAddr +
891 (cached ? " is local" : " is not local"));
892 }
893 return cached;
894 }
895
896 boolean local = NetUtils.isLocalAddress(addr);
897
898 if (LOG.isTraceEnabled()) {
899 LOG.trace("Address " + targetAddr +
900 (local ? " is local" : " is not local"));
901 }
902 localAddrMap.put(addr.getHostAddress(), local);
903 return local;
904 }
905
906 /**
907 * Should the block access token be refetched on an exception
908 *
909 * @param ex Exception received
910 * @param targetAddr Target datanode address from where exception was received
   * @return true if the block access token has expired or is invalid and
   *         should be refetched
913 */
914 private static boolean tokenRefetchNeeded(IOException ex,
915 InetSocketAddress targetAddr) {
916 /*
917 * Get a new access token and retry. Retry is needed in 2 cases. 1) When
918 * both NN and DN re-started while DFSClient holding a cached access token.
919 * 2) In the case that NN fails to update its access key at pre-set interval
920 * (by a wide margin) and subsequently restarts. In this case, DN
921 * re-registers itself with NN and receives a new access key, but DN will
922 * delete the old access key from its memory since it's considered expired
923 * based on the estimated expiration date.
924 */
925 if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
926 LOG.info("Access token was invalid when connecting to " + targetAddr
927 + " : " + ex);
928 return true;
929 }
930 return false;
931 }
932
933 /**
934 * Cancel a delegation token
935 * @param token the token to cancel
936 * @throws InvalidToken
937 * @throws IOException
938 * @deprecated Use Token.cancel instead.
939 */
940 @Deprecated
941 public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
942 throws InvalidToken, IOException {
943 LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
944 try {
945 token.cancel(conf);
946 } catch (InterruptedException ie) {
947 throw new RuntimeException("caught interrupted", ie);
948 } catch (RemoteException re) {
949 throw re.unwrapRemoteException(InvalidToken.class,
950 AccessControlException.class);
951 }
952 }
953
954 @InterfaceAudience.Private
955 public static class Renewer extends TokenRenewer {
956
957 static {
      // Ensure that HDFS Configuration files are loaded before trying to use
959 // the renewer.
960 HdfsConfiguration.init();
961 }
962
963 @Override
964 public boolean handleKind(Text kind) {
965 return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
966 }
967
968 @SuppressWarnings("unchecked")
969 @Override
970 public long renew(Token<?> token, Configuration conf) throws IOException {
971 Token<DelegationTokenIdentifier> delToken =
972 (Token<DelegationTokenIdentifier>) token;
973 ClientProtocol nn = getNNProxy(delToken, conf);
974 try {
975 return nn.renewDelegationToken(delToken);
976 } catch (RemoteException re) {
977 throw re.unwrapRemoteException(InvalidToken.class,
978 AccessControlException.class);
979 }
980 }
981
982 @SuppressWarnings("unchecked")
983 @Override
984 public void cancel(Token<?> token, Configuration conf) throws IOException {
985 Token<DelegationTokenIdentifier> delToken =
986 (Token<DelegationTokenIdentifier>) token;
987 LOG.info("Cancelling " +
988 DelegationTokenIdentifier.stringifyToken(delToken));
989 ClientProtocol nn = getNNProxy(delToken, conf);
990 try {
991 nn.cancelDelegationToken(delToken);
992 } catch (RemoteException re) {
993 throw re.unwrapRemoteException(InvalidToken.class,
994 AccessControlException.class);
995 }
996 }
997
998 private static ClientProtocol getNNProxy(
999 Token<DelegationTokenIdentifier> token, Configuration conf)
1000 throws IOException {
1001 URI uri = HAUtil.getServiceUriFromToken(token);
1002 if (HAUtil.isTokenForLogicalUri(token) &&
1003 !HAUtil.isLogicalUri(conf, uri)) {
1004 // If the token is for a logical nameservice, but the configuration
1005 // we have disagrees about that, we can't actually renew it.
1006 // This can be the case in MR, for example, if the RM doesn't
1007 // have all of the HA clusters configured in its configuration.
1008 throw new IOException("Unable to map logical nameservice URI '" +
1009 uri + "' to a NameNode. Local configuration does not have " +
1010 "a failover proxy provider configured.");
1011 }
1012
1013 NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
1014 NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
1015 assert info.getDelegationTokenService().equals(token.getService()) :
1016 "Returned service '" + info.getDelegationTokenService().toString() +
1017 "' doesn't match expected service '" +
1018 token.getService().toString() + "'";
1019
1020 return info.getProxy();
1021 }
1022
1023 @Override
1024 public boolean isManaged(Token<?> token) throws IOException {
1025 return true;
1026 }
1027
1028 }
1029
1030 /**
1031 * Report corrupt blocks that were discovered by the client.
1032 * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
1033 */
1034 public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
1035 namenode.reportBadBlocks(blocks);
1036 }
1037
1038 public short getDefaultReplication() {
1039 return dfsClientConf.defaultReplication;
1040 }
1041
1042 public LocatedBlocks getLocatedBlocks(String src, long start)
1043 throws IOException {
1044 return getLocatedBlocks(src, start, dfsClientConf.prefetchSize);
1045 }
1046
1047 /*
1048 * This is just a wrapper around callGetBlockLocations, but non-static so that
1049 * we can stub it out for tests.
1050 */
1051 @VisibleForTesting
1052 public LocatedBlocks getLocatedBlocks(String src, long start, long length)
1053 throws IOException {
1054 return callGetBlockLocations(namenode, src, start, length);
1055 }
1056
1057 /**
1058 * @see ClientProtocol#getBlockLocations(String, long, long)
1059 */
1060 static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
1061 String src, long start, long length)
1062 throws IOException {
1063 try {
1064 return namenode.getBlockLocations(src, start, length);
1065 } catch(RemoteException re) {
1066 throw re.unwrapRemoteException(AccessControlException.class,
1067 FileNotFoundException.class,
1068 UnresolvedPathException.class);
1069 }
1070 }
1071
1072 /**
1073 * Recover a file's lease
1074 * @param src a file's path
1075 * @return true if the file is already closed
1076 * @throws IOException
1077 */
1078 boolean recoverLease(String src) throws IOException {
1079 checkOpen();
1080
1081 try {
1082 return namenode.recoverLease(src, clientName);
1083 } catch (RemoteException re) {
1084 throw re.unwrapRemoteException(FileNotFoundException.class,
1085 AccessControlException.class,
1086 UnresolvedPathException.class);
1087 }
1088 }
1089
1090 /**
   * Get block location info about a file.
1092 *
1093 * getBlockLocations() returns a list of hostnames that store
1094 * data for a specific file region. It returns a set of hostnames
1095 * for every block within the indicated region.
1096 *
1097 * This function is very useful when writing code that considers
1098 * data-placement when performing operations. For example, the
1099 * MapReduce system tries to schedule tasks on the same machines
1100 * as the data-block the task processes.
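   *
   * <p>A minimal sketch (the path and range are illustrative assumptions):
   * <pre>{@code
   * BlockLocation[] locs = dfsClient.getBlockLocations("/user/foo/data.txt", 0, 1024L * 1024);
   * for (BlockLocation loc : locs) {
   *   System.out.println(loc); // offset, length and the datanode hosts holding the block
   * }
   * }</pre>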
1101 */
1102 public BlockLocation[] getBlockLocations(String src, long start,
1103 long length) throws IOException, UnresolvedLinkException {
1104 LocatedBlocks blocks = getLocatedBlocks(src, start, length);
1105 BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
1106 HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
1107 for (int i = 0; i < locations.length; i++) {
1108 hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
1109 }
1110 return hdfsLocations;
1111 }
1112
1113 /**
1114 * Get block location information about a list of {@link HdfsBlockLocation}.
1115 * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
1116 * get {@link BlockStorageLocation}s for blocks returned by
1117 * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
1118 * .
1119 *
   * This is done by making a round of RPCs to the associated datanodes, asking
   * for the volume of each block replica. The returned array of
1122 * {@link BlockStorageLocation} expose this information as a
1123 * {@link VolumeId}.
1124 *
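   * <p>A minimal sketch (requires datanode-side block-metadata support to be
   * enabled; the path and length are illustrative assumptions):
   * <pre>{@code
   * BlockLocation[] blockLocs = dfsClient.getBlockLocations("/user/foo/data.txt", 0, length);
   * BlockStorageLocation[] volumeLocs =
   *     dfsClient.getBlockStorageLocations(Arrays.asList(blockLocs));
   * }</pre>
   *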
1125 * @param blockLocations
1126 * target blocks on which to query volume location information
1127 * @return volumeBlockLocations original block array augmented with additional
1128 * volume location information for each replica.
1129 */
1130 public BlockStorageLocation[] getBlockStorageLocations(
1131 List<BlockLocation> blockLocations) throws IOException,
1132 UnsupportedOperationException, InvalidBlockTokenException {
1133 if (!getConf().getHdfsBlocksMetadataEnabled) {
1134 throw new UnsupportedOperationException("Datanode-side support for " +
1135 "getVolumeBlockLocations() must also be enabled in the client " +
1136 "configuration.");
1137 }
1138 // Downcast blockLocations and fetch out required LocatedBlock(s)
1139 List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
1140 for (BlockLocation loc : blockLocations) {
1141 if (!(loc instanceof HdfsBlockLocation)) {
1142 throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
1143 "expected to be passed HdfsBlockLocations");
1144 }
1145 HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
1146 blocks.add(hdfsLoc.getLocatedBlock());
1147 }
1148
    // Re-group the LocatedBlocks by datanode, mapping each datanode to the
    // list of LocatedBlocks stored on it.
1151 Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks =
1152 new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
1153 for (LocatedBlock b : blocks) {
1154 for (DatanodeInfo info : b.getLocations()) {
1155 if (!datanodeBlocks.containsKey(info)) {
1156 datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
1157 }
1158 List<LocatedBlock> l = datanodeBlocks.get(info);
1159 l.add(b);
1160 }
1161 }
1162
    // Make RPCs to the datanodes to get volume locations for their replicas
1164 List<HdfsBlocksMetadata> metadatas = BlockStorageLocationUtil
1165 .queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
1166 getConf().getFileBlockStorageLocationsNumThreads,
1167 getConf().getFileBlockStorageLocationsTimeout,
1168 getConf().connectToDnViaHostname);
1169
1170 // Regroup the returned VolumeId metadata to again be grouped by
1171 // LocatedBlock rather than by datanode
1172 Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
1173 .associateVolumeIdsWithBlocks(blocks, datanodeBlocks, metadatas);
1174
1175 // Combine original BlockLocations with new VolumeId information
1176 BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
1177 .convertToVolumeBlockLocations(blocks, blockVolumeIds);
1178
1179 return volumeBlockLocations;
1180 }
1181
1182 public DFSInputStream open(String src)
1183 throws IOException, UnresolvedLinkException {
1184 return open(src, dfsClientConf.ioBufferSize, true, null);
1185 }
1186
1187 /**
1188 * Create an input stream that obtains a nodelist from the
1189 * namenode, and then reads from all the right places. Creates
   * an inner subclass of InputStream that does the right out-of-band
1191 * work.
1192 * @deprecated Use {@link #open(String, int, boolean)} instead.
1193 */
1194 @Deprecated
1195 public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
1196 FileSystem.Statistics stats)
1197 throws IOException, UnresolvedLinkException {
1198 return open(src, buffersize, verifyChecksum);
1199 }
1200
1201
1202 /**
1203 * Create an input stream that obtains a nodelist from the
1204 * namenode, and then reads from all the right places. Creates
   * an inner subclass of InputStream that does the right out-of-band
1206 * work.
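   *
   * <p>A minimal read sketch (the path and buffer size are illustrative
   * assumptions):
   * <pre>{@code
   * DFSInputStream in = dfsClient.open("/user/foo/data.txt", 4096, true);
   * try {
   *   byte[] buf = new byte[4096];
   *   int n;
   *   while ((n = in.read(buf)) != -1) {
   *     // consume buf[0..n)
   *   }
   * } finally {
   *   in.close();
   * }
   * }</pre>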
1207 */
1208 public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
1209 throws IOException, UnresolvedLinkException {
1210 checkOpen();
1211 // Get block info from namenode
1212 return new DFSInputStream(this, src, buffersize, verifyChecksum);
1213 }
1214
1215 /**
1216 * Get the namenode associated with this DFSClient object
1217 * @return the namenode associated with this DFSClient object
1218 */
1219 public ClientProtocol getNamenode() {
1220 return namenode;
1221 }
1222
1223 /**
1224 * Call {@link #create(String, boolean, short, long, Progressable)} with
   * default <code>replication</code> and <code>blockSize</code> and null
   * <code>progress</code>.
1227 */
1228 public OutputStream create(String src, boolean overwrite)
1229 throws IOException {
1230 return create(src, overwrite, dfsClientConf.defaultReplication,
1231 dfsClientConf.defaultBlockSize, null);
1232 }
1233
1234 /**
1235 * Call {@link #create(String, boolean, short, long, Progressable)} with
   * default <code>replication</code> and <code>blockSize</code>.
1237 */
1238 public OutputStream create(String src,
1239 boolean overwrite,
1240 Progressable progress) throws IOException {
1241 return create(src, overwrite, dfsClientConf.defaultReplication,
1242 dfsClientConf.defaultBlockSize, progress);
1243 }
1244
1245 /**
1246 * Call {@link #create(String, boolean, short, long, Progressable)} with
1247 * null <code>progress</code>.
1248 */
1249 public OutputStream create(String src,
1250 boolean overwrite,
1251 short replication,
1252 long blockSize) throws IOException {
1253 return create(src, overwrite, replication, blockSize, null);
1254 }
1255
1256 /**
1257 * Call {@link #create(String, boolean, short, long, Progressable, int)}
1258 * with default bufferSize.
1259 */
1260 public OutputStream create(String src, boolean overwrite, short replication,
1261 long blockSize, Progressable progress) throws IOException {
1262 return create(src, overwrite, replication, blockSize, progress,
1263 dfsClientConf.ioBufferSize);
1264 }
1265
1266 /**
1267 * Call {@link #create(String, FsPermission, EnumSet, short, long,
1268 * Progressable, int, ChecksumOpt)} with default <code>permission</code>
1269 * {@link FsPermission#getFileDefault()}.
1270 *
1271 * @param src File name
1272 * @param overwrite overwrite an existing file if true
1273 * @param replication replication factor for the file
1274 * @param blockSize maximum block size
1275 * @param progress interface for reporting client progress
1276 * @param buffersize underlying buffersize
1277 *
1278 * @return output stream
1279 */
1280 public OutputStream create(String src,
1281 boolean overwrite,
1282 short replication,
1283 long blockSize,
1284 Progressable progress,
1285 int buffersize)
1286 throws IOException {
1287 return create(src, FsPermission.getFileDefault(),
1288 overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
1289 : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
1290 buffersize, null);
1291 }
1292
1293 /**
1294 * Call {@link #create(String, FsPermission, EnumSet, boolean, short,
1295 * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
1296 * set to true.
1297 */
1298 public DFSOutputStream create(String src,
1299 FsPermission permission,
1300 EnumSet<CreateFlag> flag,
1301 short replication,
1302 long blockSize,
1303 Progressable progress,
1304 int buffersize,
1305 ChecksumOpt checksumOpt)
1306 throws IOException {
1307 return create(src, permission, flag, true,
1308 replication, blockSize, progress, buffersize, checksumOpt, null);
1309 }
1310
1311 /**
1312 * Create a new dfs file with the specified block replication
1313 * with write-progress reporting and return an output stream for writing
1314 * into the file.
1315 *
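   * <p>A minimal sketch (all argument values below are illustrative
   * assumptions):
   * <pre>{@code
   * DFSOutputStream out = dfsClient.create("/user/foo/out.txt",
   *     FsPermission.getFileDefault(),
   *     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *     true,                    // createParent
   *     (short) 3,               // replication
   *     128L * 1024 * 1024,      // blockSize
   *     null,                    // progress
   *     4096,                    // buffersize
   *     null);                   // checksumOpt: use the configured defaults
   * out.write("hello".getBytes());
   * out.close();
   * }</pre>
   *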
1316 * @param src File name
   * @param permission The permission of the file being created.
   * If null, use default permission {@link FsPermission#getFileDefault()}
1319 * @param flag indicates create a new file or create/overwrite an
1320 * existing file or append to an existing file
1321 * @param createParent create missing parent directory if true
1322 * @param replication block replication
1323 * @param blockSize maximum block size
1324 * @param progress interface for reporting client progress
1325 * @param buffersize underlying buffer size
1326 * @param checksumOpt checksum options
1327 *
1328 * @return output stream
1329 *
1330 * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
1331 * boolean, short, long) for detailed description of exceptions thrown
1332 */
1333 public DFSOutputStream create(String src,
1334 FsPermission permission,
1335 EnumSet<CreateFlag> flag,
1336 boolean createParent,
1337 short replication,
1338 long blockSize,
1339 Progressable progress,
1340 int buffersize,
1341 ChecksumOpt checksumOpt) throws IOException {
1342 return create(src, permission, flag, createParent, replication, blockSize,
1343 progress, buffersize, checksumOpt, null);
1344 }
1345
1346 /**
1347 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
1348 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
1349 * a hint to where the namenode should place the file blocks.
   * The favored nodes hint is not persisted in HDFS, so it may only be honored
   * at creation time; HDFS may later move the blocks away from the favored
   * nodes during balancing or re-replication. A value of null means no favored
   * nodes for this create.
1354 */
1355 public DFSOutputStream create(String src,
1356 FsPermission permission,
1357 EnumSet<CreateFlag> flag,
1358 boolean createParent,
1359 short replication,
1360 long blockSize,
1361 Progressable progress,
1362 int buffersize,
1363 ChecksumOpt checksumOpt,
1364 InetSocketAddress[] favoredNodes) throws IOException {
1365 checkOpen();
1366 if (permission == null) {
1367 permission = FsPermission.getFileDefault();
1368 }
1369 FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1370 if(LOG.isDebugEnabled()) {
1371 LOG.debug(src + ": masked=" + masked);
1372 }
1373 String[] favoredNodeStrs = null;
1374 if (favoredNodes != null) {
1375 favoredNodeStrs = new String[favoredNodes.length];
1376 for (int i = 0; i < favoredNodes.length; i++) {
1377 favoredNodeStrs[i] =
1378 favoredNodes[i].getHostName() + ":"
1379 + favoredNodes[i].getPort();
1380 }
1381 }
1382 final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
1383 src, masked, flag, createParent, replication, blockSize, progress,
1384 buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
1385 beginFileLease(src, result);
1386 return result;
1387 }
1388
1389 /**
1390 * Append to an existing file if {@link CreateFlag#APPEND} is present
1391 */
1392 private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
1393 int buffersize, Progressable progress) throws IOException {
1394 if (flag.contains(CreateFlag.APPEND)) {
1395 HdfsFileStatus stat = getFileInfo(src);
1396 if (stat == null) { // No file to append to
1397 // New file needs to be created if create option is present
1398 if (!flag.contains(CreateFlag.CREATE)) {
1399 throw new FileNotFoundException("failed to append to non-existent file "
1400 + src + " on client " + clientName);
1401 }
1402 return null;
1403 }
1404 return callAppend(stat, src, buffersize, progress);
1405 }
1406 return null;
1407 }
1408
1409 /**
   * Same as {@link #create(String, FsPermission, EnumSet, short, long,
   * Progressable, int, ChecksumOpt)} except that the permission
   * is absolute (i.e. has already been masked with umask).
1413 */
1414 public DFSOutputStream primitiveCreate(String src,
1415 FsPermission absPermission,
1416 EnumSet<CreateFlag> flag,
1417 boolean createParent,
1418 short replication,
1419 long blockSize,
1420 Progressable progress,
1421 int buffersize,
1422 ChecksumOpt checksumOpt)
1423 throws IOException, UnresolvedLinkException {
1424 checkOpen();
1425 CreateFlag.validate(flag);
1426 DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
1427 if (result == null) {
1428 DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
1429 result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
1430 flag, createParent, replication, blockSize, progress, buffersize,
1431 checksum);
1432 }
1433 beginFileLease(src, result);
1434 return result;
1435 }
1436
1437 /**
1438 * Creates a symbolic link.
1439 *
1440 * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)
1441 */
1442 public void createSymlink(String target, String link, boolean createParent)
1443 throws IOException {
1444 try {
1445 FsPermission dirPerm =
1446 FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
1447 namenode.createSymlink(target, link, dirPerm, createParent);
1448 } catch (RemoteException re) {
1449 throw re.unwrapRemoteException(AccessControlException.class,
1450 FileAlreadyExistsException.class,
1451 FileNotFoundException.class,
1452 ParentNotDirectoryException.class,
1453 NSQuotaExceededException.class,
1454 DSQuotaExceededException.class,
1455 UnresolvedPathException.class,
1456 SnapshotAccessControlException.class);
1457 }
1458 }
1459
1460 /**
1461 * Resolve the *first* symlink, if any, in the path.
1462 *
1463 * @see ClientProtocol#getLinkTarget(String)
1464 */
1465 public String getLinkTarget(String path) throws IOException {
1466 checkOpen();
1467 try {
1468 return namenode.getLinkTarget(path);
1469 } catch (RemoteException re) {
1470 throw re.unwrapRemoteException(AccessControlException.class,
1471 FileNotFoundException.class);
1472 }
1473 }
1474
1475 /** Method to get stream returned by append call */
1476 private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
1477 int buffersize, Progressable progress) throws IOException {
1478 LocatedBlock lastBlock = null;
1479 try {
1480 lastBlock = namenode.append(src, clientName);
1481 } catch(RemoteException re) {
1482 throw re.unwrapRemoteException(AccessControlException.class,
1483 FileNotFoundException.class,
1484 SafeModeException.class,
1485 DSQuotaExceededException.class,
1486 UnsupportedOperationException.class,
1487 UnresolvedPathException.class,
1488 SnapshotAccessControlException.class);
1489 }
1490 return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
1491 lastBlock, stat, dfsClientConf.createChecksum());
1492 }
1493
1494 /**
1495 * Append to an existing HDFS file.
1496 *
1497 * @param src file name
1498 * @param buffersize buffer size
1499 * @param progress for reporting write-progress; null is acceptable.
1500 * @param statistics file system statistics; null is acceptable.
1501 * @return an output stream for writing into the file
1502 *
1503 * @see ClientProtocol#append(String, String)
1504 */
1505 public HdfsDataOutputStream append(final String src, final int buffersize,
1506 final Progressable progress, final FileSystem.Statistics statistics
1507 ) throws IOException {
1508 final DFSOutputStream out = append(src, buffersize, progress);
1509 return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
1510 }
1511
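/*
* Illustrative usage sketch (not part of the original source): appending to an
* existing HDFS file through the public append call above. The client instance,
* path, and payload are assumptions made only for this example.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   byte[] payload = new byte[] { 0x1, 0x2, 0x3 };   // data to append (assumed)
*   HdfsDataOutputStream out = client.append("/tmp/existing-file", 4096, null, null);
*   try {
*     out.write(payload);
*   } finally {
*     out.close();
*   }
*/
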
1512 private DFSOutputStream append(String src, int buffersize, Progressable progress)
1513 throws IOException {
1514 checkOpen();
1515 HdfsFileStatus stat = getFileInfo(src);
1516 if (stat == null) { // No file found
1517 throw new FileNotFoundException("failed to append to non-existent file "
1518 + src + " on client " + clientName);
1519 }
1520 final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
1521 beginFileLease(src, result);
1522 return result;
1523 }
1524
1525 /**
1526 * Set replication for an existing file.
1527 * @param src file name
1528 * @param replication the new replication factor for the file
1529 *
1530 * @see ClientProtocol#setReplication(String, short)
1531 */
1532 public boolean setReplication(String src, short replication)
1533 throws IOException {
1534 try {
1535 return namenode.setReplication(src, replication);
1536 } catch(RemoteException re) {
1537 throw re.unwrapRemoteException(AccessControlException.class,
1538 FileNotFoundException.class,
1539 SafeModeException.class,
1540 DSQuotaExceededException.class,
1541 UnresolvedPathException.class,
1542 SnapshotAccessControlException.class);
1543 }
1544 }
1545
1546 /**
1547 * Rename file or directory.
1548 * @see ClientProtocol#rename(String, String)
1549 * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
1550 */
1551 @Deprecated
1552 public boolean rename(String src, String dst) throws IOException {
1553 checkOpen();
1554 try {
1555 return namenode.rename(src, dst);
1556 } catch(RemoteException re) {
1557 throw re.unwrapRemoteException(AccessControlException.class,
1558 NSQuotaExceededException.class,
1559 DSQuotaExceededException.class,
1560 UnresolvedPathException.class,
1561 SnapshotAccessControlException.class);
1562 }
1563 }
1564
1565 /**
1566 * Move all blocks from the srcs files to trg and delete the srcs files.
1567 * See {@link ClientProtocol#concat(String, String [])}.
1568 */
1569 public void concat(String trg, String [] srcs) throws IOException {
1570 checkOpen();
1571 try {
1572 namenode.concat(trg, srcs);
1573 } catch(RemoteException re) {
1574 throw re.unwrapRemoteException(AccessControlException.class,
1575 UnresolvedPathException.class,
1576 SnapshotAccessControlException.class);
1577 }
1578 }

1579 /**
1580 * Rename file or directory.
1581 * @see ClientProtocol#rename2(String, String, Options.Rename...)
1582 */
1583 public void rename(String src, String dst, Options.Rename... options)
1584 throws IOException {
1585 checkOpen();
1586 try {
1587 namenode.rename2(src, dst, options);
1588 } catch(RemoteException re) {
1589 throw re.unwrapRemoteException(AccessControlException.class,
1590 DSQuotaExceededException.class,
1591 FileAlreadyExistsException.class,
1592 FileNotFoundException.class,
1593 ParentNotDirectoryException.class,
1594 SafeModeException.class,
1595 NSQuotaExceededException.class,
1596 UnresolvedPathException.class,
1597 SnapshotAccessControlException.class);
1598 }
1599 }

1600 /**
1601 * Delete file or directory.
1602 * See {@link ClientProtocol#delete(String, boolean)}.
1603 */
1604 @Deprecated
1605 public boolean delete(String src) throws IOException {
1606 checkOpen();
1607 return namenode.delete(src, true);
1608 }
1609
1610 /**
1611 * Delete a file or directory.
1612 * If the target is a non-empty directory, its contents are deleted
1613 * only when recursive is set to true.
1614 *
1615 * @see ClientProtocol#delete(String, boolean)
1616 */
1617 public boolean delete(String src, boolean recursive) throws IOException {
1618 checkOpen();
1619 try {
1620 return namenode.delete(src, recursive);
1621 } catch(RemoteException re) {
1622 throw re.unwrapRemoteException(AccessControlException.class,
1623 FileNotFoundException.class,
1624 SafeModeException.class,
1625 UnresolvedPathException.class,
1626 SnapshotAccessControlException.class);
1627 }
1628 }
1629
1630 /** Check whether the given path exists; implemented using getFileInfo(src).
1631 */
1632 public boolean exists(String src) throws IOException {
1633 checkOpen();
1634 return getFileInfo(src) != null;
1635 }
1636
1637 /**
1638 * Get a partial listing of the indicated directory.
1639 * No block locations are fetched.
1640 */
1641 public DirectoryListing listPaths(String src, byte[] startAfter)
1642 throws IOException {
1643 return listPaths(src, startAfter, false);
1644 }
1645
1646 /**
1647 * Get a partial listing of the indicated directory.
1648 *
1649 * Applications that want to fetch a listing starting from the first
1650 * entry in the directory should pass HdfsFileStatus.EMPTY_NAME as
1651 * startAfter (see the paging sketch after this method).
1652 *
1653 * @see ClientProtocol#getListing(String, byte[], boolean)
1654 */
1655 public DirectoryListing listPaths(String src, byte[] startAfter,
1656 boolean needLocation)
1657 throws IOException {
1658 checkOpen();
1659 try {
1660 return namenode.getListing(src, startAfter, needLocation);
1661 } catch(RemoteException re) {
1662 throw re.unwrapRemoteException(AccessControlException.class,
1663 FileNotFoundException.class,
1664 UnresolvedPathException.class);
1665 }
1666 }
1667
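/*
* Illustrative paging sketch (not part of the original source): iterating over a
* large directory with the partial-listing API above. The client instance and the
* directory path are assumptions made only for this example.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   byte[] cookie = HdfsFileStatus.EMPTY_NAME;       // start from the first entry
*   DirectoryListing page;
*   do {
*     page = client.listPaths("/user/data", cookie, false);
*     if (page == null) {
*       break;                                       // directory does not exist
*     }
*     for (HdfsFileStatus status : page.getPartialListing()) {
*       System.out.println(status.getLocalName());
*     }
*     cookie = page.getLastName();                   // resume after the last entry
*   } while (page.hasMore());
*/
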
1668 /**
1669 * Get the file info for a specific file or directory.
1670 * @param src The string representation of the path to the file
1671 * @return object containing information regarding the file
1672 * or null if file not found
1673 *
1674 * @see ClientProtocol#getFileInfo(String) for description of exceptions
1675 */
1676 public HdfsFileStatus getFileInfo(String src) throws IOException {
1677 checkOpen();
1678 try {
1679 return namenode.getFileInfo(src);
1680 } catch(RemoteException re) {
1681 throw re.unwrapRemoteException(AccessControlException.class,
1682 FileNotFoundException.class,
1683 UnresolvedPathException.class);
1684 }
1685 }
1686
1687 /**
1688 * Get the close status of a file.
1689 * @return true if the file has already been closed
1690 */
1691 public boolean isFileClosed(String src) throws IOException{
1692 checkOpen();
1693 try {
1694 return namenode.isFileClosed(src);
1695 } catch(RemoteException re) {
1696 throw re.unwrapRemoteException(AccessControlException.class,
1697 FileNotFoundException.class,
1698 UnresolvedPathException.class);
1699 }
1700 }
1701
1702 /**
1703 * Get the file info for a specific file or directory. If src
1704 * refers to a symlink then the FileStatus of the link is returned.
1705 * @param src path to a file or directory.
1706 *
1707 * For description of exceptions thrown
1708 * @see ClientProtocol#getFileLinkInfo(String)
1709 */
1710 public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
1711 checkOpen();
1712 try {
1713 return namenode.getFileLinkInfo(src);
1714 } catch(RemoteException re) {
1715 throw re.unwrapRemoteException(AccessControlException.class,
1716 UnresolvedPathException.class);
1717 }
1718 }
1719
1720 /**
1721 * Get the checksum of a file.
1722 * @param src The file path
1723 * @return The checksum
1724 * @see DistributedFileSystem#getFileChecksum(Path)
1725 */
1726 public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
1727 checkOpen();
1728 return getFileChecksum(src, clientName, namenode, socketFactory,
1729 dfsClientConf.socketTimeout, getDataEncryptionKey(),
1730 dfsClientConf.connectToDnViaHostname);
1731 }
1732
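/*
* Illustrative usage sketch (not part of the original source): comparing two HDFS
* files by their MD5-of-block-MD5s checksums. The client instance and the paths are
* assumptions made only for this example; the comparison is meaningful only when
* both files were written with the same checksum type and block/chunk sizes.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   MD5MD5CRC32FileChecksum a = client.getFileChecksum("/data/copy1");
*   MD5MD5CRC32FileChecksum b = client.getFileChecksum("/data/copy2");
*   boolean identical = a.equals(b);                 // same algorithm name and bytes
*/
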
1733 @InterfaceAudience.Private
1734 public void clearDataEncryptionKey() {
1735 LOG.debug("Clearing encryption key");
1736 synchronized (this) {
1737 encryptionKey = null;
1738 }
1739 }
1740
1741 /**
1742 * @return true if data sent between this client and DNs should be encrypted,
1743 * false otherwise.
1744 * @throws IOException in the event of error communicating with the NN
1745 */
1746 boolean shouldEncryptData() throws IOException {
1747 FsServerDefaults d = getServerDefaults();
1748 return d == null ? false : d.getEncryptDataTransfer();
1749 }
1750
1751 @InterfaceAudience.Private
1752 public DataEncryptionKey getDataEncryptionKey()
1753 throws IOException {
1754 if (shouldEncryptData()) {
1755 synchronized (this) {
1756 if (encryptionKey == null ||
1757 encryptionKey.expiryDate < Time.now()) {
1758 LOG.debug("Getting new encryption token from NN");
1759 encryptionKey = namenode.getDataEncryptionKey();
1760 }
1761 return encryptionKey;
1762 }
1763 } else {
1764 return null;
1765 }
1766 }
1767
1768 /**
1769 * Get the checksum of a file.
1770 * @param src The file path
1771 * @param clientName the name of the client requesting the checksum.
1772 * @param namenode the RPC proxy for the namenode
1773 * @param socketFactory to create sockets to connect to DNs
1774 * @param socketTimeout timeout to use when connecting and waiting for a response
1775 * @param encryptionKey the key needed to communicate with DNs in this cluster
1776 * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
1777 * @return The checksum
1778 */
1779 private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
1780 String clientName,
1781 ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
1782 DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
1783 throws IOException {
1784 //get all block locations
1785 LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1786 if (null == blockLocations) {
1787 throw new FileNotFoundException("File does not exist: " + src);
1788 }
1789 List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
1790 final DataOutputBuffer md5out = new DataOutputBuffer();
1791 int bytesPerCRC = -1;
1792 DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
1793 long crcPerBlock = 0;
1794 boolean refetchBlocks = false;
1795 int lastRetriedIndex = -1;
1796
1797 //get block checksum for each block
1798 for(int i = 0; i < locatedblocks.size(); i++) {
1799 if (refetchBlocks) { // refetch to get fresh tokens
1800 blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1801 if (null == blockLocations) {
1802 throw new FileNotFoundException("File does not exist: " + src);
1803 }
1804 locatedblocks = blockLocations.getLocatedBlocks();
1805 refetchBlocks = false;
1806 }
1807 LocatedBlock lb = locatedblocks.get(i);
1808 final ExtendedBlock block = lb.getBlock();
1809 final DatanodeInfo[] datanodes = lb.getLocations();
1810
1811 //try each datanode location of the block
1812 final int timeout = 3000 * datanodes.length + socketTimeout;
1813 boolean done = false;
1814 for(int j = 0; !done && j < datanodes.length; j++) {
1815 DataOutputStream out = null;
1816 DataInputStream in = null;
1817
1818 try {
1819 //connect to a datanode
1820 IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
1821 encryptionKey, datanodes[j], timeout);
1822 out = new DataOutputStream(new BufferedOutputStream(pair.out,
1823 HdfsConstants.SMALL_BUFFER_SIZE));
1824 in = new DataInputStream(pair.in);
1825
1826 if (LOG.isDebugEnabled()) {
1827 LOG.debug("write to " + datanodes[j] + ": "
1828 + Op.BLOCK_CHECKSUM + ", block=" + block);
1829 }
1830 // get block MD5
1831 new Sender(out).blockChecksum(block, lb.getBlockToken());
1832
1833 final BlockOpResponseProto reply =
1834 BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
1835
1836 if (reply.getStatus() != Status.SUCCESS) {
1837 if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
1838 throw new InvalidBlockTokenException();
1839 } else {
1840 throw new IOException("Bad response " + reply + " for block "
1841 + block + " from datanode " + datanodes[j]);
1842 }
1843 }
1844
1845 OpBlockChecksumResponseProto checksumData =
1846 reply.getChecksumResponse();
1847
1848 //read byte-per-checksum
1849 final int bpc = checksumData.getBytesPerCrc();
1850 if (i == 0) { //first block
1851 bytesPerCRC = bpc;
1852 }
1853 else if (bpc != bytesPerCRC) {
1854 throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
1855 + " but bytesPerCRC=" + bytesPerCRC);
1856 }
1857
1858 //read crc-per-block
1859 final long cpb = checksumData.getCrcPerBlock();
1860 if (locatedblocks.size() > 1 && i == 0) {
1861 crcPerBlock = cpb;
1862 }
1863
1864 //read md5
1865 final MD5Hash md5 = new MD5Hash(
1866 checksumData.getMd5().toByteArray());
1867 md5.write(md5out);
1868
1869 // read crc-type
1870 final DataChecksum.Type ct;
1871 if (checksumData.hasCrcType()) {
1872 ct = PBHelper.convert(checksumData
1873 .getCrcType());
1874 } else {
1875 LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
1876 "inferring checksum by reading first byte");
1877 ct = inferChecksumTypeByReading(
1878 clientName, socketFactory, socketTimeout, lb, datanodes[j],
1879 encryptionKey, connectToDnViaHostname);
1880 }
1881
1882 if (i == 0) { // first block
1883 crcType = ct;
1884 } else if (crcType != DataChecksum.Type.MIXED
1885 && crcType != ct) {
1886 // if crc types are mixed in a file
1887 crcType = DataChecksum.Type.MIXED;
1888 }
1889
1890 done = true;
1891
1892 if (LOG.isDebugEnabled()) {
1893 if (i == 0) {
1894 LOG.debug("set bytesPerCRC=" + bytesPerCRC
1895 + ", crcPerBlock=" + crcPerBlock);
1896 }
1897 LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
1898 }
1899 } catch (InvalidBlockTokenException ibte) {
1900 if (i > lastRetriedIndex) {
1901 if (LOG.isDebugEnabled()) {
1902 LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
1903 + "for file " + src + " for block " + block
1904 + " from datanode " + datanodes[j]
1905 + ". Will retry the block once.");
1906 }
1907 lastRetriedIndex = i;
1908 done = true; // actually it's not done; but we'll retry
1909 i--; // repeat at i-th block
1910 refetchBlocks = true;
1911 break;
1912 }
1913 } catch (IOException ie) {
1914 LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
1915 } finally {
1916 IOUtils.closeStream(in);
1917 IOUtils.closeStream(out);
1918 }
1919 }
1920
1921 if (!done) {
1922 throw new IOException("Fail to get block MD5 for " + block);
1923 }
1924 }
1925
1926 //compute file MD5
1927 final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
1928 switch (crcType) {
1929 case CRC32:
1930 return new MD5MD5CRC32GzipFileChecksum(bytesPerCRC,
1931 crcPerBlock, fileMD5);
1932 case CRC32C:
1933 return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
1934 crcPerBlock, fileMD5);
1935 default:
1936 // If there is no block allocated for the file,
1937 // return one with the magic entry that matches what previous
1938 // hdfs versions return.
1939 if (locatedblocks.size() == 0) {
1940 return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
1941 }
1942
1943 // we should never get here since the validity was checked
1944 // when getCrcType() was called above.
1945 return null;
1946 }
1947 }
1948
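/*
* Conceptual sketch (not part of the original source) of how the file checksum
* above is composed: the 16-byte MD5 of each block checksum is written in block
* order into a buffer, and the file-level MD5 is the digest of that concatenation.
* The perBlockMd5s array is an assumption made only for this example.
*
*   DataOutputBuffer md5out = new DataOutputBuffer();
*   for (byte[] blockMd5 : perBlockMd5s) {           // one 16-byte MD5 per block
*     new MD5Hash(blockMd5).write(md5out);
*   }
*   MD5Hash fileMd5 = MD5Hash.digest(md5out.getData());
*/
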
1949 /**
1950 * Connect to the given datanode's data transfer port, and return
1951 * the resulting IOStreamPair. This includes encryption wrapping, etc.
1952 */
1953 private static IOStreamPair connectToDN(
1954 SocketFactory socketFactory, boolean connectToDnViaHostname,
1955 DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
1956 throws IOException
1957 {
1958 boolean success = false;
1959 Socket sock = null;
1960 try {
1961 sock = socketFactory.createSocket();
1962 String dnAddr = dn.getXferAddr(connectToDnViaHostname);
1963 if (LOG.isDebugEnabled()) {
1964 LOG.debug("Connecting to datanode " + dnAddr);
1965 }
1966 NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
1967 sock.setSoTimeout(timeout);
1968
1969 OutputStream unbufOut = NetUtils.getOutputStream(sock);
1970 InputStream unbufIn = NetUtils.getInputStream(sock);
1971 IOStreamPair ret;
1972 if (encryptionKey != null) {
1973 ret = DataTransferEncryptor.getEncryptedStreams(
1974 unbufOut, unbufIn, encryptionKey);
1975 } else {
1976 ret = new IOStreamPair(unbufIn, unbufOut);
1977 }
1978 success = true;
1979 return ret;
1980 } finally {
1981 if (!success) {
1982 IOUtils.closeSocket(sock);
1983 }
1984 }
1985 }
1986
1987 /**
1988 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
1989 * for the first byte of that replica. This is used for compatibility
1990 * with older HDFS versions which did not include the checksum type in
1991 * OpBlockChecksumResponseProto.
1992 *
1993 * @param clientName the name of the DFSClient requesting the checksum
1994 * @param socketFactory to create sockets to connect to DNs
1995 * @param socketTimeout timeout to use when connecting and waiting for a response
1996 * @param lb the located block
1997 * @param dn the datanode to read the replica from
1998 * @return the inferred checksum type
1999 * @throws IOException if an error occurs
2000 */
2001 private static Type inferChecksumTypeByReading(
2002 String clientName, SocketFactory socketFactory, int socketTimeout,
2003 LocatedBlock lb, DatanodeInfo dn,
2004 DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
2005 throws IOException {
2006 IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
2007 encryptionKey, dn, socketTimeout);
2008
2009 try {
2010 DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
2011 HdfsConstants.SMALL_BUFFER_SIZE));
2012 DataInputStream in = new DataInputStream(pair.in);
2013
2014 new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
2015 0, 1, true, CachingStrategy.newDefaultStrategy());
2016 final BlockOpResponseProto reply =
2017 BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
2018
2019 if (reply.getStatus() != Status.SUCCESS) {
2020 if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
2021 throw new InvalidBlockTokenException();
2022 } else {
2023 throw new IOException("Bad response " + reply + " trying to read "
2024 + lb.getBlock() + " from datanode " + dn);
2025 }
2026 }
2027
2028 return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
2029 } finally {
2030 IOUtils.cleanup(null, pair.in, pair.out);
2031 }
2032 }
2033
2034 /**
2035 * Set permissions to a file or directory.
2036 * @param src path name.
2037 * @param permission the permission to set
2038 *
2039 * @see ClientProtocol#setPermission(String, FsPermission)
2040 */
2041 public void setPermission(String src, FsPermission permission)
2042 throws IOException {
2043 checkOpen();
2044 try {
2045 namenode.setPermission(src, permission);
2046 } catch(RemoteException re) {
2047 throw re.unwrapRemoteException(AccessControlException.class,
2048 FileNotFoundException.class,
2049 SafeModeException.class,
2050 UnresolvedPathException.class,
2051 SnapshotAccessControlException.class);
2052 }
2053 }
2054
2055 /**
2056 * Set file or directory owner.
2057 * @param src path name.
2058 * @param username user id.
2059 * @param groupname user group.
2060 *
2061 * @see ClientProtocol#setOwner(String, String, String)
2062 */
2063 public void setOwner(String src, String username, String groupname)
2064 throws IOException {
2065 checkOpen();
2066 try {
2067 namenode.setOwner(src, username, groupname);
2068 } catch(RemoteException re) {
2069 throw re.unwrapRemoteException(AccessControlException.class,
2070 FileNotFoundException.class,
2071 SafeModeException.class,
2072 UnresolvedPathException.class,
2073 SnapshotAccessControlException.class);
2074 }
2075 }
2076
2077 /**
2078 * @see ClientProtocol#getStats()
2079 */
2080 public FsStatus getDiskStatus() throws IOException {
2081 long rawNums[] = namenode.getStats();
2082 return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
2083 }
2084
2085 /**
2086 * Returns count of blocks with no good replicas left. Normally should be
2087 * zero.
2088 * @throws IOException
2089 */
2090 public long getMissingBlocksCount() throws IOException {
2091 return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
2092 }
2093
2094 /**
2095 * Returns count of blocks with one or more replicas missing.
2096 * @throws IOException
2097 */
2098 public long getUnderReplicatedBlocksCount() throws IOException {
2099 return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
2100 }
2101
2102 /**
2103 * Returns count of blocks with at least one replica marked corrupt.
2104 * @throws IOException
2105 */
2106 public long getCorruptBlocksCount() throws IOException {
2107 return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
2108 }
2109
2110 /**
2111 * @return a list in which each entry describes a corrupt file/block
2112 * @throws IOException
2113 */
2114 public CorruptFileBlocks listCorruptFileBlocks(String path,
2115 String cookie)
2116 throws IOException {
2117 return namenode.listCorruptFileBlocks(path, cookie);
2118 }
2119
2120 public DatanodeInfo[] datanodeReport(DatanodeReportType type)
2121 throws IOException {
2122 return namenode.getDatanodeReport(type);
2123 }
2124
2125 /**
2126 * Enter, leave or get safe mode.
2127 *
2128 * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
2129 */
2130 public boolean setSafeMode(SafeModeAction action) throws IOException {
2131 return setSafeMode(action, false);
2132 }
2133
2134 /**
2135 * Enter, leave or get safe mode.
2136 *
2137 * @param action
2138 * One of SafeModeAction.SAFEMODE_GET, SafeModeAction.SAFEMODE_ENTER
2139 * and SafeModeAction.SAFEMODE_LEAVE
2140 * @param isChecked
2141 * If true, then check only the active namenode's safemode status;
2142 * otherwise check the first namenode's status.
2143 * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
2144 */
2145 public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
2146 return namenode.setSafeMode(action, isChecked);
2147 }
2148
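/*
* Illustrative usage sketch (not part of the original source): querying and then
* leaving safe mode through the client. The client instance is an assumption made
* only for this example.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   if (client.setSafeMode(SafeModeAction.SAFEMODE_GET)) {
*     // the namenode reports it is in safe mode; ask it to leave
*     client.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
*   }
*/
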
2149 /**
2150 * Create one snapshot.
2151 *
2152 * @param snapshotRoot The directory where the snapshot is to be taken
2153 * @param snapshotName Name of the snapshot
2154 * @return the snapshot path.
2155 * @see ClientProtocol#createSnapshot(String, String)
2156 */
2157 public String createSnapshot(String snapshotRoot, String snapshotName)
2158 throws IOException {
2159 checkOpen();
2160 try {
2161 return namenode.createSnapshot(snapshotRoot, snapshotName);
2162 } catch(RemoteException re) {
2163 throw re.unwrapRemoteException();
2164 }
2165 }
2166
2167 /**
2168 * Delete a snapshot of a snapshottable directory.
2169 *
2170 * @param snapshotRoot The snapshottable directory that the
2171 * to-be-deleted snapshot belongs to
2172 * @param snapshotName The name of the to-be-deleted snapshot
2173 * @throws IOException
2174 * @see ClientProtocol#deleteSnapshot(String, String)
2175 */
2176 public void deleteSnapshot(String snapshotRoot, String snapshotName)
2177 throws IOException {
2178 try {
2179 namenode.deleteSnapshot(snapshotRoot, snapshotName);
2180 } catch(RemoteException re) {
2181 throw re.unwrapRemoteException();
2182 }
2183 }
2184
2185 /**
2186 * Rename a snapshot.
2187 * @param snapshotDir The directory path where the snapshot was taken
2188 * @param snapshotOldName Old name of the snapshot
2189 * @param snapshotNewName New name of the snapshot
2190 * @throws IOException
2191 * @see ClientProtocol#renameSnapshot(String, String, String)
2192 */
2193 public void renameSnapshot(String snapshotDir, String snapshotOldName,
2194 String snapshotNewName) throws IOException {
2195 checkOpen();
2196 try {
2197 namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
2198 } catch(RemoteException re) {
2199 throw re.unwrapRemoteException();
2200 }
2201 }
2202
2203 /**
2204 * Get all the current snapshottable directories.
2205 * @return All the current snapshottable directories
2206 * @throws IOException
2207 * @see ClientProtocol#getSnapshottableDirListing()
2208 */
2209 public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
2210 throws IOException {
2211 checkOpen();
2212 try {
2213 return namenode.getSnapshottableDirListing();
2214 } catch(RemoteException re) {
2215 throw re.unwrapRemoteException();
2216 }
2217 }
2218
2219 /**
2220 * Allow snapshot on a directory.
2221 *
2222 * @see ClientProtocol#allowSnapshot(String snapshotRoot)
2223 */
2224 public void allowSnapshot(String snapshotRoot) throws IOException {
2225 checkOpen();
2226 try {
2227 namenode.allowSnapshot(snapshotRoot);
2228 } catch (RemoteException re) {
2229 throw re.unwrapRemoteException();
2230 }
2231 }
2232
2233 /**
2234 * Disallow snapshot on a directory.
2235 *
2236 * @see ClientProtocol#disallowSnapshot(String snapshotRoot)
2237 */
2238 public void disallowSnapshot(String snapshotRoot) throws IOException {
2239 checkOpen();
2240 try {
2241 namenode.disallowSnapshot(snapshotRoot);
2242 } catch (RemoteException re) {
2243 throw re.unwrapRemoteException();
2244 }
2245 }
2246
2247 /**
2248 * Get the difference between two snapshots, or between a snapshot and the
2249 * current tree of a directory.
2250 * @see ClientProtocol#getSnapshotDiffReport(String, String, String)
2251 */
2252 public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
2253 String fromSnapshot, String toSnapshot) throws IOException {
2254 checkOpen();
2255 try {
2256 return namenode.getSnapshotDiffReport(snapshotDir,
2257 fromSnapshot, toSnapshot);
2258 } catch(RemoteException re) {
2259 throw re.unwrapRemoteException();
2260 }
2261 }
2262
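/*
* Illustrative snapshot workflow sketch (not part of the original source), tying the
* snapshot calls above together. The client instance, the directory, and the
* snapshot names are assumptions made only for this example.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   client.allowSnapshot("/projects/a");             // make the directory snapshottable
*   client.createSnapshot("/projects/a", "before-change");
*   // ... modify files under /projects/a ...
*   client.createSnapshot("/projects/a", "after-change");
*   SnapshotDiffReport diff = client.getSnapshotDiffReport("/projects/a",
*       "before-change", "after-change");
*   System.out.println(diff);
*/
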
2263 /**
2264 * Save namespace image.
2265 *
2266 * @see ClientProtocol#saveNamespace()
2267 */
2268 void saveNamespace() throws AccessControlException, IOException {
2269 try {
2270 namenode.saveNamespace();
2271 } catch(RemoteException re) {
2272 throw re.unwrapRemoteException(AccessControlException.class);
2273 }
2274 }
2275
2276 /**
2277 * Rolls the edit log on the active NameNode.
2278 * @return the txid of the new log segment
2279 *
2280 * @see ClientProtocol#rollEdits()
2281 */
2282 long rollEdits() throws AccessControlException, IOException {
2283 try {
2284 return namenode.rollEdits();
2285 } catch(RemoteException re) {
2286 throw re.unwrapRemoteException(AccessControlException.class);
2287 }
2288 }
2289
2290 /**
2291 * Enable/disable restoring of failed storage.
2292 *
2293 * @see ClientProtocol#restoreFailedStorage(String arg)
2294 */
2295 boolean restoreFailedStorage(String arg)
2296 throws AccessControlException, IOException{
2297 return namenode.restoreFailedStorage(arg);
2298 }
2299
2300 /**
2301 * Refresh the hosts and exclude files. (Rereads them.)
2302 * See {@link ClientProtocol#refreshNodes()}
2303 * for more details.
2304 *
2305 * @see ClientProtocol#refreshNodes()
2306 */
2307 public void refreshNodes() throws IOException {
2308 namenode.refreshNodes();
2309 }
2310
2311 /**
2312 * Dumps DFS data structures into the specified file.
2313 *
2314 * @see ClientProtocol#metaSave(String)
2315 */
2316 public void metaSave(String pathname) throws IOException {
2317 namenode.metaSave(pathname);
2318 }
2319
2320 /**
2321 * Requests the namenode to tell all datanodes to use a new, non-persistent
2322 * bandwidth value for dfs.balance.bandwidthPerSec.
2323 * See {@link ClientProtocol#setBalancerBandwidth(long)}
2324 * for more details.
2325 *
2326 * @see ClientProtocol#setBalancerBandwidth(long)
2327 */
2328 public void setBalancerBandwidth(long bandwidth) throws IOException {
2329 namenode.setBalancerBandwidth(bandwidth);
2330 }
2331
2332 /**
2333 * @see ClientProtocol#finalizeUpgrade()
2334 */
2335 public void finalizeUpgrade() throws IOException {
2336 namenode.finalizeUpgrade();
2337 }
2338
2339 /** Same as {@link #mkdirs(String, FsPermission, boolean)} with
2340 * permission == null and createParent == true. */
2341 @Deprecated
2342 public boolean mkdirs(String src) throws IOException {
2343 return mkdirs(src, null, true);
2344 }
2345
2346 /**
2347 * Create a directory (or hierarchy of directories) with the given
2348 * name and permission.
2349 *
2350 * @param src The path of the directory being created
2351 * @param permission The permission of the directory being created.
2352 * If permission == null, use {@link FsPermission#getDefault()}.
2353 * @param createParent create missing parent directory if true
2354 *
2355 * @return True if the operation succeeds.
2356 *
2357 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
2358 */
2359 public boolean mkdirs(String src, FsPermission permission,
2360 boolean createParent) throws IOException {
2361 if (permission == null) {
2362 permission = FsPermission.getDefault();
2363 }
2364 FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
2365 return primitiveMkdir(src, masked, createParent);
2366 }
2367
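/*
* Illustrative usage sketch (not part of the original source): creating a directory
* tree with an explicit permission. The client instance and the path are assumptions
* made only for this example.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   boolean ok = client.mkdirs("/projects/a/logs",
*       new FsPermission((short) 0755), true);       // also create missing parents
*/
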
2368 /**
2369 * Same as {@link #mkdirs(String, FsPermission, boolean)} except
2370 * that the permission has already been masked against the umask.
2371 */
2372 public boolean primitiveMkdir(String src, FsPermission absPermission)
2373 throws IOException {
2374 return primitiveMkdir(src, absPermission, true);
2375 }
2376
2377 /**
2378 * Same as {@link #mkdirs(String, FsPermission, boolean)} except
2379 * that the permission has already been masked against the umask.
2380 */
2381 public boolean primitiveMkdir(String src, FsPermission absPermission,
2382 boolean createParent)
2383 throws IOException {
2384 checkOpen();
2385 if (absPermission == null) {
2386 absPermission =
2387 FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
2388 }
2389
2390 if(LOG.isDebugEnabled()) {
2391 LOG.debug(src + ": masked=" + absPermission);
2392 }
2393 try {
2394 return namenode.mkdirs(src, absPermission, createParent);
2395 } catch(RemoteException re) {
2396 throw re.unwrapRemoteException(AccessControlException.class,
2397 InvalidPathException.class,
2398 FileAlreadyExistsException.class,
2399 FileNotFoundException.class,
2400 ParentNotDirectoryException.class,
2401 SafeModeException.class,
2402 NSQuotaExceededException.class,
2403 DSQuotaExceededException.class,
2404 UnresolvedPathException.class,
2405 SnapshotAccessControlException.class);
2406 }
2407 }
2408
2409 /**
2410 * Get {@link ContentSummary} rooted at the specified directory.
2411 * @param src The string representation of the path
2412 *
2413 * @see ClientProtocol#getContentSummary(String)
2414 */
2415 ContentSummary getContentSummary(String src) throws IOException {
2416 try {
2417 return namenode.getContentSummary(src);
2418 } catch(RemoteException re) {
2419 throw re.unwrapRemoteException(AccessControlException.class,
2420 FileNotFoundException.class,
2421 UnresolvedPathException.class);
2422 }
2423 }
2424
2425 /**
2426 * Sets or resets quotas for a directory.
2427 * @see ClientProtocol#setQuota(String, long, long)
2428 */
2429 void setQuota(String src, long namespaceQuota, long diskspaceQuota)
2430 throws IOException {
2431 // sanity check
2432 if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2433 namespaceQuota != HdfsConstants.QUOTA_RESET) ||
2434 (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2435 diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
2436 throw new IllegalArgumentException("Invalid values for quota : " +
2437 namespaceQuota + " and " +
2438 diskspaceQuota);
2439
2440 }
2441 try {
2442 namenode.setQuota(src, namespaceQuota, diskspaceQuota);
2443 } catch(RemoteException re) {
2444 throw re.unwrapRemoteException(AccessControlException.class,
2445 FileNotFoundException.class,
2446 NSQuotaExceededException.class,
2447 DSQuotaExceededException.class,
2448 UnresolvedPathException.class,
2449 SnapshotAccessControlException.class);
2450 }
2451 }
2452
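/*
* Illustrative usage sketch (not part of the original source): setting and clearing
* quotas on a directory. The client instance and the path are assumptions made only
* for this example; QUOTA_DONT_SET leaves one of the two quotas untouched and
* QUOTA_RESET removes it.
*
*   DFSClient client = ...;                          // an open DFSClient (assumed)
*   // limit /projects/a to 1 million names and 10 TB of raw disk space
*   client.setQuota("/projects/a", 1000000L, 10L * 1024 * 1024 * 1024 * 1024);
*   // later, clear only the namespace quota and keep the diskspace quota
*   client.setQuota("/projects/a", HdfsConstants.QUOTA_RESET,
*       HdfsConstants.QUOTA_DONT_SET);
*/
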
2453 /**
2454 * Set the modification and access time of a file.
2455 *
2456 * @see ClientProtocol#setTimes(String, long, long)
2457 */
2458 public void setTimes(String src, long mtime, long atime) throws IOException {
2459 checkOpen();
2460 try {
2461 namenode.setTimes(src, mtime, atime);
2462 } catch(RemoteException re) {
2463 throw re.unwrapRemoteException(AccessControlException.class,
2464 FileNotFoundException.class,
2465 UnresolvedPathException.class,
2466 SnapshotAccessControlException.class);
2467 }
2468 }
2469
2470 /**
2471 * @deprecated use {@link HdfsDataInputStream} instead.
2472 */
2473 @Deprecated
2474 public static class DFSDataInputStream extends HdfsDataInputStream {
2475
2476 public DFSDataInputStream(DFSInputStream in) throws IOException {
2477 super(in);
2478 }
2479 }
2480
2481 void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
2482 DatanodeInfo [] dnArr = { dn };
2483 LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
2484 reportChecksumFailure(file, lblocks);
2485 }
2486
2487 // just reports checksum failure and ignores any exception during the report.
2488 void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
2489 try {
2490 reportBadBlocks(lblocks);
2491 } catch (IOException ie) {
2492 LOG.info("Found corruption while reading " + file
2493 + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
2494 }
2495 }
2496
2497 @Override
2498 public String toString() {
2499 return getClass().getSimpleName() + "[clientName=" + clientName
2500 + ", ugi=" + ugi + "]";
2501 }
2502
2503 public DomainSocketFactory getDomainSocketFactory() {
2504 return domainSocketFactory;
2505 }
2506
2507 public void disableLegacyBlockReaderLocal() {
2508 shouldUseLegacyBlockReaderLocal = false;
2509 }
2510
2511 public boolean useLegacyBlockReaderLocal() {
2512 return shouldUseLegacyBlockReaderLocal;
2513 }
2514
2515 public CachingStrategy getDefaultReadCachingStrategy() {
2516 return defaultReadCachingStrategy;
2517 }
2518
2519 public CachingStrategy getDefaultWriteCachingStrategy() {
2520 return defaultWriteCachingStrategy;
2521 }
2522 }