001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    
019    package org.apache.hadoop.hdfs;
020    
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
036    
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
058    
059    import javax.net.SocketFactory;
060    
061    import org.apache.commons.cli.CommandLine;
062    import org.apache.commons.cli.CommandLineParser;
063    import org.apache.commons.cli.Option;
064    import org.apache.commons.cli.Options;
065    import org.apache.commons.cli.ParseException;
066    import org.apache.commons.cli.PosixParser;
067    import org.apache.commons.logging.Log;
068    import org.apache.commons.logging.LogFactory;
069    import org.apache.hadoop.HadoopIllegalArgumentException;
070    import org.apache.hadoop.classification.InterfaceAudience;
071    import org.apache.hadoop.conf.Configuration;
072    import org.apache.hadoop.fs.BlockLocation;
073    import org.apache.hadoop.fs.CommonConfigurationKeys;
074    import org.apache.hadoop.fs.FileSystem;
075    import org.apache.hadoop.fs.Path;
076    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
077    import org.apache.hadoop.hdfs.protocol.DatanodeID;
078    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
079    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
080    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
081    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
082    import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
083    import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
084    import org.apache.hadoop.hdfs.server.namenode.NameNode;
085    import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
086    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
087    import org.apache.hadoop.http.HttpConfig;
088    import org.apache.hadoop.http.HttpServer2;
089    import org.apache.hadoop.ipc.ProtobufRpcEngine;
090    import org.apache.hadoop.ipc.RPC;
091    import org.apache.hadoop.net.NetUtils;
092    import org.apache.hadoop.net.NodeBase;
093    import org.apache.hadoop.security.SecurityUtil;
094    import org.apache.hadoop.security.UserGroupInformation;
095    import org.apache.hadoop.security.authorize.AccessControlList;
096    import org.apache.hadoop.util.StringUtils;
097    import org.apache.hadoop.util.ToolRunner;
098    
099    import com.google.common.annotations.VisibleForTesting;
100    import com.google.common.base.Charsets;
101    import com.google.common.base.Joiner;
102    import com.google.common.base.Preconditions;
103    import com.google.common.collect.Lists;
104    import com.google.common.collect.Maps;
105    import com.google.common.primitives.SignedBytes;
106    import com.google.protobuf.BlockingService;
107    
108    @InterfaceAudience.Private
109    public class DFSUtil {
  // Commons-logging logger shared by the static helpers in this class.
  public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
  
  // Canonical zero-length byte array; used in place of null in comparisons.
  public static final byte[] EMPTY_BYTES = {};
113    
114      /** Compare two byte arrays by lexicographical order. */
115      public static int compareBytes(byte[] left, byte[] right) {
116        if (left == null) {
117          left = EMPTY_BYTES;
118        }
119        if (right == null) {
120          right = EMPTY_BYTES;
121        }
122        return SignedBytes.lexicographicalComparator().compare(left, right);
123      }
124    
  private DFSUtil() { /* Hidden constructor */ }

  // Lazily creates one Random instance per calling thread.
  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };
  
  // Lazily creates one SecureRandom instance per calling thread.
  private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
    @Override
    protected SecureRandom initialValue() {
      return new SecureRandom();
    }
  };
139    
  /** @return a pseudo random number generator (one instance per thread). */
  public static Random getRandom() {
    return RANDOM.get();
  }
144      
  /** @return a secure random number generator (one instance per thread). */
  public static SecureRandom getSecureRandom() {
    return SECURE_RANDOM.get();
  }
149    
150      /** Shuffle the elements in the given array. */
151      public static <T> T[] shuffle(final T[] array) {
152        if (array != null && array.length > 0) {
153          final Random random = getRandom();
154          for (int n = array.length; n > 1; ) {
155            final int randomIndex = random.nextInt(n);
156            n--;
157            if (n != randomIndex) {
158              final T tmp = array[randomIndex];
159              array[randomIndex] = array[n];
160              array[n] = tmp;
161            }
162          }
163        }
164        return array;
165      }
166    
167      /**
168       * Compartor for sorting DataNodeInfo[] based on decommissioned states.
169       * Decommissioned nodes are moved to the end of the array on sorting with
170       * this compartor.
171       */
172      public static final Comparator<DatanodeInfo> DECOM_COMPARATOR = 
173        new Comparator<DatanodeInfo>() {
174          @Override
175          public int compare(DatanodeInfo a, DatanodeInfo b) {
176            return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
177              a.isDecommissioned() ? 1 : -1;
178          }
179        };
180        
181          
182      /**
183       * Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
184       * Decommissioned/stale nodes are moved to the end of the array on sorting
185       * with this comparator.
186       */ 
187      @InterfaceAudience.Private 
188      public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
189        private final long staleInterval;
190    
191        /**
192         * Constructor of DecomStaleComparator
193         * 
194         * @param interval
195         *          The time interval for marking datanodes as stale is passed from
196         *          outside, since the interval may be changed dynamically
197         */
198        public DecomStaleComparator(long interval) {
199          this.staleInterval = interval;
200        }
201    
202        @Override
203        public int compare(DatanodeInfo a, DatanodeInfo b) {
204          // Decommissioned nodes will still be moved to the end of the list
205          if (a.isDecommissioned()) {
206            return b.isDecommissioned() ? 0 : 1;
207          } else if (b.isDecommissioned()) {
208            return -1;
209          }
210          // Stale nodes will be moved behind the normal nodes
211          boolean aStale = a.isStale(staleInterval);
212          boolean bStale = b.isStale(staleInterval);
213          return aStale == bStale ? 0 : (aStale ? 1 : -1);
214        }
215      }    
216        
217      /**
218       * Address matcher for matching an address to local address
219       */
220      static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
221        @Override
222        public boolean match(InetSocketAddress s) {
223          return NetUtils.isLocalAddress(s.getAddress());
224        };
225      };
226      
227      /**
228       * Whether the pathname is valid.  Currently prohibits relative paths, 
229       * names which contain a ":" or "//", or other non-canonical paths.
230       */
231      public static boolean isValidName(String src) {
232        // Path must be absolute.
233        if (!src.startsWith(Path.SEPARATOR)) {
234          return false;
235        }
236          
237        // Check for ".." "." ":" "/"
238        String[] components = StringUtils.split(src, '/');
239        for (int i = 0; i < components.length; i++) {
240          String element = components[i];
241          if (element.equals(".")  ||
242              (element.indexOf(":") >= 0)  ||
243              (element.indexOf("/") >= 0)) {
244            return false;
245          }
246          // ".." is allowed in path starting with /.reserved/.inodes
247          if (element.equals("..")) {
248            if (components.length > 4
249                && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
250                && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
251              continue;
252            }
253            return false;
254          }
255          // The string may start or end with a /, but not have
256          // "//" in the middle.
257          if (element.isEmpty() && i != components.length - 1 &&
258              i != 0) {
259            return false;
260          }
261        }
262        return true;
263      }
264    
265      /**
266       * Checks if a string is a valid path component. For instance, components
267       * cannot contain a ":" or "/", and cannot be equal to a reserved component
268       * like ".snapshot".
269       * <p>
270       * The primary use of this method is for validating paths when loading the
271       * FSImage. During normal NN operation, paths are sometimes allowed to
272       * contain reserved components.
273       * 
274       * @return If component is valid
275       */
276      public static boolean isValidNameForComponent(String component) {
277        if (component.equals(".") ||
278            component.equals("..") ||
279            component.indexOf(":") >= 0 ||
280            component.indexOf("/") >= 0) {
281          return false;
282        }
283        return !isReservedPathComponent(component);
284      }
285    
286    
287      /**
288       * Returns if the component is reserved.
289       * 
290       * <p>
291       * Note that some components are only reserved under certain directories, e.g.
292       * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
293       * 
294       * @param component
295       * @return if the component is reserved
296       */
297      public static boolean isReservedPathComponent(String component) {
298        for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
299          if (component.equals(reserved)) {
300            return true;
301          }
302        }
303        return false;
304      }
305    
306      /**
307       * Converts a byte array to a string using UTF8 encoding.
308       */
309      public static String bytes2String(byte[] bytes) {
310        return bytes2String(bytes, 0, bytes.length);
311      }
312      
313      /**
314       * Decode a specific range of bytes of the given byte array to a string
315       * using UTF8.
316       * 
317       * @param bytes The bytes to be decoded into characters
318       * @param offset The index of the first byte to decode
319       * @param length The number of bytes to decode
320       * @return The decoded string
321       */
322      public static String bytes2String(byte[] bytes, int offset, int length) {
323        try {
324          return new String(bytes, offset, length, "UTF8");
325        } catch(UnsupportedEncodingException e) {
326          assert false : "UTF8 encoding is not supported ";
327        }
328        return null;
329      }
330    
331      /**
332       * Converts a string to a byte array using UTF8 encoding.
333       */
334      public static byte[] string2Bytes(String str) {
335        return str.getBytes(Charsets.UTF_8);
336      }
337    
338      /**
339       * Given a list of path components returns a path as a UTF8 String
340       */
341      public static String byteArray2PathString(byte[][] pathComponents) {
342        if (pathComponents.length == 0) {
343          return "";
344        } else if (pathComponents.length == 1
345            && (pathComponents[0] == null || pathComponents[0].length == 0)) {
346          return Path.SEPARATOR;
347        }
348        StringBuilder result = new StringBuilder();
349        for (int i = 0; i < pathComponents.length; i++) {
350          result.append(new String(pathComponents[i], Charsets.UTF_8));
351          if (i < pathComponents.length - 1) {
352            result.append(Path.SEPARATOR_CHAR);
353          }
354        }
355        return result.toString();
356      }
357    
358      /**
359       * Converts a list of path components into a path using Path.SEPARATOR.
360       * 
361       * @param components Path components
362       * @return Combined path as a UTF-8 string
363       */
364      public static String strings2PathString(String[] components) {
365        if (components.length == 0) {
366          return "";
367        }
368        if (components.length == 1) {
369          if (components[0] == null || components[0].isEmpty()) {
370            return Path.SEPARATOR;
371          }
372        }
373        return Joiner.on(Path.SEPARATOR).join(components);
374      }
375    
376      /**
377       * Given a list of path components returns a byte array
378       */
379      public static byte[] byteArray2bytes(byte[][] pathComponents) {
380        if (pathComponents.length == 0) {
381          return EMPTY_BYTES;
382        } else if (pathComponents.length == 1
383            && (pathComponents[0] == null || pathComponents[0].length == 0)) {
384          return new byte[]{(byte) Path.SEPARATOR_CHAR};
385        }
386        int length = 0;
387        for (int i = 0; i < pathComponents.length; i++) {
388          length += pathComponents[i].length;
389          if (i < pathComponents.length - 1) {
390            length++; // for SEPARATOR
391          }
392        }
393        byte[] path = new byte[length];
394        int index = 0;
395        for (int i = 0; i < pathComponents.length; i++) {
396          System.arraycopy(pathComponents[i], 0, path, index,
397              pathComponents[i].length);
398          index += pathComponents[i].length;
399          if (i < pathComponents.length - 1) {
400            path[index] = (byte) Path.SEPARATOR_CHAR;
401            index++;
402          }
403        }
404        return path;
405      }
406    
407      /** Convert an object representing a path to a string. */
408      public static String path2String(final Object path) {
409        return path == null? null
410            : path instanceof String? (String)path
411            : path instanceof byte[][]? byteArray2PathString((byte[][])path)
412            : path.toString();
413      }
414    
415      /**
416       * Splits the array of bytes into array of arrays of bytes
417       * on byte separator
418       * @param bytes the array of bytes to split
419       * @param separator the delimiting byte
420       */
421      public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
422        return bytes2byteArray(bytes, bytes.length, separator);
423      }
424    
425      /**
426       * Splits first len bytes in bytes to array of arrays of bytes
427       * on byte separator
428       * @param bytes the byte array to split
429       * @param len the number of bytes to split
430       * @param separator the delimiting byte
431       */
432      public static byte[][] bytes2byteArray(byte[] bytes,
433                                             int len,
434                                             byte separator) {
435        assert len <= bytes.length;
436        int splits = 0;
437        if (len == 0) {
438          return new byte[][]{null};
439        }
440        // Count the splits. Omit multiple separators and the last one
441        for (int i = 0; i < len; i++) {
442          if (bytes[i] == separator) {
443            splits++;
444          }
445        }
446        int last = len - 1;
447        while (last > -1 && bytes[last--] == separator) {
448          splits--;
449        }
450        if (splits == 0 && bytes[0] == separator) {
451          return new byte[][]{null};
452        }
453        splits++;
454        byte[][] result = new byte[splits][];
455        int startIndex = 0;
456        int nextIndex = 0;
457        int index = 0;
458        // Build the splits
459        while (index < splits) {
460          while (nextIndex < len && bytes[nextIndex] != separator) {
461            nextIndex++;
462          }
463          result[index] = new byte[nextIndex - startIndex];
464          System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
465                  - startIndex);
466          index++;
467          startIndex = nextIndex + 1;
468          nextIndex = startIndex;
469        }
470        return result;
471      }
472      
473      /**
474       * Convert a LocatedBlocks to BlockLocations[]
475       * @param blocks a LocatedBlocks
476       * @return an array of BlockLocations
477       */
478      public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
479        if (blocks == null) {
480          return new BlockLocation[0];
481        }
482        return locatedBlocks2Locations(blocks.getLocatedBlocks());
483      }
484      
485      /**
486       * Convert a List<LocatedBlock> to BlockLocation[]
487       * @param blocks A List<LocatedBlock> to be converted
488       * @return converted array of BlockLocation
489       */
490      public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
491        if (blocks == null) {
492          return new BlockLocation[0];
493        }
494        int nrBlocks = blocks.size();
495        BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
496        if (nrBlocks == 0) {
497          return blkLocations;
498        }
499        int idx = 0;
500        for (LocatedBlock blk : blocks) {
501          assert idx < nrBlocks : "Incorrect index";
502          DatanodeInfo[] locations = blk.getLocations();
503          String[] hosts = new String[locations.length];
504          String[] xferAddrs = new String[locations.length];
505          String[] racks = new String[locations.length];
506          for (int hCnt = 0; hCnt < locations.length; hCnt++) {
507            hosts[hCnt] = locations[hCnt].getHostName();
508            xferAddrs[hCnt] = locations[hCnt].getXferAddr();
509            NodeBase node = new NodeBase(xferAddrs[hCnt], 
510                                         locations[hCnt].getNetworkLocation());
511            racks[hCnt] = node.toString();
512          }
513          DatanodeInfo[] cachedLocations = blk.getCachedLocations();
514          String[] cachedHosts = new String[cachedLocations.length];
515          for (int i=0; i<cachedLocations.length; i++) {
516            cachedHosts[i] = cachedLocations[i].getHostName();
517          }
518          blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
519                                                racks,
520                                                blk.getStartOffset(),
521                                                blk.getBlockSize(),
522                                                blk.isCorrupt());
523          idx++;
524        }
525        return blkLocations;
526      }
527    
528      /**
529       * Returns collection of nameservice Ids from the configuration.
530       * @param conf configuration
531       * @return collection of nameservice Ids, or null if not specified
532       */
533      public static Collection<String> getNameServiceIds(Configuration conf) {
534        return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
535      }
536    
537      /**
538       * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
539       * returns a list with a single null value.
540       */
541      private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
542        if (coll == null || coll.isEmpty()) {
543          return Collections.singletonList(null);
544        } else {
545          return coll;
546        }
547      }
548      
549      /**
550       * Namenode HighAvailability related configuration.
551       * Returns collection of namenode Ids from the configuration. One logical id
552       * for each namenode in the in the HA setup.
553       * 
554       * @param conf configuration
555       * @param nsId the nameservice ID to look at, or null for non-federated 
556       * @return collection of namenode Ids
557       */
558      public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
559        String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
560        return conf.getTrimmedStringCollection(key);
561      }
562      
563      /**
564       * Given a list of keys in the order of preference, returns a value
565       * for the key in the given order from the configuration.
566       * @param defaultValue default value to return, when key was not found
567       * @param keySuffix suffix to add to the key, if it is not null
568       * @param conf Configuration
569       * @param keys list of keys in the order of preference
570       * @return value of the key or default if a key was not found in configuration
571       */
572      private static String getConfValue(String defaultValue, String keySuffix,
573          Configuration conf, String... keys) {
574        String value = null;
575        for (String key : keys) {
576          key = addSuffix(key, keySuffix);
577          value = conf.get(key);
578          if (value != null) {
579            break;
580          }
581        }
582        if (value == null) {
583          value = defaultValue;
584        }
585        return value;
586      }
587      
588      /** Add non empty and non null suffix to a key */
589      private static String addSuffix(String key, String suffix) {
590        if (suffix == null || suffix.isEmpty()) {
591          return key;
592        }
593        assert !suffix.startsWith(".") :
594          "suffix '" + suffix + "' should not already have '.' prepended.";
595        return key + "." + suffix;
596      }
597      
  /**
   * Concatenate list of suffix strings '.' separated.
   * Null entries are skipped; returns null if the argument itself is null.
   */
  private static String concatSuffixes(String... suffixes) {
    if (suffixes == null) {
      return null;
    }
    return Joiner.on(".").skipNulls().join(suffixes);
  }
605      
606      /**
607       * Return configuration key of format key.suffix1.suffix2...suffixN
608       */
609      public static String addKeySuffixes(String key, String... suffixes) {
610        String keySuffix = concatSuffixes(suffixes);
611        return addSuffix(key, keySuffix);
612      }
613      
614      /**
615       * Returns the configured address for all NameNodes in the cluster.
616       * @param conf configuration
617       * @param defaultAddress default address to return in case key is not found.
618       * @param keys Set of keys to look for in the order of preference
619       * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
620       */
621      private static Map<String, Map<String, InetSocketAddress>>
622        getAddresses(Configuration conf,
623          String defaultAddress, String... keys) {
624        Collection<String> nameserviceIds = getNameServiceIds(conf);
625        
626        // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
627        // across all of the configured nameservices and namenodes.
628        Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
629        for (String nsId : emptyAsSingletonNull(nameserviceIds)) {
630          Map<String, InetSocketAddress> isas =
631            getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
632          if (!isas.isEmpty()) {
633            ret.put(nsId, isas);
634          }
635        }
636        return ret;
637      }
638      
639      /**
640       * Get all of the RPC addresses of the individual NNs in a given nameservice.
641       * 
642       * @param conf Configuration
643       * @param nsId the nameservice whose NNs addresses we want.
644       * @param defaultValue default address to return in case key is not found.
645       * @return A map from nnId -> RPC address of each NN in the nameservice.
646       */
647      public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
648          Configuration conf, String nsId, String defaultValue) {
649        return getAddressesForNameserviceId(conf, nsId, defaultValue,
650            DFS_NAMENODE_RPC_ADDRESS_KEY);
651      }
652    
653      private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
654          Configuration conf, String nsId, String defaultValue,
655          String... keys) {
656        Collection<String> nnIds = getNameNodeIds(conf, nsId);
657        Map<String, InetSocketAddress> ret = Maps.newHashMap();
658        for (String nnId : emptyAsSingletonNull(nnIds)) {
659          String suffix = concatSuffixes(nsId, nnId);
660          String address = getConfValue(defaultValue, suffix, conf, keys);
661          if (address != null) {
662            InetSocketAddress isa = NetUtils.createSocketAddr(address);
663            if (isa.isUnresolved()) {
664              LOG.warn("Namenode for " + nsId +
665                       " remains unresolved for ID " + nnId +
666                       ".  Check your hdfs-site.xml file to " +
667                       "ensure namenodes are configured properly.");
668            }
669            ret.put(nnId, isa);
670          }
671        }
672        return ret;
673      }
674    
675      /**
676       * @return a collection of all configured NN Kerberos principals.
677       */
678      public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
679        Set<String> principals = new HashSet<String>();
680        for (String nsId : DFSUtil.getNameServiceIds(conf)) {
681          if (HAUtil.isHAEnabled(conf, nsId)) {
682            for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
683              Configuration confForNn = new Configuration(conf);
684              NameNode.initializeGenericKeys(confForNn, nsId, nnId);
685              String principal = SecurityUtil.getServerPrincipal(confForNn
686                  .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
687                  NameNode.getAddress(confForNn).getHostName());
688              principals.add(principal);
689            }
690          } else {
691            Configuration confForNn = new Configuration(conf);
692            NameNode.initializeGenericKeys(confForNn, nsId, null);
693            String principal = SecurityUtil.getServerPrincipal(confForNn
694                .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
695                NameNode.getAddress(confForNn).getHostName());
696            principals.add(principal);
697          }
698        }
699    
700        return principals;
701      }
702    
703      /**
704       * Returns list of InetSocketAddress corresponding to HA NN RPC addresses from
705       * the configuration.
706       * 
707       * @param conf configuration
708       * @return list of InetSocketAddresses
709       */
710      public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
711          Configuration conf) {
712        return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
713      }
714    
715      /**
716       * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
717       * the configuration.
718       *
719       * @return list of InetSocketAddresses
720       */
721      public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
722          Configuration conf, String scheme) {
723        if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
724          return getAddresses(conf, null,
725              DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
726        } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
727          return getAddresses(conf, null,
728              DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
729        } else {
730          throw new IllegalArgumentException("Unsupported scheme: " + scheme);
731        }
732      }
733    
734      /**
735       * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS resolver
736       * when the URL points to an non-HA cluster. When the URL points to an HA
737       * cluster, the resolver further resolves the logical name (i.e., the authority
738       * in the URL) into real namenode addresses.
739       */
740      public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
741          throws IOException {
742        int defaultPort;
743        String scheme = uri.getScheme();
744        if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
745          defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
746        } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
747          defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
748        } else {
749          throw new IllegalArgumentException("Unsupported scheme: " + scheme);
750        }
751    
752        ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
753    
754        if (!HAUtil.isLogicalUri(conf, uri)) {
755          InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
756              defaultPort);
757          ret.add(addr);
758    
759        } else {
760          Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
761              .getHaNnWebHdfsAddresses(conf, scheme);
762          // Extract the entry corresponding to the logical name.
763          Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost());
764          for (InetSocketAddress addr : addrs.values()) {
765            ret.add(addr);
766          }
767        }
768    
769        InetSocketAddress[] r = new InetSocketAddress[ret.size()];
770        return ret.toArray(r);
771      }
772      
773      /**
774       * Returns list of InetSocketAddress corresponding to  backup node rpc 
775       * addresses from the configuration.
776       * 
777       * @param conf configuration
778       * @return list of InetSocketAddresses
779       * @throws IOException on error
780       */
781      public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
782          Configuration conf) throws IOException {
783        Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
784            null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
785        if (addressList.isEmpty()) {
786          throw new IOException("Incorrect configuration: backup node address "
787              + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
788        }
789        return addressList;
790      }
791    
792      /**
793       * Returns list of InetSocketAddresses of corresponding to secondary namenode
794       * http addresses from the configuration.
795       * 
796       * @param conf configuration
797       * @return list of InetSocketAddresses
798       * @throws IOException on error
799       */
800      public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
801          Configuration conf) throws IOException {
802        Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
803            DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
804        if (addressList.isEmpty()) {
805          throw new IOException("Incorrect configuration: secondary namenode address "
806              + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
807        }
808        return addressList;
809      }
810    
811      /**
812       * Returns list of InetSocketAddresses corresponding to namenodes from the
813       * configuration. Note this is to be used by datanodes to get the list of
814       * namenode addresses to talk to.
815       * 
816       * Returns namenode address specifically configured for datanodes (using
817       * service ports), if found. If not, regular RPC address configured for other
818       * clients is returned.
819       * 
820       * @param conf configuration
821       * @return list of InetSocketAddress
822       * @throws IOException on error
823       */
824      public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
825          Configuration conf) throws IOException {
826        // Use default address as fall back
827        String defaultAddress;
828        try {
829          defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
830        } catch (IllegalArgumentException e) {
831          defaultAddress = null;
832        }
833        
834        Map<String, Map<String, InetSocketAddress>> addressList =
835          getAddresses(conf, defaultAddress,
836            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
837        if (addressList.isEmpty()) {
838          throw new IOException("Incorrect configuration: namenode address "
839              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
840              + DFS_NAMENODE_RPC_ADDRESS_KEY
841              + " is not configured.");
842        }
843        return addressList;
844      }
845      
846      /**
847       * Flatten the given map, as returned by other functions in this class,
848       * into a flat list of {@link ConfiguredNNAddress} instances.
849       */
850      public static List<ConfiguredNNAddress> flattenAddressMap(
851          Map<String, Map<String, InetSocketAddress>> map) {
852        List<ConfiguredNNAddress> ret = Lists.newArrayList();
853        
854        for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
855          map.entrySet()) {
856          String nsId = entry.getKey();
857          Map<String, InetSocketAddress> nnMap = entry.getValue();
858          for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
859            String nnId = e2.getKey();
860            InetSocketAddress addr = e2.getValue();
861            
862            ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
863          }
864        }
865        return ret;
866      }
867    
868      /**
869       * Format the given map, as returned by other functions in this class,
870       * into a string suitable for debugging display. The format of this string
871       * should not be considered an interface, and is liable to change.
872       */
873      public static String addressMapToString(
874          Map<String, Map<String, InetSocketAddress>> map) {
875        StringBuilder b = new StringBuilder();
876        for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
877             map.entrySet()) {
878          String nsId = entry.getKey();
879          Map<String, InetSocketAddress> nnMap = entry.getValue();
880          b.append("Nameservice <").append(nsId).append(">:").append("\n");
881          for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
882            b.append("  NN ID ").append(e2.getKey())
883              .append(" => ").append(e2.getValue()).append("\n");
884          }
885        }
886        return b.toString();
887      }
888      
889      public static String nnAddressesAsString(Configuration conf) {
890        Map<String, Map<String, InetSocketAddress>> addresses =
891          getHaNnRpcAddresses(conf);
892        return addressMapToString(addresses);
893      }
894    
895      /**
896       * Represent one of the NameNodes configured in the cluster.
897       */
898      public static class ConfiguredNNAddress {
899        private final String nameserviceId;
900        private final String namenodeId;
901        private final InetSocketAddress addr;
902    
903        private ConfiguredNNAddress(String nameserviceId, String namenodeId,
904            InetSocketAddress addr) {
905          this.nameserviceId = nameserviceId;
906          this.namenodeId = namenodeId;
907          this.addr = addr;
908        }
909    
910        public String getNameserviceId() {
911          return nameserviceId;
912        }
913    
914        public String getNamenodeId() {
915          return namenodeId;
916        }
917    
918        public InetSocketAddress getAddress() {
919          return addr;
920        }
921        
922        @Override
923        public String toString() {
924          return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
925            "nnId=" + namenodeId + ";addr=" + addr + "]";
926        }
927      }
928      
929      /**
930       * Get a URI for each configured nameservice. If a nameservice is
931       * HA-enabled, then the logical URI of the nameservice is returned. If the
932       * nameservice is not HA-enabled, then a URI corresponding to an RPC address
933       * of the single NN for that nameservice is returned, preferring the service
934       * RPC address over the client RPC address.
935       * 
936       * @param conf configuration
937       * @return a collection of all configured NN URIs, preferring service
938       *         addresses
939       */
940      public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
941        return getNameServiceUris(conf,
942            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
943            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
944      }
945    
946      /**
947       * Get a URI for each configured nameservice. If a nameservice is
948       * HA-enabled, then the logical URI of the nameservice is returned. If the
949       * nameservice is not HA-enabled, then a URI corresponding to the address of
950       * the single NN for that nameservice is returned.
951       * 
952       * @param conf configuration
953       * @param keys configuration keys to try in order to get the URI for non-HA
954       *        nameservices
955       * @return a collection of all configured NN URIs
956       */
957      public static Collection<URI> getNameServiceUris(Configuration conf,
958          String... keys) {
959        Set<URI> ret = new HashSet<URI>();
960        
961        // We're passed multiple possible configuration keys for any given NN or HA
962        // nameservice, and search the config in order of these keys. In order to
963        // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
964        // URI for a config key for which we've already found a preferred entry, we
965        // keep track of non-preferred keys here.
966        Set<URI> nonPreferredUris = new HashSet<URI>();
967        
968        for (String nsId : getNameServiceIds(conf)) {
969          if (HAUtil.isHAEnabled(conf, nsId)) {
970            // Add the logical URI of the nameservice.
971            try {
972              ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
973            } catch (URISyntaxException ue) {
974              throw new IllegalArgumentException(ue);
975            }
976          } else {
977            // Add the URI corresponding to the address of the NN.
978            boolean uriFound = false;
979            for (String key : keys) {
980              String addr = conf.get(concatSuffixes(key, nsId));
981              if (addr != null) {
982                URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
983                    NetUtils.createSocketAddr(addr));
984                if (!uriFound) {
985                  uriFound = true;
986                  ret.add(uri);
987                } else {
988                  nonPreferredUris.add(uri);
989                }
990              }
991            }
992          }
993        }
994        
995        // Add the generic configuration keys.
996        boolean uriFound = false;
997        for (String key : keys) {
998          String addr = conf.get(key);
999          if (addr != null) {
1000            URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
1001            if (!uriFound) {
1002              uriFound = true;
1003              ret.add(uri);
1004            } else {
1005              nonPreferredUris.add(uri);
1006            }
1007          }
1008        }
1009        
1010        // Add the default URI if it is an HDFS URI.
1011        URI defaultUri = FileSystem.getDefaultUri(conf);
1012        // checks if defaultUri is ip:port format
1013        // and convert it to hostname:port format
1014        if (defaultUri != null && (defaultUri.getPort() != -1)) {
1015          defaultUri = createUri(defaultUri.getScheme(),
1016              NetUtils.createSocketAddr(defaultUri.getHost(), 
1017                  defaultUri.getPort()));
1018        }
1019        if (defaultUri != null &&
1020            HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
1021            !nonPreferredUris.contains(defaultUri)) {
1022          ret.add(defaultUri);
1023        }
1024        
1025        return ret;
1026      }
1027    
1028      /**
1029       * Given the InetSocketAddress this method returns the nameservice Id
1030       * corresponding to the key with matching address, by doing a reverse 
1031       * lookup on the list of nameservices until it finds a match.
1032       * 
1033       * Since the process of resolving URIs to Addresses is slightly expensive,
1034       * this utility method should not be used in performance-critical routines.
1035       * 
1036       * @param conf - configuration
1037       * @param address - InetSocketAddress for configured communication with NN.
1038       *     Configured addresses are typically given as URIs, but we may have to
1039       *     compare against a URI typed in by a human, or the server name may be
1040       *     aliased, so we compare unambiguous InetSocketAddresses instead of just
1041       *     comparing URI substrings.
1042       * @param keys - list of configured communication parameters that should
1043       *     be checked for matches.  For example, to compare against RPC addresses,
1044       *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1045       *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
1046       *     not the NameServiceId-suffixed keys.
1047       * @return nameserviceId, or null if no match found
1048       */
1049      public static String getNameServiceIdFromAddress(final Configuration conf, 
1050          final InetSocketAddress address, String... keys) {
1051        // Configuration with a single namenode and no nameserviceId
1052        String[] ids = getSuffixIDs(conf, address, keys);
1053        return (ids != null) ? ids[0] : null;
1054      }
1055      
1056      /**
1057       * return server http or https address from the configuration for a
1058       * given namenode rpc address.
1059       * @param conf
1060       * @param namenodeAddr - namenode RPC address
1061       * @param scheme - the scheme (http / https)
1062       * @return server http or https address
1063       * @throws IOException 
1064       */
1065      public static URI getInfoServer(InetSocketAddress namenodeAddr,
1066          Configuration conf, String scheme) throws IOException {
1067        String[] suffixes = null;
1068        if (namenodeAddr != null) {
1069          // if non-default namenode, try reverse look up 
1070          // the nameServiceID if it is available
1071          suffixes = getSuffixIDs(conf, namenodeAddr,
1072              DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1073              DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
1074        }
1075    
1076        String authority;
1077        if ("http".equals(scheme)) {
1078          authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
1079              DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
1080        } else if ("https".equals(scheme)) {
1081          authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
1082              DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
1083        } else {
1084          throw new IllegalArgumentException("Invalid scheme:" + scheme);
1085        }
1086    
1087        if (namenodeAddr != null) {
1088          authority = substituteForWildcardAddress(authority,
1089              namenodeAddr.getHostName());
1090        }
1091        return URI.create(scheme + "://" + authority);
1092      }
1093    
1094      /**
1095       * Lookup the HTTP / HTTPS address of the namenode, and replace its hostname
1096       * with defaultHost when it found out that the address is a wildcard / local
1097       * address.
1098       *
1099       * @param defaultHost
1100       *          The default host name of the namenode.
1101       * @param conf
1102       *          The configuration
1103       * @param scheme
1104       *          HTTP or HTTPS
1105       * @throws IOException
1106       */
1107      public static URI getInfoServerWithDefaultHost(String defaultHost,
1108          Configuration conf, final String scheme) throws IOException {
1109        URI configuredAddr = getInfoServer(null, conf, scheme);
1110        String authority = substituteForWildcardAddress(
1111            configuredAddr.getAuthority(), defaultHost);
1112        return URI.create(scheme + "://" + authority);
1113      }
1114    
1115      /**
1116       * Determine whether HTTP or HTTPS should be used to connect to the remote
1117       * server. Currently the client only connects to the server via HTTPS if the
1118       * policy is set to HTTPS_ONLY.
1119       *
1120       * @return the scheme (HTTP / HTTPS)
1121       */
1122      public static String getHttpClientScheme(Configuration conf) {
1123        HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
1124        return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
1125      }
1126    
1127      /**
1128       * Substitute a default host in the case that an address has been configured
1129       * with a wildcard. This is used, for example, when determining the HTTP
1130       * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
1131       * substitute the hostname from the filesystem URI rather than trying to
1132       * connect to 0.0.0.0.
1133       * @param configuredAddress the address found in the configuration
1134       * @param defaultHost the host to substitute with, if configuredAddress
1135       * is a local/wildcard address.
1136       * @return the substituted address
1137       * @throws IOException if it is a wildcard address and security is enabled
1138       */
1139      @VisibleForTesting
1140      static String substituteForWildcardAddress(String configuredAddress,
1141        String defaultHost) throws IOException {
1142        InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
1143        InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
1144            + ":0");
1145        final InetAddress addr = sockAddr.getAddress();
1146        if (addr != null && addr.isAnyLocalAddress()) {
1147          if (UserGroupInformation.isSecurityEnabled() &&
1148              defaultSockAddr.getAddress().isAnyLocalAddress()) {
1149            throw new IOException("Cannot use a wildcard address with security. " +
1150                "Must explicitly set bind address for Kerberos");
1151          }
1152          return defaultHost + ":" + sockAddr.getPort();
1153        } else {
1154          return configuredAddress;
1155        }
1156      }
1157      
1158      private static String getSuffixedConf(Configuration conf,
1159          String key, String defaultVal, String[] suffixes) {
1160        String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
1161        if (ret != null) {
1162          return ret;
1163        }
1164        return conf.get(key, defaultVal);
1165      }
1166      
1167      /**
1168       * Sets the node specific setting into generic configuration key. Looks up
1169       * value of "key.nameserviceId.namenodeId" and if found sets that value into 
1170       * generic key in the conf. If this is not found, falls back to
1171       * "key.nameserviceId" and then the unmodified key.
1172       *
1173       * Note that this only modifies the runtime conf.
1174       * 
1175       * @param conf
1176       *          Configuration object to lookup specific key and to set the value
1177       *          to the key passed. Note the conf object is modified.
1178       * @param nameserviceId
1179       *          nameservice Id to construct the node specific key. Pass null if
1180       *          federation is not configuration.
1181       * @param nnId
1182       *          namenode Id to construct the node specific key. Pass null if
1183       *          HA is not configured.
1184       * @param keys
1185       *          The key for which node specific value is looked up
1186       */
1187      public static void setGenericConf(Configuration conf,
1188          String nameserviceId, String nnId, String... keys) {
1189        for (String key : keys) {
1190          String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
1191          if (value != null) {
1192            conf.set(key, value);
1193            continue;
1194          }
1195          value = conf.get(addKeySuffixes(key, nameserviceId));
1196          if (value != null) {
1197            conf.set(key, value);
1198          }
1199        }
1200      }
1201      
1202      /** Return used as percentage of capacity */
1203      public static float getPercentUsed(long used, long capacity) {
1204        return capacity <= 0 ? 100 : (used * 100.0f)/capacity; 
1205      }
1206      
1207      /** Return remaining as percentage of capacity */
1208      public static float getPercentRemaining(long remaining, long capacity) {
1209        return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; 
1210      }
1211    
1212      /** Convert percentage to a string. */
1213      public static String percent2String(double percentage) {
1214        return StringUtils.format("%.2f%%", percentage);
1215      }
1216    
1217      /**
1218       * Round bytes to GiB (gibibyte)
1219       * @param bytes number of bytes
1220       * @return number of GiB
1221       */
1222      public static int roundBytesToGB(long bytes) {
1223        return Math.round((float)bytes/ 1024 / 1024 / 1024);
1224      }
1225      
1226      /** Create a {@link ClientDatanodeProtocol} proxy */
1227      public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1228          DatanodeID datanodeid, Configuration conf, int socketTimeout,
1229          boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
1230        return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
1231            connectToDnViaHostname, locatedBlock);
1232      }
1233      
1234      /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
1235      static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1236          DatanodeID datanodeid, Configuration conf, int socketTimeout,
1237          boolean connectToDnViaHostname) throws IOException {
1238        return new ClientDatanodeProtocolTranslatorPB(
1239            datanodeid, conf, socketTimeout, connectToDnViaHostname);
1240      }
1241      
  /** Create a {@link ClientDatanodeProtocol} proxy */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
      SocketFactory factory) throws IOException {
    // Thin wrapper over the protobuf translator for an explicit address.
    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
  }
1248    
1249      /**
1250       * Get nameservice Id for the {@link NameNode} based on namenode RPC address
1251       * matching the local node address.
1252       */
1253      public static String getNamenodeNameServiceId(Configuration conf) {
1254        return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
1255      }
1256      
1257      /**
1258       * Get nameservice Id for the BackupNode based on backup node RPC address
1259       * matching the local node address.
1260       */
1261      public static String getBackupNameServiceId(Configuration conf) {
1262        return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
1263      }
1264      
1265      /**
1266       * Get nameservice Id for the secondary node based on secondary http address
1267       * matching the local node address.
1268       */
1269      public static String getSecondaryNameServiceId(Configuration conf) {
1270        return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
1271      }
1272      
1273      /**
1274       * Get the nameservice Id by matching the {@code addressKey} with the
1275       * the address of the local node. 
1276       * 
1277       * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
1278       * configured, and more than one nameservice Id is configured, this method 
1279       * determines the nameservice Id by matching the local node's address with the
1280       * configured addresses. When a match is found, it returns the nameservice Id
1281       * from the corresponding configuration key.
1282       * 
1283       * @param conf Configuration
1284       * @param addressKey configuration key to get the address.
1285       * @return nameservice Id on success, null if federation is not configured.
1286       * @throws HadoopIllegalArgumentException on error
1287       */
1288      private static String getNameServiceId(Configuration conf, String addressKey) {
1289        String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
1290        if (nameserviceId != null) {
1291          return nameserviceId;
1292        }
1293        Collection<String> nsIds = getNameServiceIds(conf);
1294        if (1 == nsIds.size()) {
1295          return nsIds.toArray(new String[1])[0];
1296        }
1297        String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);
1298        
1299        return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
1300      }
1301      
1302      /**
1303       * Returns nameservice Id and namenode Id when the local host matches the
1304       * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
1305       * 
1306       * @param conf Configuration
1307       * @param addressKey configuration key corresponding to the address.
1308       * @param knownNsId only look at configs for the given nameservice, if not-null
1309       * @param knownNNId only look at configs for the given namenode, if not null
1310       * @param matcher matching criteria for matching the address
1311       * @return Array with nameservice Id and namenode Id on success. First element
1312       *         in the array is nameservice Id and second element is namenode Id.
1313       *         Null value indicates that the configuration does not have the the
1314       *         Id.
1315       * @throws HadoopIllegalArgumentException on error
1316       */
1317      static String[] getSuffixIDs(final Configuration conf, final String addressKey,
1318          String knownNsId, String knownNNId,
1319          final AddressMatcher matcher) {
1320        String nameserviceId = null;
1321        String namenodeId = null;
1322        int found = 0;
1323        
1324        Collection<String> nsIds = getNameServiceIds(conf);
1325        for (String nsId : emptyAsSingletonNull(nsIds)) {
1326          if (knownNsId != null && !knownNsId.equals(nsId)) {
1327            continue;
1328          }
1329          
1330          Collection<String> nnIds = getNameNodeIds(conf, nsId);
1331          for (String nnId : emptyAsSingletonNull(nnIds)) {
1332            if (LOG.isTraceEnabled()) {
1333              LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
1334                  addressKey, nsId, nnId));
1335            }
1336            if (knownNNId != null && !knownNNId.equals(nnId)) {
1337              continue;
1338            }
1339            String key = addKeySuffixes(addressKey, nsId, nnId);
1340            String addr = conf.get(key);
1341            if (addr == null) {
1342              continue;
1343            }
1344            InetSocketAddress s = null;
1345            try {
1346              s = NetUtils.createSocketAddr(addr);
1347            } catch (Exception e) {
1348              LOG.warn("Exception in creating socket address " + addr, e);
1349              continue;
1350            }
1351            if (!s.isUnresolved() && matcher.match(s)) {
1352              nameserviceId = nsId;
1353              namenodeId = nnId;
1354              found++;
1355            }
1356          }
1357        }
1358        if (found > 1) { // Only one address must match the local address
1359          String msg = "Configuration has multiple addresses that match "
1360              + "local node's address. Please configure the system with "
1361              + DFS_NAMESERVICE_ID + " and "
1362              + DFS_HA_NAMENODE_ID_KEY;
1363          throw new HadoopIllegalArgumentException(msg);
1364        }
1365        return new String[] { nameserviceId, namenodeId };
1366      }
1367      
1368      /**
1369       * For given set of {@code keys} adds nameservice Id and or namenode Id
1370       * and returns {nameserviceId, namenodeId} when address match is found.
1371       * @see #getSuffixIDs(Configuration, String, AddressMatcher)
1372       */
1373      static String[] getSuffixIDs(final Configuration conf,
1374          final InetSocketAddress address, final String... keys) {
1375        AddressMatcher matcher = new AddressMatcher() {
1376         @Override
1377          public boolean match(InetSocketAddress s) {
1378            return address.equals(s);
1379          } 
1380        };
1381        
1382        for (String key : keys) {
1383          String[] ids = getSuffixIDs(conf, key, null, null, matcher);
1384          if (ids != null && (ids [0] != null || ids[1] != null)) {
1385            return ids;
1386          }
1387        }
1388        return null;
1389      }
1390      
1391      private interface AddressMatcher {
1392        public boolean match(InetSocketAddress s);
1393      }
1394    
1395      /** Create a URI from the scheme and address */
1396      public static URI createUri(String scheme, InetSocketAddress address) {
1397        try {
1398          return new URI(scheme, null, address.getHostName(), address.getPort(),
1399              null, null, null);
1400        } catch (URISyntaxException ue) {
1401          throw new IllegalArgumentException(ue);
1402        }
1403      }
1404      
1405      /**
1406       * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
1407       * @param conf configuration
1408       * @param protocol Protocol interface
1409       * @param service service that implements the protocol
1410       * @param server RPC server to which the protocol & implementation is added to
1411       * @throws IOException
1412       */
1413      public static void addPBProtocol(Configuration conf, Class<?> protocol,
1414          BlockingService service, RPC.Server server) throws IOException {
1415        RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
1416        server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
1417      }
1418    
1419      /**
1420       * Map a logical namenode ID to its service address. Use the given
1421       * nameservice if specified, or the configured one if none is given.
1422       *
1423       * @param conf Configuration
1424       * @param nsId which nameservice nnId is a part of, optional
1425       * @param nnId the namenode ID to get the service addr for
1426       * @return the service addr, null if it could not be determined
1427       */
1428      public static String getNamenodeServiceAddr(final Configuration conf,
1429          String nsId, String nnId) {
1430    
1431        if (nsId == null) {
1432          nsId = getOnlyNameServiceIdOrNull(conf);
1433        }
1434    
1435        String serviceAddrKey = concatSuffixes(
1436            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
1437    
1438        String addrKey = concatSuffixes(
1439            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
1440    
1441        String serviceRpcAddr = conf.get(serviceAddrKey);
1442        if (serviceRpcAddr == null) {
1443          serviceRpcAddr = conf.get(addrKey);
1444        }
1445        return serviceRpcAddr;
1446      }
1447    
1448      /**
1449       * If the configuration refers to only a single nameservice, return the
1450       * name of that nameservice. If it refers to 0 or more than 1, return null.
1451       */
1452      public static String getOnlyNameServiceIdOrNull(Configuration conf) {
1453        Collection<String> nsIds = getNameServiceIds(conf);
1454        if (1 == nsIds.size()) {
1455          return nsIds.toArray(new String[1])[0];
1456        } else {
1457          // No nameservice ID was given and more than one is configured
1458          return null;
1459        }
1460      }
1461      
  // Shared commons-cli options used by parseHelpArgument to recognize the
  // "-h" / "--help" flag.
  public static final Options helpOptions = new Options();
  public static final Option helpOpt = new Option("h", "help", false,
      "get help information");

  static {
    helpOptions.addOption(helpOpt);
  }
1469    
1470      /**
1471       * Parse the arguments for commands
1472       * 
1473       * @param args the argument to be parsed
1474       * @param helpDescription help information to be printed out
1475       * @param out Printer
1476       * @param printGenericCommandUsage whether to print the 
1477       *              generic command usage defined in ToolRunner
1478       * @return true when the argument matches help option, false if not
1479       */
1480      public static boolean parseHelpArgument(String[] args,
1481          String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
1482        if (args.length == 1) {
1483          try {
1484            CommandLineParser parser = new PosixParser();
1485            CommandLine cmdLine = parser.parse(helpOptions, args);
1486            if (cmdLine.hasOption(helpOpt.getOpt())
1487                || cmdLine.hasOption(helpOpt.getLongOpt())) {
1488              // should print out the help information
1489              out.println(helpDescription + "\n");
1490              if (printGenericCommandUsage) {
1491                ToolRunner.printGenericCommandUsage(out);
1492              }
1493              return true;
1494            }
1495          } catch (ParseException pe) {
1496            return false;
1497          }
1498        }
1499        return false;
1500      }
1501      
1502      /**
1503       * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
1504       * 
1505       * @param conf Configuration
1506       * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
1507       */
1508      public static float getInvalidateWorkPctPerIteration(Configuration conf) {
1509        float blocksInvalidateWorkPct = conf.getFloat(
1510            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
1511            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
1512        Preconditions.checkArgument(
1513            (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
1514            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
1515            " = '" + blocksInvalidateWorkPct + "' is invalid. " +
1516            "It should be a positive, non-zero float value, not greater than 1.0f, " +
1517            "to indicate a percentage.");
1518        return blocksInvalidateWorkPct;
1519      }
1520    
1521      /**
1522       * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
1523       * configuration.
1524       * 
1525       * @param conf Configuration
1526       * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
1527       */
1528      public static int getReplWorkMultiplier(Configuration conf) {
1529        int blocksReplWorkMultiplier = conf.getInt(
1530                DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
1531                DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
1532        Preconditions.checkArgument(
1533            (blocksReplWorkMultiplier > 0),
1534            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
1535            " = '" + blocksReplWorkMultiplier + "' is invalid. " +
1536            "It should be a positive, non-zero integer value.");
1537        return blocksReplWorkMultiplier;
1538      }
1539      
1540      /**
1541       * Get SPNEGO keytab Key from configuration
1542       * 
1543       * @param conf
1544       *          Configuration
1545       * @param defaultKey
1546       * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty
1547       *         else return defaultKey
1548       */
1549      public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
1550        String value = 
1551            conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
1552        return (value == null || value.isEmpty()) ?
1553            defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
1554      }
1555    
1556      /**
1557       * Get http policy. Http Policy is chosen as follows:
1558       * <ol>
1559       * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
1560       * https endpoints are started on configured https ports</li>
1561       * <li>This configuration is overridden by dfs.https.enable configuration, if
1562       * it is set to true. In that case, both http and https endpoints are stared.</li>
1563       * <li>All the above configurations are overridden by dfs.http.policy
1564       * configuration. With this configuration you can set http-only, https-only
1565       * and http-and-https endpoints.</li>
1566       * </ol>
1567       * See hdfs-default.xml documentation for more details on each of the above
1568       * configuration settings.
1569       */
1570      public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
1571        String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1572        if (policyStr == null) {
1573          boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
1574              DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
1575    
1576          boolean hadoopSsl = conf.getBoolean(
1577              CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
1578              CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
1579    
1580          if (hadoopSsl) {
1581            LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
1582                + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1583                + ".");
1584          }
1585          if (https) {
1586            LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
1587                + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1588                + ".");
1589          }
1590    
1591          return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
1592              : HttpConfig.Policy.HTTP_ONLY;
1593        }
1594    
1595        HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
1596        if (policy == null) {
1597          throw new HadoopIllegalArgumentException("Unregonized value '"
1598              + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1599        }
1600    
1601        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
1602        return policy;
1603      }
1604    
1605      public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
1606          Configuration sslConf) {
1607        return builder
1608            .needsClientAuth(
1609                sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1610                    DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
1611            .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
1612            .keyStore(sslConf.get("ssl.server.keystore.location"),
1613                sslConf.get("ssl.server.keystore.password"),
1614                sslConf.get("ssl.server.keystore.type", "jks"))
1615            .trustStore(sslConf.get("ssl.server.truststore.location"),
1616                sslConf.get("ssl.server.truststore.password"),
1617                sslConf.get("ssl.server.truststore.type", "jks"));
1618      }
1619    
1620      /**
1621       * Load HTTPS-related configuration.
1622       */
1623      public static Configuration loadSslConfiguration(Configuration conf) {
1624        Configuration sslConf = new Configuration(false);
1625    
1626        sslConf.addResource(conf.get(
1627            DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
1628            DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
1629    
1630        boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1631            DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
1632        sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
1633        return sslConf;
1634      }
1635    
1636      /**
1637       * Return a HttpServer.Builder that the journalnode / namenode / secondary
1638       * namenode can use to initialize their HTTP / HTTPS server.
1639       *
1640       */
1641      public static HttpServer2.Builder httpServerTemplateForNNAndJN(
1642          Configuration conf, final InetSocketAddress httpAddr,
1643          final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
1644          String spnegoKeytabFileKey) throws IOException {
1645        HttpConfig.Policy policy = getHttpPolicy(conf);
1646    
1647        HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
1648            .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
1649            .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
1650            .setUsernameConfKey(spnegoUserNameKey)
1651            .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
1652    
1653        // initialize the webserver for uploading/downloading files.
1654        LOG.info("Starting web server as: "
1655            + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
1656                httpAddr.getHostName()));
1657    
1658        if (policy.isHttpEnabled()) {
1659          if (httpAddr.getPort() == 0) {
1660            builder.setFindPort(true);
1661          }
1662    
1663          URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
1664          builder.addEndpoint(uri);
1665          LOG.info("Starting Web-server for " + name + " at: " + uri);
1666        }
1667    
1668        if (policy.isHttpsEnabled() && httpsAddr != null) {
1669          Configuration sslConf = loadSslConfiguration(conf);
1670          loadSslConfToHttpServerBuilder(builder, sslConf);
1671    
1672          if (httpsAddr.getPort() == 0) {
1673            builder.setFindPort(true);
1674          }
1675    
1676          URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
1677          builder.addEndpoint(uri);
1678          LOG.info("Starting Web-server for " + name + " at: " + uri);
1679        }
1680        return builder;
1681      }
1682    
1683      /**
1684       * Converts a Date into an ISO-8601 formatted datetime string.
1685       */
1686      public static String dateToIso8601String(Date date) {
1687        SimpleDateFormat df =
1688            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
1689        return df.format(date);
1690      }
1691    
1692      /**
1693       * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
1694       */
1695      public static String durationToString(long durationMs) {
1696        boolean negative = false;
1697        if (durationMs < 0) {
1698          negative = true;
1699          durationMs = -durationMs;
1700        }
1701        // Chop off the milliseconds
1702        long durationSec = durationMs / 1000;
1703        final int secondsPerMinute = 60;
1704        final int secondsPerHour = 60*60;
1705        final int secondsPerDay = 60*60*24;
1706        final long days = durationSec / secondsPerDay;
1707        durationSec -= days * secondsPerDay;
1708        final long hours = durationSec / secondsPerHour;
1709        durationSec -= hours * secondsPerHour;
1710        final long minutes = durationSec / secondsPerMinute;
1711        durationSec -= minutes * secondsPerMinute;
1712        final long seconds = durationSec;
1713        final long milliseconds = durationMs % 1000;
1714        String format = "%03d:%02d:%02d:%02d.%03d";
1715        if (negative)  {
1716          format = "-" + format;
1717        }
1718        return String.format(format, days, hours, minutes, seconds, milliseconds);
1719      }
1720    
1721      /**
1722       * Converts a relative time string into a duration in milliseconds.
1723       */
1724      public static long parseRelativeTime(String relTime) throws IOException {
1725        if (relTime.length() < 2) {
1726          throw new IOException("Unable to parse relative time value of " + relTime
1727              + ": too short");
1728        }
1729        String ttlString = relTime.substring(0, relTime.length()-1);
1730        long ttl;
1731        try {
1732          ttl = Long.parseLong(ttlString);
1733        } catch (NumberFormatException e) {
1734          throw new IOException("Unable to parse relative time value of " + relTime
1735              + ": " + ttlString + " is not a number");
1736        }
1737        if (relTime.endsWith("s")) {
1738          // pass
1739        } else if (relTime.endsWith("m")) {
1740          ttl *= 60;
1741        } else if (relTime.endsWith("h")) {
1742          ttl *= 60*60;
1743        } else if (relTime.endsWith("d")) {
1744          ttl *= 60*60*24;
1745        } else {
1746          throw new IOException("Unable to parse relative time value of " + relTime
1747              + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
1748        }
1749        return ttl*1000;
1750      }
1751    
1752      /**
1753       * Assert that all objects in the collection are equal. Returns silently if
1754       * so, throws an AssertionError if any object is not equal. All null values
1755       * are considered equal.
1756       * 
1757       * @param objects the collection of objects to check for equality.
1758       */
1759      public static void assertAllResultsEqual(Collection<?> objects)
1760          throws AssertionError {
1761        if (objects.size() == 0 || objects.size() == 1)
1762          return;
1763        
1764        Object[] resultsArray = objects.toArray();
1765        for (int i = 1; i < resultsArray.length; i++) {
1766          Object currElement = resultsArray[i];
1767          Object lastElement = resultsArray[i - 1];
1768          if ((currElement == null && currElement != lastElement) ||
1769              (currElement != null && !currElement.equals(lastElement))) {
1770            throw new AssertionError("Not all elements match in results: " +
1771              Arrays.toString(resultsArray));
1772          }
1773        }
1774      }
1775    }