001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hdfs.server.blockmanagement;
019
020import java.util.ArrayList;
021import java.util.Collection;
022import java.util.Collections;
023import java.util.HashMap;
024import java.util.HashSet;
025import java.util.Iterator;
026import java.util.LinkedList;
027import java.util.List;
028import java.util.Map;
029import java.util.Queue;
030import java.util.Set;
031
032import com.google.common.annotations.VisibleForTesting;
033
034import org.apache.hadoop.classification.InterfaceAudience;
035import org.apache.hadoop.classification.InterfaceStability;
036import org.apache.hadoop.fs.StorageType;
037import org.apache.hadoop.hdfs.protocol.Block;
038import org.apache.hadoop.hdfs.protocol.DatanodeID;
039import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
040import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
041import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
042import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
043import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
044import org.apache.hadoop.hdfs.server.protocol.StorageReport;
045import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
046import org.apache.hadoop.hdfs.util.EnumCounters;
047import org.apache.hadoop.hdfs.util.LightWeightHashSet;
048import org.apache.hadoop.util.IntrusiveCollection;
049import org.apache.hadoop.util.Time;
050import org.slf4j.Logger;
051import org.slf4j.LoggerFactory;
052
053/**
054 * This class extends the DatanodeInfo class with ephemeral information (eg
055 * health, capacity, what blocks are associated with the Datanode) that is
056 * private to the Namenode, ie this class is not exposed to clients.
057 */
058@InterfaceAudience.Private
059@InterfaceStability.Evolving
060public class DatanodeDescriptor extends DatanodeInfo {
  public static final Logger LOG =
      LoggerFactory.getLogger(DatanodeDescriptor.class);
  public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
  // Interval at which curr/prev scheduled-block counters are rolled over;
  // see rollBlocksScheduled().
  private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
065
066  /** Block and targets pair */
067  @InterfaceAudience.Private
068  @InterfaceStability.Evolving
069  public static class BlockTargetPair {
070    public final Block block;
071    public final DatanodeStorageInfo[] targets;    
072
073    BlockTargetPair(Block block, DatanodeStorageInfo[] targets) {
074      this.block = block;
075      this.targets = targets;
076    }
077  }
078
079  /** A BlockTargetPair queue. */
080  private static class BlockQueue<E> {
081    private final Queue<E> blockq = new LinkedList<>();
082
083    /** Size of the queue */
084    synchronized int size() {return blockq.size();}
085
086    /** Enqueue */
087    synchronized boolean offer(E e) { 
088      return blockq.offer(e);
089    }
090
091    /** Dequeue */
092    synchronized List<E> poll(int numBlocks) {
093      if (numBlocks <= 0 || blockq.isEmpty()) {
094        return null;
095      }
096
097      List<E> results = new ArrayList<>();
098      for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
099        results.add(blockq.poll());
100      }
101      return results;
102    }
103
104    /**
105     * Returns <tt>true</tt> if the queue contains the specified element.
106     */
107    synchronized boolean contains(E e) {
108      return blockq.contains(e);
109    }
110
111    synchronized void clear() {
112      blockq.clear();
113    }
114  }
115
116  /**
117   * A list of CachedBlock objects on this datanode.
118   */
119  public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> {
120    public enum Type {
121      PENDING_CACHED,
122      CACHED,
123      PENDING_UNCACHED
124    }
125
126    private final DatanodeDescriptor datanode;
127
128    private final Type type;
129
130    CachedBlocksList(DatanodeDescriptor datanode, Type type) {
131      this.datanode = datanode;
132      this.type = type;
133    }
134
135    public DatanodeDescriptor getDatanode() {
136      return datanode;
137    }
138
139    public Type getType() {
140      return type;
141    }
142  }
143
  // Stores status of decommissioning.
  // If node is not decommissioning, do not use this object for anything.
  public final DecommissioningStatus decommissioningStatus =
      new DecommissioningStatus();

  // Maps storage ID -> storage info for every storage attached to this DN.
  // All accesses are guarded by synchronizing on this map itself.
  private final Map<String, DatanodeStorageInfo> storageMap =
      new HashMap<>();

  /**
   * The blocks which we want to cache on this DataNode.
   */
  private final CachedBlocksList pendingCached = 
      new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED);

  /**
   * The blocks which we know are cached on this datanode.
   * This list is updated by periodic cache reports.
   */
  private final CachedBlocksList cached = 
      new CachedBlocksList(this, CachedBlocksList.Type.CACHED);

  /**
   * The blocks which we want to uncache on this DataNode.
   */
  private final CachedBlocksList pendingUncached = 
      new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED);

  /**
   * The time when the last batch of caching directives was sent, in
   * monotonic milliseconds.
   */
  private long lastCachingDirectiveSentTimeMs;

  // isAlive == heartbeats.contains(this)
  // This is an optimization, because contains takes O(n) time on Arraylist
  private boolean isAlive = false;
  // Whether a block-key update should be pushed to this DN on next heartbeat.
  private boolean needKeyUpdate = false;
  // Whether the DN must be told to re-register (see isRegistered()).
  private boolean forceRegistration = false;

  // A system administrator can tune the balancer bandwidth parameter
  // (dfs.balance.bandwidthPerSec) dynamically by calling
  // "dfsadmin -setBalanacerBandwidth <newbandwidth>", at which point the
  // following 'bandwidth' variable gets updated with the new value for each
  // node. Once the heartbeat command is issued to update the value on the
  // specified datanode, this value will be set back to 0.
  private long bandwidth;

  /** A queue of blocks to be replicated by this datanode */
  private final BlockQueue<BlockTargetPair> replicateBlocks =
      new BlockQueue<>();
  /** A queue of blocks to be recovered by this datanode */
  private final BlockQueue<BlockInfo> recoverBlocks = new BlockQueue<>();
  /** A set of blocks to be invalidated by this datanode.
   *  Not thread-safe; accesses are guarded by synchronizing on the set. */
  private final LightWeightHashSet<Block> invalidateBlocks =
      new LightWeightHashSet<>();

  /* Variables for maintaining number of blocks scheduled to be written to
   * this storage. This count is approximate and might be slightly bigger
   * in case of errors (e.g. datanode does not report if an error occurs
   * while writing the block).  The curr counter is rolled into prev every
   * BLOCKS_SCHEDULED_ROLL_INTERVAL ms; see rollBlocksScheduled().
   */
  private EnumCounters<StorageType> currApproxBlocksScheduled
      = new EnumCounters<>(StorageType.class);
  private EnumCounters<StorageType> prevApproxBlocksScheduled
      = new EnumCounters<>(StorageType.class);
  private long lastBlocksScheduledRollTime = 0;
  private int volumeFailures = 0;
  private VolumeFailureSummary volumeFailureSummary = null;
  
  /** 
   * When set to true, the node is not in include list and is not allowed
   * to communicate with the namenode
   */
  private boolean disallowed = false;

  // The number of replication work pending before targets are determined.
  // NOTE(review): field name violates lowerCamelCase convention but is kept
  // to avoid touching its other uses in this file.
  private int PendingReplicationWithoutTargets = 0;

  // HB processing can use it to tell if it is the first HB since DN restarted
  private boolean heartbeatedSinceRegistration = false;
224
225  /**
226   * DatanodeDescriptor constructor
227   * @param nodeID id of the data node
228   */
229  public DatanodeDescriptor(DatanodeID nodeID) {
230    super(nodeID);
231    updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null);
232  }
233
234  /**
235   * DatanodeDescriptor constructor
236   * @param nodeID id of the data node
237   * @param networkLocation location of the data node in network
238   */
239  public DatanodeDescriptor(DatanodeID nodeID, 
240                            String networkLocation) {
241    super(nodeID, networkLocation);
242    updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null);
243  }
244
  /** @return blocks we still want this datanode to cache. */
  public CachedBlocksList getPendingCached() {
    return pendingCached;
  }

  /** @return blocks we know are cached on this datanode. */
  public CachedBlocksList getCached() {
    return cached;
  }

  /** @return blocks we still want this datanode to uncache. */
  public CachedBlocksList getPendingUncached() {
    return pendingUncached;
  }

  /** @return whether this node is currently in the live-heartbeat list. */
  public boolean isAlive() {
    return isAlive;
  }

  public void setAlive(boolean isAlive) {
    this.isAlive = isAlive;
  }

  /** @return whether a block-key update is pending for this datanode. */
  public boolean needKeyUpdate() {
    return needKeyUpdate;
  }

  public void setNeedKeyUpdate(boolean needKeyUpdate) {
    this.needKeyUpdate = needKeyUpdate;
  }

  /** @return whether a heartbeat has arrived since the last registration. */
  @VisibleForTesting
  public boolean isHeartbeatedSinceRegistration() {
   return heartbeatedSinceRegistration;
  }
277
278  @VisibleForTesting
279  public DatanodeStorageInfo getStorageInfo(String storageID) {
280    synchronized (storageMap) {
281      return storageMap.get(storageID);
282    }
283  }
284
285  @VisibleForTesting
286  public DatanodeStorageInfo[] getStorageInfos() {
287    synchronized (storageMap) {
288      final Collection<DatanodeStorageInfo> storages = storageMap.values();
289      return storages.toArray(new DatanodeStorageInfo[storages.size()]);
290    }
291  }
292
293  public StorageReport[] getStorageReports() {
294    final DatanodeStorageInfo[] infos = getStorageInfos();
295    final StorageReport[] reports = new StorageReport[infos.length];
296    for(int i = 0; i < infos.length; i++) {
297      reports[i] = infos[i].toStorageReport();
298    }
299    return reports;
300  }
301
302  boolean hasStaleStorages() {
303    synchronized (storageMap) {
304      for (DatanodeStorageInfo storage : storageMap.values()) {
305        if (storage.areBlockContentsStale()) {
306          return true;
307        }
308      }
309      return false;
310    }
311  }
312
313  public void resetBlocks() {
314    setCapacity(0);
315    setRemaining(0);
316    setBlockPoolUsed(0);
317    setDfsUsed(0);
318    setXceiverCount(0);
319    this.invalidateBlocks.clear();
320    this.volumeFailures = 0;
321    // pendingCached, cached, and pendingUncached are protected by the
322    // FSN lock.
323    this.pendingCached.clear();
324    this.cached.clear();
325    this.pendingUncached.clear();
326  }
327  
  /**
   * Drop all pending work queued for this datanode: invalidation,
   * recovery, replication, and caching state.
   */
  public void clearBlockQueues() {
    synchronized (invalidateBlocks) {
      this.invalidateBlocks.clear();
    }
    this.recoverBlocks.clear();
    this.replicateBlocks.clear();
    // pendingCached, cached, and pendingUncached are protected by the
    // FSN lock.
    this.pendingCached.clear();
    this.cached.clear();
    this.pendingUncached.clear();
  }
340
341  public int numBlocks() {
342    int blocks = 0;
343    for (DatanodeStorageInfo entry : getStorageInfos()) {
344      blocks += entry.numBlocks();
345    }
346    return blocks;
347  }
348
349  /**
350   * Updates stats from datanode heartbeat.
351   */
352  public void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
353      long cacheUsed, int xceiverCount, int volFailures,
354      VolumeFailureSummary volumeFailureSummary) {
355    updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
356        volFailures, volumeFailureSummary);
357    heartbeatedSinceRegistration = true;
358  }
359
360  /**
361   * process datanode heartbeat or stats initialization.
362   */
363  public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
364      long cacheUsed, int xceiverCount, int volFailures,
365      VolumeFailureSummary volumeFailureSummary) {
366    long totalCapacity = 0;
367    long totalRemaining = 0;
368    long totalBlockPoolUsed = 0;
369    long totalDfsUsed = 0;
370    long totalNonDfsUsed = 0;
371    Set<DatanodeStorageInfo> failedStorageInfos = null;
372
373    // Decide if we should check for any missing StorageReport and mark it as
374    // failed. There are different scenarios.
375    // 1. When DN is running, a storage failed. Given the current DN
376    //    implementation doesn't add recovered storage back to its storage list
377    //    until DN restart, we can assume volFailures won't decrease
378    //    during the current DN registration session.
379    //    When volumeFailures == this.volumeFailures, it implies there is no
380    //    state change. No need to check for failed storage. This is an
381    //    optimization.  Recent versions of the DataNode report a
382    //    VolumeFailureSummary containing the date/time of the last volume
383    //    failure.  If that's available, then we check that instead for greater
384    //    accuracy.
385    // 2. After DN restarts, volFailures might not increase and it is possible
386    //    we still have new failed storage. For example, admins reduce
387    //    available storages in configuration. Another corner case
388    //    is the failed volumes might change after restart; a) there
389    //    is one good storage A, one restored good storage B, so there is
390    //    one element in storageReports and that is A. b) A failed. c) Before
391    //    DN sends HB to NN to indicate A has failed, DN restarts. d) After DN
392    //    restarts, storageReports has one element which is B.
393    final boolean checkFailedStorages;
394    if (volumeFailureSummary != null && this.volumeFailureSummary != null) {
395      checkFailedStorages = volumeFailureSummary.getLastVolumeFailureDate() >
396          this.volumeFailureSummary.getLastVolumeFailureDate();
397    } else {
398      checkFailedStorages = (volFailures > this.volumeFailures) ||
399          !heartbeatedSinceRegistration;
400    }
401
402    if (checkFailedStorages) {
403      if (this.volumeFailures != volFailures) {
404        LOG.info("Number of failed storages changes from {} to {}",
405            this.volumeFailures, volFailures);
406      }
407      synchronized (storageMap) {
408        failedStorageInfos =
409            new HashSet<>(storageMap.values());
410      }
411    }
412
413    setCacheCapacity(cacheCapacity);
414    setCacheUsed(cacheUsed);
415    setXceiverCount(xceiverCount);
416    setLastUpdate(Time.now());
417    setLastUpdateMonotonic(Time.monotonicNow());
418    this.volumeFailures = volFailures;
419    this.volumeFailureSummary = volumeFailureSummary;
420    for (StorageReport report : reports) {
421      DatanodeStorageInfo storage = updateStorage(report.getStorage());
422      if (checkFailedStorages) {
423        failedStorageInfos.remove(storage);
424      }
425
426      storage.receivedHeartbeat(report);
427      totalCapacity += report.getCapacity();
428      totalRemaining += report.getRemaining();
429      totalBlockPoolUsed += report.getBlockPoolUsed();
430      totalDfsUsed += report.getDfsUsed();
431      totalNonDfsUsed += report.getNonDfsUsed();
432    }
433    rollBlocksScheduled(getLastUpdateMonotonic());
434
435    // Update total metrics for the node.
436    setCapacity(totalCapacity);
437    setRemaining(totalRemaining);
438    setBlockPoolUsed(totalBlockPoolUsed);
439    setDfsUsed(totalDfsUsed);
440    setNonDfsUsed(totalNonDfsUsed);
441    if (checkFailedStorages) {
442      updateFailedStorage(failedStorageInfos);
443    }
444    long storageMapSize;
445    synchronized (storageMap) {
446      storageMapSize = storageMap.size();
447    }
448    if (storageMapSize != reports.length) {
449      pruneStorageMap(reports);
450    }
451  }
452
453  /**
454   * Remove stale storages from storageMap. We must not remove any storages
455   * as long as they have associated block replicas.
456   */
457  private void pruneStorageMap(final StorageReport[] reports) {
458    synchronized (storageMap) {
459      LOG.debug("Number of storages reported in heartbeat={};"
460              + " Number of storages in storageMap={}", reports.length,
461          storageMap.size());
462
463      HashMap<String, DatanodeStorageInfo> excessStorages;
464
465      // Init excessStorages with all known storages.
466      excessStorages = new HashMap<>(storageMap);
467
468      // Remove storages that the DN reported in the heartbeat.
469      for (final StorageReport report : reports) {
470        excessStorages.remove(report.getStorage().getStorageID());
471      }
472
473      // For each remaining storage, remove it if there are no associated
474      // blocks.
475      for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
476        if (storageInfo.numBlocks() == 0) {
477          storageMap.remove(storageInfo.getStorageID());
478          LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
479        } else {
480          // This can occur until all block reports are received.
481          LOG.debug("Deferring removal of stale storage {} with {} blocks",
482              storageInfo, storageInfo.numBlocks());
483        }
484      }
485    }
486  }
487
488  private void updateFailedStorage(
489      Set<DatanodeStorageInfo> failedStorageInfos) {
490    for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
491      if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
492        LOG.info("{} failed.", storageInfo);
493        storageInfo.setState(DatanodeStorage.State.FAILED);
494      }
495    }
496  }
497
  /**
   * Iterates over all blocks on this datanode by chaining the per-storage
   * block iterators in order.
   */
  private static class BlockIterator implements Iterator<BlockInfo> {
    // Index of the per-storage iterator currently being drained.
    private int index = 0;
    private final List<Iterator<BlockInfo>> iterators;
    
    private BlockIterator(final DatanodeStorageInfo... storages) {
      List<Iterator<BlockInfo>> iterators = new ArrayList<>();
      for (DatanodeStorageInfo e : storages) {
        iterators.add(e.getBlockIterator());
      }
      this.iterators = Collections.unmodifiableList(iterators);
    }

    @Override
    public boolean hasNext() {
      update();
      // After update(), index points at the first iterator with remaining
      // elements, or at the last iterator (possibly exhausted).
      return !iterators.isEmpty() && iterators.get(index).hasNext();
    }

    @Override
    public BlockInfo next() {
      update();
      return iterators.get(index).next();
    }
    
    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove unsupported.");
    }
    
    /** Advance index past exhausted iterators, never beyond the last one. */
    private void update() {
      while(index < iterators.size() - 1 && !iterators.get(index).hasNext()) {
        index++;
      }
    }
  }
533
  /** @return an iterator over all block replicas on this datanode. */
  Iterator<BlockInfo> getBlockIterator() {
    return new BlockIterator(getStorageInfos());
  }

  // Track replication work that has been identified but not yet assigned
  // targets (see getNumberOfBlocksToBeReplicated).
  void incrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets++;
  }

  void decrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets--;
  }
545
546  /**
547   * Store block replication work.
548   */
549  void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) {
550    assert(block != null && targets != null && targets.length > 0);
551    replicateBlocks.offer(new BlockTargetPair(block, targets));
552  }
553
554  /**
555   * Store block recovery work.
556   */
557  void addBlockToBeRecovered(BlockInfo block) {
558    if(recoverBlocks.contains(block)) {
559      // this prevents adding the same block twice to the recovery queue
560      BlockManager.LOG.info(block + " is already in the recovery queue");
561      return;
562    }
563    recoverBlocks.offer(block);
564  }
565
566  /**
567   * Store block invalidation work.
568   */
569  void addBlocksToBeInvalidated(List<Block> blocklist) {
570    assert(blocklist != null && blocklist.size() > 0);
571    synchronized (invalidateBlocks) {
572      for(Block blk : blocklist) {
573        invalidateBlocks.add(blk);
574      }
575    }
576  }
577
578  /**
579   * The number of work items that are pending to be replicated
580   */
581  int getNumberOfBlocksToBeReplicated() {
582    return PendingReplicationWithoutTargets + replicateBlocks.size();
583  }
584
585  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
586    return replicateBlocks.poll(maxTransfers);
587  }
588
589  public BlockInfo[] getLeaseRecoveryCommand(int maxTransfers) {
590    List<BlockInfo> blocks = recoverBlocks.poll(maxTransfers);
591    if(blocks == null)
592      return null;
593    return blocks.toArray(new BlockInfo[blocks.size()]);
594  }
595
596  /**
597   * Remove the specified number of blocks to be invalidated
598   */
599  public Block[] getInvalidateBlocks(int maxblocks) {
600    synchronized (invalidateBlocks) {
601      Block[] deleteList = invalidateBlocks.pollToArray(new Block[Math.min(
602          invalidateBlocks.size(), maxblocks)]);
603      return deleteList.length == 0 ? null : deleteList;
604    }
605  }
606
607  /**
608   * Find whether the datanode contains good storage of given type to
609   * place block of size <code>blockSize</code>.
610   *
611   * <p>Currently datanode only cares about the storage type, in this
612   * method, the first storage of given type we see is returned.
613   *
614   * @param t requested storage type
615   * @param blockSize requested block size
616   */
617  public DatanodeStorageInfo chooseStorage4Block(StorageType t,
618      long blockSize) {
619    final long requiredSize =
620        blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
621    final long scheduledSize = blockSize * getBlocksScheduled(t);
622    long remaining = 0;
623    DatanodeStorageInfo storage = null;
624    for (DatanodeStorageInfo s : getStorageInfos()) {
625      if (s.getState() == State.NORMAL && s.getStorageType() == t) {
626        if (storage == null) {
627          storage = s;
628        }
629        long r = s.getRemaining();
630        if (r >= requiredSize) {
631          remaining += r;
632        }
633      }
634    }
635    if (requiredSize > remaining - scheduledSize) {
636      return null;
637    }
638    return storage;
639  }
640
641  /**
642   * @return Approximate number of blocks currently scheduled to be written 
643   * to the given storage type of this datanode.
644   */
645  public int getBlocksScheduled(StorageType t) {
646    return (int)(currApproxBlocksScheduled.get(t)
647        + prevApproxBlocksScheduled.get(t));
648  }
649
650  /**
651   * @return Approximate number of blocks currently scheduled to be written 
652   * to this datanode.
653   */
654  public int getBlocksScheduled() {
655    return (int)(currApproxBlocksScheduled.sum()
656        + prevApproxBlocksScheduled.sum());
657  }
658
  /** Increment the number of blocks scheduled for the given storage type. */
  void incrementBlocksScheduled(StorageType t) {
    currApproxBlocksScheduled.add(t, 1);
  }
  
  /** Decrement the number of blocks scheduled for the given storage type. */
  void decrementBlocksScheduled(StorageType t) {
    // Prefer draining the previous interval first so its counter ages out.
    if (prevApproxBlocksScheduled.get(t) > 0) {
      prevApproxBlocksScheduled.subtract(t, 1);
    } else if (currApproxBlocksScheduled.get(t) > 0) {
      currApproxBlocksScheduled.subtract(t, 1);
    } 
    // its ok if both counters are zero.
  }
  
  /** Adjusts curr and prev number of blocks scheduled every few minutes. */
  private void rollBlocksScheduled(long now) {
    if (now - lastBlocksScheduledRollTime > BLOCKS_SCHEDULED_ROLL_INTERVAL) {
      // Retire stale curr counts into prev; decrements drain prev first,
      // so counts for writes that never complete eventually disappear.
      prevApproxBlocksScheduled.set(currApproxBlocksScheduled);
      currApproxBlocksScheduled.reset();
      lastBlocksScheduledRollTime = now;
    }
  }
682  
  @Override
  public int hashCode() {
    // Super implementation is sufficient
    return super.hashCode();
  }
  
  @Override
  public boolean equals(Object obj) {
    // Sufficient to use super equality as datanodes are uniquely identified
    // by DatanodeID
    return (this == obj) || super.equals(obj);
  }
695
696  /** Decommissioning status */
697  public class DecommissioningStatus {
698    private int underReplicatedBlocks;
699    private int decommissionOnlyReplicas;
700    private int underReplicatedInOpenFiles;
701    private long startTime;
702    
703    synchronized void set(int underRep,
704        int onlyRep, int underConstruction) {
705      if (!isDecommissionInProgress()) {
706        return;
707      }
708      underReplicatedBlocks = underRep;
709      decommissionOnlyReplicas = onlyRep;
710      underReplicatedInOpenFiles = underConstruction;
711    }
712
713    /** @return the number of under-replicated blocks */
714    public synchronized int getUnderReplicatedBlocks() {
715      if (!isDecommissionInProgress()) {
716        return 0;
717      }
718      return underReplicatedBlocks;
719    }
720    /** @return the number of decommission-only replicas */
721    public synchronized int getDecommissionOnlyReplicas() {
722      if (!isDecommissionInProgress()) {
723        return 0;
724      }
725      return decommissionOnlyReplicas;
726    }
727    /** @return the number of under-replicated blocks in open files */
728    public synchronized int getUnderReplicatedInOpenFiles() {
729      if (!isDecommissionInProgress()) {
730        return 0;
731      }
732      return underReplicatedInOpenFiles;
733    }
734    /** Set start time */
735    public synchronized void setStartTime(long time) {
736      startTime = time;
737    }
738    /** @return start time */
739    public synchronized long getStartTime() {
740      if (!isDecommissionInProgress()) {
741        return 0;
742      }
743      return startTime;
744    }
745  }  // End of class DecommissioningStatus
746
747  /**
748   * Set the flag to indicate if this datanode is disallowed from communicating
749   * with the namenode.
750   */
751  public void setDisallowed(boolean flag) {
752    disallowed = flag;
753  }
754  /** Is the datanode disallowed from communicating with the namenode? */
755  public boolean isDisallowed() {
756    return disallowed;
757  }
758
759  /**
760   * @return number of failed volumes in the datanode.
761   */
762  public int getVolumeFailures() {
763    return volumeFailures;
764  }
765
766  /**
767   * Returns info about volume failures.
768   *
769   * @return info about volume failures, possibly null
770   */
771  public VolumeFailureSummary getVolumeFailureSummary() {
772    return volumeFailureSummary;
773  }
774
775  /**
776   * @param nodeReg DatanodeID to update registration for.
777   */
778  @Override
779  public void updateRegInfo(DatanodeID nodeReg) {
780    super.updateRegInfo(nodeReg);
781    
782    // must re-process IBR after re-registration
783    for(DatanodeStorageInfo storage : getStorageInfos()) {
784      storage.setBlockReportCount(0);
785    }
786    heartbeatedSinceRegistration = false;
787    forceRegistration = false;
788  }
789
790  /**
791   * @return balancer bandwidth in bytes per second for this datanode
792   */
793  public long getBalancerBandwidth() {
794    return this.bandwidth;
795  }
796
797  /**
798   * @param bandwidth balancer bandwidth in bytes per second for this datanode
799   */
800  public void setBalancerBandwidth(long bandwidth) {
801    this.bandwidth = bandwidth;
802  }
803
804  @Override
805  public String dumpDatanode() {
806    StringBuilder sb = new StringBuilder(super.dumpDatanode());
807    int repl = replicateBlocks.size();
808    if (repl > 0) {
809      sb.append(" ").append(repl).append(" blocks to be replicated;");
810    }
811    int inval = invalidateBlocks.size();
812    if (inval > 0) {
813      sb.append(" ").append(inval).append(" blocks to be invalidated;");      
814    }
815    int recover = recoverBlocks.size();
816    if (recover > 0) {
817      sb.append(" ").append(recover).append(" blocks to be recovered;");
818    }
819    return sb.toString();
820  }
821
  /**
   * Look up the DatanodeStorageInfo for the reported storage, creating it
   * on first sight and refreshing its type/state when they changed.
   *
   * @return the (possibly newly created) storage info, never null
   */
  DatanodeStorageInfo updateStorage(DatanodeStorage s) {
    synchronized (storageMap) {
      DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
      if (storage == null) {
        LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
            getXferAddr());
        storage = new DatanodeStorageInfo(this, s);
        storageMap.put(s.getStorageID(), storage);
      } else if (storage.getState() != s.getState() ||
                 storage.getStorageType() != s.getStorageType()) {
        // For backwards compatibility, make sure that the type and
        // state are updated. Some reports from older datanodes do
        // not include these fields so we may have assumed defaults.
        // (The put below is redundant — the same key already maps to this
        // object — but harmless.)
        storage.updateFromStorage(s);
        storageMap.put(storage.getStorageID(), storage);
      }
      return storage;
    }
  }
841
842  /**
843   * @return   The time at which we last sent caching directives to this 
844   *           DataNode, in monotonic milliseconds.
845   */
846  public long getLastCachingDirectiveSentTimeMs() {
847    return this.lastCachingDirectiveSentTimeMs;
848  }
849
850  /**
851   * @param time  The time at which we last sent caching directives to this 
852   *              DataNode, in monotonic milliseconds.
853   */
854  public void setLastCachingDirectiveSentTimeMs(long time) {
855    this.lastCachingDirectiveSentTimeMs = time;
856  }
857  
858  /**
859   * @return whether at least first block report has been received
860   */
861  public boolean checkBlockReportReceived() {
862    if(this.getStorageInfos().length == 0) {
863      return false;
864    }
865    for(DatanodeStorageInfo storageInfo: this.getStorageInfos()) {
866      if(storageInfo.getBlockReportCount() == 0 )
867        return false;
868    }
869    return true;
870  }
871
  /** Force this datanode to re-register on its next heartbeat. */
  public void setForceRegistration(boolean force) {
    forceRegistration = force;
  }

  /** @return whether the node is alive and not pending forced re-registration. */
  public boolean isRegistered() {
    return isAlive() && !forceRegistration;
  }
879}
880