001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs.server.blockmanagement;
019    
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
030    
031    /**
032     * A Datanode has one or more storages. A storage in the Datanode is represented
033     * by this class.
034     */
035    public class DatanodeStorageInfo {
036      public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
037    
038      public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
039        return toDatanodeInfos(Arrays.asList(storages));
040      }
041      static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
042        final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
043        for(int i = 0; i < storages.size(); i++) {
044          datanodes[i] = storages.get(i).getDatanodeDescriptor();
045        }
046        return datanodes;
047      }
048    
049      static DatanodeDescriptor[] toDatanodeDescriptors(
050          DatanodeStorageInfo[] storages) {
051        DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
052        for (int i = 0; i < storages.length; ++i) {
053          datanodes[i] = storages[i].getDatanodeDescriptor();
054        }
055        return datanodes;
056      }
057    
058      public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
059        String[] storageIDs = new String[storages.length];
060        for(int i = 0; i < storageIDs.length; i++) {
061          storageIDs[i] = storages[i].getStorageID();
062        }
063        return storageIDs;
064      }
065    
066      public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
067        StorageType[] storageTypes = new StorageType[storages.length];
068        for(int i = 0; i < storageTypes.length; i++) {
069          storageTypes[i] = storages[i].getStorageType();
070        }
071        return storageTypes;
072      }
073    
074      public void updateFromStorage(DatanodeStorage storage) {
075        state = storage.getState();
076        storageType = storage.getStorageType();
077      }
078    
079      /**
080       * Iterates over the list of blocks belonging to the data-node.
081       */
082      class BlockIterator implements Iterator<BlockInfo> {
083        private BlockInfo current;
084    
085        BlockIterator(BlockInfo head) {
086          this.current = head;
087        }
088    
089        public boolean hasNext() {
090          return current != null;
091        }
092    
093        public BlockInfo next() {
094          BlockInfo res = current;
095          current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
096          return res;
097        }
098    
099        public void remove() {
100          throw new UnsupportedOperationException("Sorry. can't remove.");
101        }
102      }
103    
104      private final DatanodeDescriptor dn;
105      private final String storageID;
106      private StorageType storageType;
107      private State state;
108    
109      private long capacity;
110      private long dfsUsed;
111      private long remaining;
112      private long blockPoolUsed;
113    
114      private volatile BlockInfo blockList = null;
115      private int numBlocks = 0;
116    
117      /** The number of block reports received */
118      private int blockReportCount = 0;
119    
120      /**
121       * Set to false on any NN failover, and reset to true
122       * whenever a block report is received.
123       */
124      private boolean heartbeatedSinceFailover = false;
125    
126      /**
127       * At startup or at failover, the storages in the cluster may have pending
128       * block deletions from a previous incarnation of the NameNode. The block
129       * contents are considered as stale until a block report is received. When a
130       * storage is considered as stale, the replicas on it are also considered as
131       * stale. If any block has at least one stale replica, then no invalidations
132       * will be processed for this block. See HDFS-1972.
133       */
134      private boolean blockContentsStale = true;
135    
136      DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) {
137        this.dn = dn;
138        this.storageID = s.getStorageID();
139        this.storageType = s.getStorageType();
140        this.state = s.getState();
141      }
142    
143      int getBlockReportCount() {
144        return blockReportCount;
145      }
146    
147      void setBlockReportCount(int blockReportCount) {
148        this.blockReportCount = blockReportCount;
149      }
150    
151      boolean areBlockContentsStale() {
152        return blockContentsStale;
153      }
154    
155      void markStaleAfterFailover() {
156        heartbeatedSinceFailover = false;
157        blockContentsStale = true;
158      }
159    
160      void receivedHeartbeat(StorageReport report) {
161        updateState(report);
162        heartbeatedSinceFailover = true;
163      }
164    
165      void receivedBlockReport() {
166        if (heartbeatedSinceFailover) {
167          blockContentsStale = false;
168        }
169        blockReportCount++;
170      }
171    
172      @VisibleForTesting
173      public void setUtilizationForTesting(long capacity, long dfsUsed,
174                          long remaining, long blockPoolUsed) {
175        this.capacity = capacity;
176        this.dfsUsed = dfsUsed;
177        this.remaining = remaining;
178        this.blockPoolUsed = blockPoolUsed;
179      }
180      
181      State getState() {
182        return this.state;
183      }
184      
185      String getStorageID() {
186        return storageID;
187      }
188    
189      StorageType getStorageType() {
190        return storageType;
191      }
192    
193      long getCapacity() {
194        return capacity;
195      }
196    
197      long getDfsUsed() {
198        return dfsUsed;
199      }
200    
201      long getRemaining() {
202        return remaining;
203      }
204    
205      long getBlockPoolUsed() {
206        return blockPoolUsed;
207      }
208    
209      boolean addBlock(BlockInfo b) {
210        if(!b.addStorage(this))
211          return false;
212        // add to the head of the data-node list
213        blockList = b.listInsert(blockList, this);
214        numBlocks++;
215        return true;
216      }
217    
218      boolean removeBlock(BlockInfo b) {
219        blockList = b.listRemove(blockList, this);
220        if (b.removeStorage(this)) {
221          numBlocks--;
222          return true;
223        } else {
224          return false;
225        }
226      }
227    
228      int numBlocks() {
229        return numBlocks;
230      }
231      
232      Iterator<BlockInfo> getBlockIterator() {
233        return new BlockIterator(blockList);
234    
235      }
236    
237      /**
238       * Move block to the head of the list of blocks belonging to the data-node.
239       * @return the index of the head of the blockList
240       */
241      int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
242        blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
243        return curIndex;
244      }
245    
246      /**
247       * Used for testing only
248       * @return the head of the blockList
249       */
250      @VisibleForTesting
251      BlockInfo getBlockListHeadForTesting(){
252        return blockList;
253      }
254    
255      void updateState(StorageReport r) {
256        capacity = r.getCapacity();
257        dfsUsed = r.getDfsUsed();
258        remaining = r.getRemaining();
259        blockPoolUsed = r.getBlockPoolUsed();
260      }
261    
262      public DatanodeDescriptor getDatanodeDescriptor() {
263        return dn;
264      }
265    
266      /** Increment the number of blocks scheduled for each given storage */ 
267      public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) {
268        for (DatanodeStorageInfo s : storages) {
269          s.getDatanodeDescriptor().incrementBlocksScheduled();
270        }
271      }
272    
273      @Override
274      public boolean equals(Object obj) {
275        if (this == obj) {
276          return true;
277        } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) {
278          return false;
279        }
280        final DatanodeStorageInfo that = (DatanodeStorageInfo)obj;
281        return this.storageID.equals(that.storageID);
282      }
283    
284      @Override
285      public int hashCode() {
286        return storageID.hashCode();
287      }
288    
289      @Override
290      public String toString() {
291        return "[" + storageType + "]" + storageID + ":" + state;
292      }
293    }