/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

/**
 * A Datanode has one or more storages. A storage in the Datanode is represented
 * by this class.
 */
public class DatanodeStorageInfo {
  public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};

  /**
   * @return the {@link DatanodeInfo} of each storage's datanode, or null if
   *         the input array is null.
   */
  public static DatanodeInfo[] toDatanodeInfos(
      DatanodeStorageInfo[] storages) {
    return storages == null ? null : toDatanodeInfos(Arrays.asList(storages));
  }

  static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
    final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
    for (int i = 0; i < storages.size(); i++) {
      datanodes[i] = storages.get(i).getDatanodeDescriptor();
    }
    return datanodes;
  }

  static DatanodeDescriptor[] toDatanodeDescriptors(
      DatanodeStorageInfo[] storages) {
    DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
    for (int i = 0; i < storages.length; ++i) {
      datanodes[i] = storages[i].getDatanodeDescriptor();
    }
    return datanodes;
  }

  /**
   * @return the storage ID of each storage, or null if the input array is
   *         null.
   */
  public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
    if (storages == null) {
      return null;
    }
    String[] storageIDs = new String[storages.length];
    for (int i = 0; i < storageIDs.length; i++) {
      storageIDs[i] = storages[i].getStorageID();
    }
    return storageIDs;
  }

  /**
   * @return the {@link StorageType} of each storage, or null if the input
   *         array is null.
   */
  public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
    if (storages == null) {
      return null;
    }
    StorageType[] storageTypes = new StorageType[storages.length];
    for (int i = 0; i < storageTypes.length; i++) {
      storageTypes[i] = storages[i].getStorageType();
    }
    return storageTypes;
  }

  /** Update the state and type of this storage from the reported storage. */
  public void updateFromStorage(DatanodeStorage storage) {
    state = storage.getState();
    storageType = storage.getStorageType();
  }

  /**
   * Iterates over the list of blocks belonging to the data-node.
   */
  class BlockIterator implements Iterator<BlockInfo> {
    private BlockInfo current;

    BlockIterator(BlockInfo head) {
      this.current = head;
    }

    @Override
    public boolean hasNext() {
      return current != null;
    }

    @Override
    public BlockInfo next() {
      // Honor the Iterator contract: throw NoSuchElementException rather
      // than NullPointerException when the iteration is exhausted.
      if (current == null) {
        throw new NoSuchElementException("No more blocks in this storage.");
      }
      BlockInfo res = current;
      current =
          current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
      return res;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("Sorry. can't remove.");
    }
  }

  private final DatanodeDescriptor dn;
  private final String storageID;
  private StorageType storageType;
  private State state;

  private long capacity;
  private long dfsUsed;
  private long nonDfsUsed;
  private volatile long remaining;
  private long blockPoolUsed;

  /** Head of the doubly-linked list of blocks stored on this storage. */
  private volatile BlockInfo blockList = null;
  private int numBlocks = 0;

  /** The number of block reports received */
  private int blockReportCount = 0;

  /**
   * Set to false on any NN failover, and reset to true
   * whenever a block report is received.
   */
  private boolean heartbeatedSinceFailover = false;

  /**
   * At startup or at failover, the storages in the cluster may have pending
   * block deletions from a previous incarnation of the NameNode. The block
   * contents are considered as stale until a block report is received. When a
   * storage is considered as stale, the replicas on it are also considered as
   * stale. If any block has at least one stale replica, then no invalidations
   * will be processed for this block. See HDFS-1972.
   */
  private boolean blockContentsStale = true;

  DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) {
    this.dn = dn;
    this.storageID = s.getStorageID();
    this.storageType = s.getStorageType();
    this.state = s.getState();
  }

  public int getBlockReportCount() {
    return blockReportCount;
  }

  void setBlockReportCount(int blockReportCount) {
    this.blockReportCount = blockReportCount;
  }

  public boolean areBlockContentsStale() {
    return blockContentsStale;
  }

  /** Mark this storage stale; cleared by the next block report (HDFS-1972). */
  void markStaleAfterFailover() {
    heartbeatedSinceFailover = false;
    blockContentsStale = true;
  }

  void receivedHeartbeat(StorageReport report) {
    updateState(report);
    heartbeatedSinceFailover = true;
  }

  void receivedBlockReport() {
    // Only clear the stale flag if a heartbeat arrived after the failover;
    // otherwise the report may predate the NN's view of this storage.
    if (heartbeatedSinceFailover) {
      blockContentsStale = false;
    }
    blockReportCount++;
  }

  @VisibleForTesting
  public void setUtilizationForTesting(long capacity, long dfsUsed,
      long remaining, long blockPoolUsed) {
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
    this.remaining = remaining;
    this.blockPoolUsed = blockPoolUsed;
  }

  State getState() {
    return this.state;
  }

  void setState(State state) {
    this.state = state;
  }

  boolean areBlocksOnFailedStorage() {
    return getState() == State.FAILED && numBlocks != 0;
  }

  public String getStorageID() {
    return storageID;
  }

  public StorageType getStorageType() {
    return storageType;
  }

  long getCapacity() {
    return capacity;
  }

  long getDfsUsed() {
    return dfsUsed;
  }

  long getNonDfsUsed() {
    return nonDfsUsed;
  }

  long getRemaining() {
    return remaining;
  }

  long getBlockPoolUsed() {
    return blockPoolUsed;
  }

  /**
   * Associate the given block with this storage.
   * If the block already belongs to a different storage on the same datanode,
   * it is moved here and {@link AddBlockResult#REPLACED} is returned.
   *
   * @return ADDED, REPLACED, or ALREADY_EXIST.
   */
  public AddBlockResult addBlock(BlockInfo b) {
    // First check whether the block belongs to a different storage
    // on the same DN.
    AddBlockResult result = AddBlockResult.ADDED;
    DatanodeStorageInfo otherStorage =
        b.findStorageInfo(getDatanodeDescriptor());

    if (otherStorage != null) {
      if (otherStorage != this) {
        // The block belongs to a different storage. Remove it first.
        otherStorage.removeBlock(b);
        result = AddBlockResult.REPLACED;
      } else {
        // The block is already associated with this storage.
        return AddBlockResult.ALREADY_EXIST;
      }
    }

    // add to the head of the data-node list
    b.addStorage(this);
    blockList = b.listInsert(blockList, this);
    numBlocks++;
    return result;
  }

  /**
   * Remove the given block from this storage.
   * @return true if the block was present and removed, false otherwise.
   */
  public boolean removeBlock(BlockInfo b) {
    blockList = b.listRemove(blockList, this);
    if (b.removeStorage(this)) {
      numBlocks--;
      return true;
    } else {
      return false;
    }
  }

  int numBlocks() {
    return numBlocks;
  }

  Iterator<BlockInfo> getBlockIterator() {
    return new BlockIterator(blockList);
  }

  /**
   * Move block to the head of the list of blocks belonging to the data-node.
   * @return the index of the head of the blockList
   */
  int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
    blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
    return curIndex;
  }

  /**
   * Used for testing only
   * @return the head of the blockList
   */
  @VisibleForTesting
  BlockInfo getBlockListHeadForTesting() {
    return blockList;
  }

  /** Refresh the utilization counters from a heartbeat storage report. */
  void updateState(StorageReport r) {
    capacity = r.getCapacity();
    dfsUsed = r.getDfsUsed();
    nonDfsUsed = r.getNonDfsUsed();
    remaining = r.getRemaining();
    blockPoolUsed = r.getBlockPoolUsed();
  }

  public DatanodeDescriptor getDatanodeDescriptor() {
    return dn;
  }

  /** Increment the number of blocks scheduled for each given storage */
  public static void incrementBlocksScheduled(
      DatanodeStorageInfo... storages) {
    for (DatanodeStorageInfo s : storages) {
      s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType());
    }
  }

  /**
   * Decrement the number of blocks scheduled for each given storage. This will
   * be called during abandon block or delete of UC block.
   */
  public static void decrementBlocksScheduled(
      DatanodeStorageInfo... storages) {
    for (DatanodeStorageInfo s : storages) {
      s.getDatanodeDescriptor().decrementBlocksScheduled(s.getStorageType());
    }
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(obj instanceof DatanodeStorageInfo)) {
      return false;
    }
    final DatanodeStorageInfo that = (DatanodeStorageInfo) obj;
    // Equality is defined by storage ID alone, consistent with hashCode().
    return this.storageID.equals(that.storageID);
  }

  @Override
  public int hashCode() {
    return storageID.hashCode();
  }

  @Override
  public String toString() {
    return "[" + storageType + "]" + storageID + ":" + state + ":" + dn;
  }

  StorageReport toStorageReport() {
    return new StorageReport(
        new DatanodeStorage(storageID, state, storageType),
        false, capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed);
  }

  /** Lazily view the given storages as their {@link StorageType}s. */
  static Iterable<StorageType> toStorageTypes(
      final Iterable<DatanodeStorageInfo> infos) {
    return new Iterable<StorageType>() {
      @Override
      public Iterator<StorageType> iterator() {
        return new Iterator<StorageType>() {
          final Iterator<DatanodeStorageInfo> i = infos.iterator();
          @Override
          public boolean hasNext() {
            return i.hasNext();
          }
          @Override
          public StorageType next() {
            return i.next().getStorageType();
          }
          @Override
          public void remove() {
            throw new UnsupportedOperationException();
          }
        };
      }
    };
  }

  /** @return the first {@link DatanodeStorageInfo} corresponding to
   *          the given datanode
   */
  static DatanodeStorageInfo getDatanodeStorageInfo(
      final Iterable<DatanodeStorageInfo> infos,
      final DatanodeDescriptor datanode) {
    if (datanode == null) {
      return null;
    }
    for (DatanodeStorageInfo storage : infos) {
      if (storage.getDatanodeDescriptor() == datanode) {
        return storage;
      }
    }
    return null;
  }

  // Nested enums are implicitly static; the redundant modifier is omitted.
  enum AddBlockResult {
    ADDED, REPLACED, ALREADY_EXIST
  }
}