001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements. See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership. The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License. You may obtain a copy of the License at
009 *
010 * http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018 package org.apache.hadoop.hdfs.server.blockmanagement;
019
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
031
032 /**
033 * A Datanode has one or more storages. A storage in the Datanode is represented
034 * by this class.
035 */
036 public class DatanodeStorageInfo {
037 public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
038
039 public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
040 return toDatanodeInfos(Arrays.asList(storages));
041 }
042 static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
043 final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
044 for(int i = 0; i < storages.size(); i++) {
045 datanodes[i] = storages.get(i).getDatanodeDescriptor();
046 }
047 return datanodes;
048 }
049
050 static DatanodeDescriptor[] toDatanodeDescriptors(
051 DatanodeStorageInfo[] storages) {
052 DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
053 for (int i = 0; i < storages.length; ++i) {
054 datanodes[i] = storages[i].getDatanodeDescriptor();
055 }
056 return datanodes;
057 }
058
059 public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
060 String[] storageIDs = new String[storages.length];
061 for(int i = 0; i < storageIDs.length; i++) {
062 storageIDs[i] = storages[i].getStorageID();
063 }
064 return storageIDs;
065 }
066
067 public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
068 StorageType[] storageTypes = new StorageType[storages.length];
069 for(int i = 0; i < storageTypes.length; i++) {
070 storageTypes[i] = storages[i].getStorageType();
071 }
072 return storageTypes;
073 }
074
075 public void updateFromStorage(DatanodeStorage storage) {
076 state = storage.getState();
077 storageType = storage.getStorageType();
078 }
079
080 /**
081 * Iterates over the list of blocks belonging to the data-node.
082 */
083 class BlockIterator implements Iterator<BlockInfo> {
084 private BlockInfo current;
085
086 BlockIterator(BlockInfo head) {
087 this.current = head;
088 }
089
090 public boolean hasNext() {
091 return current != null;
092 }
093
094 public BlockInfo next() {
095 BlockInfo res = current;
096 current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
097 return res;
098 }
099
100 public void remove() {
101 throw new UnsupportedOperationException("Sorry. can't remove.");
102 }
103 }
104
105 private final DatanodeDescriptor dn;
106 private final String storageID;
107 private StorageType storageType;
108 private State state;
109
110 private long capacity;
111 private long dfsUsed;
112 private volatile long remaining;
113 private long blockPoolUsed;
114
115 private volatile BlockInfo blockList = null;
116 private int numBlocks = 0;
117
118 /** The number of block reports received */
119 private int blockReportCount = 0;
120
121 /**
122 * Set to false on any NN failover, and reset to true
123 * whenever a block report is received.
124 */
125 private boolean heartbeatedSinceFailover = false;
126
127 /**
128 * At startup or at failover, the storages in the cluster may have pending
129 * block deletions from a previous incarnation of the NameNode. The block
130 * contents are considered as stale until a block report is received. When a
131 * storage is considered as stale, the replicas on it are also considered as
132 * stale. If any block has at least one stale replica, then no invalidations
133 * will be processed for this block. See HDFS-1972.
134 */
135 private boolean blockContentsStale = true;
136
137 DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) {
138 this.dn = dn;
139 this.storageID = s.getStorageID();
140 this.storageType = s.getStorageType();
141 this.state = s.getState();
142 }
143
144 int getBlockReportCount() {
145 return blockReportCount;
146 }
147
148 void setBlockReportCount(int blockReportCount) {
149 this.blockReportCount = blockReportCount;
150 }
151
152 boolean areBlockContentsStale() {
153 return blockContentsStale;
154 }
155
156 void markStaleAfterFailover() {
157 heartbeatedSinceFailover = false;
158 blockContentsStale = true;
159 }
160
161 void receivedHeartbeat(StorageReport report) {
162 updateState(report);
163 heartbeatedSinceFailover = true;
164 }
165
166 void receivedBlockReport() {
167 if (heartbeatedSinceFailover) {
168 blockContentsStale = false;
169 }
170 blockReportCount++;
171 }
172
173 @VisibleForTesting
174 public void setUtilizationForTesting(long capacity, long dfsUsed,
175 long remaining, long blockPoolUsed) {
176 this.capacity = capacity;
177 this.dfsUsed = dfsUsed;
178 this.remaining = remaining;
179 this.blockPoolUsed = blockPoolUsed;
180 }
181
182 State getState() {
183 return this.state;
184 }
185
186 void setState(State state) {
187 this.state = state;
188 }
189
190 boolean areBlocksOnFailedStorage() {
191 return getState() == State.FAILED && numBlocks != 0;
192 }
193
194 String getStorageID() {
195 return storageID;
196 }
197
198 StorageType getStorageType() {
199 return storageType;
200 }
201
202 long getCapacity() {
203 return capacity;
204 }
205
206 long getDfsUsed() {
207 return dfsUsed;
208 }
209
210 long getRemaining() {
211 return remaining;
212 }
213
214 long getBlockPoolUsed() {
215 return blockPoolUsed;
216 }
217
218 public boolean addBlock(BlockInfo b) {
219 // First check whether the block belongs to a different storage
220 // on the same DN.
221 boolean replaced = false;
222 DatanodeStorageInfo otherStorage =
223 b.findStorageInfo(getDatanodeDescriptor());
224
225 if (otherStorage != null) {
226 if (otherStorage != this) {
227 // The block belongs to a different storage. Remove it first.
228 otherStorage.removeBlock(b);
229 replaced = true;
230 } else {
231 // The block is already associated with this storage.
232 return false;
233 }
234 }
235
236 // add to the head of the data-node list
237 b.addStorage(this);
238 blockList = b.listInsert(blockList, this);
239 numBlocks++;
240 return !replaced;
241 }
242
243 boolean removeBlock(BlockInfo b) {
244 blockList = b.listRemove(blockList, this);
245 if (b.removeStorage(this)) {
246 numBlocks--;
247 return true;
248 } else {
249 return false;
250 }
251 }
252
253 int numBlocks() {
254 return numBlocks;
255 }
256
257 Iterator<BlockInfo> getBlockIterator() {
258 return new BlockIterator(blockList);
259
260 }
261
262 /**
263 * Move block to the head of the list of blocks belonging to the data-node.
264 * @return the index of the head of the blockList
265 */
266 int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
267 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
268 return curIndex;
269 }
270
271 /**
272 * Used for testing only
273 * @return the head of the blockList
274 */
275 @VisibleForTesting
276 BlockInfo getBlockListHeadForTesting(){
277 return blockList;
278 }
279
280 void updateState(StorageReport r) {
281 capacity = r.getCapacity();
282 dfsUsed = r.getDfsUsed();
283 remaining = r.getRemaining();
284 blockPoolUsed = r.getBlockPoolUsed();
285 }
286
287 public DatanodeDescriptor getDatanodeDescriptor() {
288 return dn;
289 }
290
291 /** Increment the number of blocks scheduled for each given storage */
292 public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) {
293 for (DatanodeStorageInfo s : storages) {
294 s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType());
295 }
296 }
297
298 @Override
299 public boolean equals(Object obj) {
300 if (this == obj) {
301 return true;
302 } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) {
303 return false;
304 }
305 final DatanodeStorageInfo that = (DatanodeStorageInfo)obj;
306 return this.storageID.equals(that.storageID);
307 }
308
309 @Override
310 public int hashCode() {
311 return storageID.hashCode();
312 }
313
314 @Override
315 public String toString() {
316 return "[" + storageType + "]" + storageID + ":" + state + ":" + dn;
317 }
318
319 StorageReport toStorageReport() {
320 return new StorageReport(
321 new DatanodeStorage(storageID, state, storageType),
322 false, capacity, dfsUsed, remaining, blockPoolUsed);
323 }
324
325 static Iterable<StorageType> toStorageTypes(
326 final Iterable<DatanodeStorageInfo> infos) {
327 return new Iterable<StorageType>() {
328 @Override
329 public Iterator<StorageType> iterator() {
330 return new Iterator<StorageType>() {
331 final Iterator<DatanodeStorageInfo> i = infos.iterator();
332 @Override
333 public boolean hasNext() {return i.hasNext();}
334 @Override
335 public StorageType next() {return i.next().getStorageType();}
336 @Override
337 public void remove() {
338 throw new UnsupportedOperationException();
339 }
340 };
341 }
342 };
343 }
344
345 /** @return the first {@link DatanodeStorageInfo} corresponding to
346 * the given datanode
347 */
348 static DatanodeStorageInfo getDatanodeStorageInfo(
349 final Iterable<DatanodeStorageInfo> infos,
350 final DatanodeDescriptor datanode) {
351 if (datanode == null) {
352 return null;
353 }
354 for(DatanodeStorageInfo storage : infos) {
355 if (storage.getDatanodeDescriptor() == datanode) {
356 return storage;
357 }
358 }
359 return null;
360 }
361 }