/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;

/**
 * A list of FileDiffs for storing snapshot data.
 * <p>
 * Each {@link FileDiff} records the file's attributes (and optionally its
 * block list) as they were at a particular snapshot. The list inherited from
 * {@link AbstractINodeDiffList} is ordered by snapshot id, which is what makes
 * the {@code Collections.binarySearch} calls below valid.
 */
public class FileDiffList extends
    AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {

  /** Factory hook: build a new diff entry for {@code file} at {@code snapshotId}. */
  @Override
  FileDiff createDiff(int snapshotId, INodeFile file) {
    return new FileDiff(snapshotId, file);
  }

  /** Factory hook: capture an immutable snapshot copy of the file's attributes. */
  @Override
  INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
    return new INodeFileAttributes.SnapshotCopy(currentINode);
  }

  /**
   * Destroy every diff in this list, adding any blocks that were retained only
   * by a snapshot into {@code collectedBlocks} so the caller can delete them.
   *
   * @param collectedBlocks accumulator for blocks to remove from the blocks map
   */
  public void destroyAndCollectSnapshotBlocks(
      BlocksMapUpdateInfo collectedBlocks) {
    for (FileDiff d : asList()) {
      d.destroyAndCollectSnapshotBlocks(collectedBlocks);
    }
  }

  /**
   * Save the file's current state into the diff for {@code latestSnapshotId}.
   *
   * @param latestSnapshotId id of the latest snapshot taken
   * @param iNodeFile        the file whose state is being preserved
   * @param snapshotCopy     pre-built attribute copy, or null to create one
   * @param withBlocks       if true, also record the file's current block list
   *                         in the diff (done on the first update after the
   *                         snapshot, per the comment below)
   */
  public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
      INodeFileAttributes snapshotCopy, boolean withBlocks) {
    final FileDiff diff =
        super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
    if (withBlocks) { // Store blocks if this is the first update
      diff.setBlocks(iNodeFile.getBlocks());
    }
  }

  /**
   * Find the most recent block list recorded at or before {@code snapshotId}.
   * Diffs that did not change the block list store null, so the scan walks
   * backwards until it finds a diff that actually recorded blocks.
   *
   * @param snapshotId the snapshot to search from; must not be NO_SNAPSHOT_ID
   * @return the nearest earlier recorded block list, or null if none exists
   *         (also null when snapshotId is CURRENT_STATE_ID)
   */
  public BlockInfo[] findEarlierSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    List<FileDiff> diffs = this.asList();
    int i = Collections.binarySearch(diffs, snapshotId);
    BlockInfo[] blocks = null;
    // binarySearch returns -(insertionPoint) - 1 on a miss, so -i-2 is the
    // index of the last diff with id < snapshotId; on a hit, start at the
    // match itself and walk toward older diffs.
    for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
      blocks = diffs.get(i).getBlocks();
      if(blocks != null) {
        break;
      }
    }
    return blocks;
  }

  /**
   * Find the first block list recorded strictly after {@code snapshotId}.
   * Counterpart of {@link #findEarlierSnapshotBlocks(int)}, scanning forward
   * past diffs whose block list is null (unchanged).
   *
   * @param snapshotId the snapshot to search from; must not be NO_SNAPSHOT_ID
   * @return the nearest later recorded block list, or null if none exists
   *         (also null when snapshotId is CURRENT_STATE_ID)
   */
  public BlockInfo[] findLaterSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    List<FileDiff> diffs = this.asList();
    int i = Collections.binarySearch(diffs, snapshotId);
    BlockInfo[] blocks = null;
    // On a hit, start just past the match (i+1); on a miss, -i-1 is the
    // insertion point, i.e. the first diff with id > snapshotId.
    for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
      blocks = diffs.get(i).getBlocks();
      if (blocks != null) {
        break;
      }
    }
    return blocks;
  }

  /**
   * Copy blocks from the removed snapshot into the previous snapshot
   * up to the file length of the latter.
   * Collect unused blocks of the removed snapshot.
   *
   * @param reclaimContext carries the accumulator for blocks to delete
   * @param file           the file the removed diff belongs to
   * @param removed        the diff being deleted from this list
   */
  void combineAndCollectSnapshotBlocks(
      INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
    BlockInfo[] removedBlocks = removed.getBlocks();
    // If the removed diff recorded no block list, there is nothing to merge;
    // but if the current file itself is deleted, its blocks may now be fully
    // unreferenced and must be collected.
    if (removedBlocks == null) {
      FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
      assert sf != null : "FileWithSnapshotFeature is null";
      if(sf.isCurrentFileDeleted())
        sf.collectBlocksAndClear(reclaimContext, file);
      return;
    }
    int p = getPrior(removed.getSnapshotId(), true);
    FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
    // Copy blocks to the previous snapshot if not set already
    // NOTE(review): the "if not set already" guard is presumably inside
    // FileDiff.setBlocks (i.e. it no-ops when blocks were already recorded)
    // -- confirm against FileDiff, since the call here is unconditional.
    if (earlierDiff != null) {
      earlierDiff.setBlocks(removedBlocks);
    }
    BlockInfo[] earlierBlocks =
        (earlierDiff == null ? new BlockInfo[]{} : earlierDiff.getBlocks());
    // Find later snapshot (or file itself) with blocks
    BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
    laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
    // Skip blocks, which belong to either the earlier or the later lists
    // NOTE(review): reference equality (==) looks intentional here -- adjacent
    // diffs appear to share the same BlockInfo instances for common blocks, so
    // identity distinguishes shared blocks from replaced ones; verify against
    // how FileDiff block arrays are populated before changing to equals().
    int i = 0;
    for(; i < removedBlocks.length; i++) {
      if(i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i])
        continue;
      if(i < laterBlocks.length && removedBlocks[i] == laterBlocks[i])
        continue;
      break;
    }
    // Check if last block is part of truncate recovery
    BlockInfo lastBlock = file.getLastBlock();
    Block dontRemoveBlock = null;
    if (lastBlock != null && lastBlock.getBlockUCState().equals(
        HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
      dontRemoveBlock = lastBlock.getUnderConstructionFeature()
          .getTruncateBlock();
    }
    // Collect the remaining blocks of the file, ignoring truncate block
    for (;i < removedBlocks.length; i++) {
      if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
        reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
      }
    }
  }
}