/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;

/** A list of FileDiffs for storing snapshot data. */
public class FileDiffList extends
    AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {

  @Override
  FileDiff createDiff(int snapshotId, INodeFile file) {
    return new FileDiff(snapshotId, file);
  }

  @Override
  INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
    return new INodeFileAttributes.SnapshotCopy(currentINode);
  }

  /**
   * Destroy every diff in this list and hand each diff's snapshot-only
   * blocks to {@code collectedBlocks} for removal from the blocks map.
   *
   * @param collectedBlocks accumulator for blocks to be deleted
   */
  public void destroyAndCollectSnapshotBlocks(
      BlocksMapUpdateInfo collectedBlocks) {
    for (FileDiff diff : asList()) {
      diff.destroyAndCollectSnapshotBlocks(collectedBlocks);
    }
  }

  /**
   * Save the file's current state into the latest snapshot's diff,
   * optionally recording the file's block list as well.
   *
   * @param latestSnapshotId id of the latest snapshot
   * @param iNodeFile the file being snapshotted
   * @param snapshotCopy the attribute copy to store in the diff
   * @param withBlocks whether to also record the file's blocks
   */
  public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
      INodeFileAttributes snapshotCopy, boolean withBlocks) {
    final FileDiff diff =
        super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
    // Record the block list only on the first update of this diff.
    if (withBlocks) {
      diff.setBlocks(iNodeFile.getBlocks());
    }
  }

  /**
   * Find the block list recorded by the newest diff whose snapshot id is
   * less than or equal to {@code snapshotId}.
   *
   * @param snapshotId the snapshot to search from; must not be
   *        {@link Snapshot#NO_SNAPSHOT_ID}
   * @return the blocks of the nearest earlier (or matching) diff that has
   *         them recorded, or null if none does or if {@code snapshotId}
   *         is {@link Snapshot#CURRENT_STATE_ID}
   */
  public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    final List<FileDiff> diffs = this.asList();
    final int found = Collections.binarySearch(diffs, snapshotId);
    // Exact hit: start at that diff. Miss: binarySearch returns
    // -(insertionPoint) - 1, so -found - 2 is the diff just before the
    // insertion point, i.e. the newest diff older than snapshotId.
    int pos = found >= 0 ? found : -found - 2;
    while (pos >= 0) {
      final BlockInfoContiguous[] blocks = diffs.get(pos).getBlocks();
      if (blocks != null) {
        return blocks;
      }
      pos--;
    }
    return null;
  }

  /**
   * Find the block list recorded by the oldest diff whose snapshot id is
   * strictly greater than {@code snapshotId}.
   *
   * @param snapshotId the snapshot to search from; must not be
   *        {@link Snapshot#NO_SNAPSHOT_ID}
   * @return the blocks of the nearest later diff that has them recorded,
   *         or null if none does or if {@code snapshotId} is
   *         {@link Snapshot#CURRENT_STATE_ID}
   */
  public BlockInfoContiguous[] findLaterSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    final List<FileDiff> diffs = this.asList();
    final int found = Collections.binarySearch(diffs, snapshotId);
    // Exact hit: start just after it. Miss: -found - 1 recovers the
    // insertion point, the first diff newer than snapshotId.
    int pos = found >= 0 ? found + 1 : -found - 1;
    while (pos < diffs.size()) {
      final BlockInfoContiguous[] blocks = diffs.get(pos).getBlocks();
      if (blocks != null) {
        return blocks;
      }
      pos++;
    }
    return null;
  }

  /**
   * Copy blocks from the removed snapshot into the previous snapshot
   * up to the file length of the latter.
   * Collect unused blocks of the removed snapshot.
   */
  void combineAndCollectSnapshotBlocks(BlockStoragePolicySuite bsps, INodeFile file,
                                       FileDiff removed,
                                       BlocksMapUpdateInfo collectedBlocks,
                                       List<INode> removedINodes) {
    final BlockInfoContiguous[] removedBlocks = removed.getBlocks();
    if (removedBlocks == null) {
      // The removed diff recorded no blocks of its own; if the current
      // file is also deleted, clear the file and collect its blocks.
      final FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
      assert sf != null : "FileWithSnapshotFeature is null";
      if (sf.isCurrentFileDeleted()) {
        sf.collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
      }
      return;
    }
    final int prior = getPrior(removed.getSnapshotId(), true);
    final FileDiff earlierDiff =
        prior == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(prior);
    // Hand the removed diff's blocks to the previous snapshot if it has
    // none set already.
    if (earlierDiff != null) {
      earlierDiff.setBlocks(removedBlocks);
    }
    final BlockInfoContiguous[] earlierBlocks = earlierDiff == null
        ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks();
    // Later snapshot (or the file itself) that has a block list.
    BlockInfoContiguous[] laterBlocks =
        findLaterSnapshotBlocks(removed.getSnapshotId());
    if (laterBlocks == null) {
      laterBlocks = file.getBlocks();
    }
    // Skip the leading blocks still referenced by either the earlier or
    // the later list; only the remainder is exclusive to the removed diff.
    int idx = 0;
    while (idx < removedBlocks.length) {
      final boolean sharedWithEarlier = idx < earlierBlocks.length
          && removedBlocks[idx] == earlierBlocks[idx];
      final boolean sharedWithLater = idx < laterBlocks.length
          && removedBlocks[idx] == laterBlocks[idx];
      if (!sharedWithEarlier && !sharedWithLater) {
        break;
      }
      idx++;
    }
    // If the last block is under truncate recovery, its truncate block
    // must survive this collection.
    final BlockInfoContiguous lastBlock = file.getLastBlock();
    BlockInfoContiguous dontRemoveBlock = null;
    if (lastBlock != null && lastBlock.getBlockUCState().equals(
        HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
          .getTruncateBlock();
    }
    // Collect the remaining blocks of the file, ignoring the truncate block.
    for (; idx < removedBlocks.length; idx++) {
      if (dontRemoveBlock == null
          || !removedBlocks[idx].equals(dontRemoveBlock)) {
        collectedBlocks.addDeleteBlock(removedBlocks[idx]);
      }
    }
  }
}