001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs.server.namenode.snapshot;
019    
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
033    
/**
 * An interface that {@link INodeFile} implements in order to support snapshots.
 */
037    @InterfaceAudience.Private
038    public interface FileWithSnapshot {
039      /**
040       * The difference of an {@link INodeFile} between two snapshots.
041       */
  public static class FileDiff extends AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
    /** The file size at snapshot creation time. */
    private final long fileSize;

    /**
     * Create a diff recording the size of {@code file} at the time
     * {@code snapshot} is taken.  The snapshot copy of the inode attributes
     * and the posterior diff are unknown at this point, so both start null.
     */
    private FileDiff(Snapshot snapshot, INodeFile file) {
      super(snapshot, null, null);
      fileSize = file.computeFileSize();
    }

    /** Constructor used by FSImage loading */
    FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
        FileDiff posteriorDiff, long fileSize) {
      super(snapshot, snapshotINode, posteriorDiff);
      this.fileSize = fileSize;
    }

    /** @return the file size in the snapshot. */
    public long getFileSize() {
      return fileSize;
    }

    /**
     * Collect the blocks released by removing the diff {@code removed} and
     * compute the resulting change in disk-space usage.
     *
     * @param currentINode the current inode of the file; must also implement
     *        {@link FileWithSnapshot} (the cast below relies on this)
     * @param removed the diff being removed (a combined posterior diff, or
     *        this diff itself when it is being destroyed)
     * @param collectedBlocks blocks to be deleted are added here
     * @param removedINodes inodes removed from the tree are added here
     * @return the quota delta: 0 namespace change, and the diskspace freed
     *         (old usage minus usage after block collection)
     */
    private static Quota.Counts updateQuotaAndCollectBlocks(
        INodeFile currentINode, FileDiff removed,
        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
      FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
      long oldDiskspace = currentINode.diskspaceConsumed();
      if (removed.snapshotINode != null) {
        short replication = removed.snapshotINode.getFileReplication();
        short currentRepl = currentINode.getBlockReplication();
        if (currentRepl == 0) {
          // Current block replication is 0; recompute the old usage from the
          // full file size at the replication recorded in the removed diff.
          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
        } else if (replication > currentRepl) {  
          // The removed diff recorded a higher replication than the current
          // one: scale the old usage up to that replication factor.
          oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
              * replication;
        }
      }
      
      Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
      
      // Diskspace freed = usage before collection minus usage after.
      long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
      return Quota.Counts.newInstance(0, dsDelta);
    }
    
    @Override
    Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
        FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
        final List<INode> removedINodes) {
      // Combining with the posterior diff releases whatever only the
      // posterior referenced.
      return updateQuotaAndCollectBlocks(currentINode, posterior,
          collectedBlocks, removedINodes);
    }
    
    @Override
    public String toString() {
      return super.toString() + " fileSize=" + fileSize + ", rep="
          + (snapshotINode == null? "?": snapshotINode.getFileReplication());
    }

    /**
     * Serialize this diff for the FSImage: the snapshot (via
     * {@code writeSnapshot}), the recorded file size, then a boolean flag
     * followed by the snapshot inode attributes when they are present.
     */
    @Override
    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
      writeSnapshot(out);
      out.writeLong(fileSize);

      // write snapshotINode
      if (snapshotINode != null) {
        out.writeBoolean(true);
        FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
      } else {
        out.writeBoolean(false);
      }
    }

    @Override
    Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
      // Destroying this diff releases whatever only this diff referenced.
      return updateQuotaAndCollectBlocks(currentINode, this,
          collectedBlocks, removedINodes);
    }
  }
120    
121      /** A list of FileDiffs for storing snapshot data. */
122      public static class FileDiffList
123          extends AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
124    
125        @Override
126        FileDiff createDiff(Snapshot snapshot, INodeFile file) {
127          return new FileDiff(snapshot, file);
128        }
129        
130        @Override
131        INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
132          return new INodeFileAttributes.SnapshotCopy(currentINode);
133        }
134      }
135    
  /** @return the {@link INodeFile} view of this object. */
  public INodeFile asINodeFile();

  /** @return the list of {@link FileDiff}s recorded for this file. */
  public FileDiffList getDiffs();

  /**
   * @return true if the file has been deleted from the current tree,
   *         i.e. it is only referenced by snapshots.
   */
  public boolean isCurrentFileDeleted();
  
  /** Mark the file as deleted from the current tree. */
  public void deleteCurrentFile();
147    
148      /** Utility methods for the classes which implement the interface. */
149      public static class Util {
150        /** 
151         * @return block replication, which is the max file replication among
152         *         the file and the diff list.
153         */
154        public static short getBlockReplication(final FileWithSnapshot file) {
155          short max = file.isCurrentFileDeleted()? 0
156              : file.asINodeFile().getFileReplication();
157          for(FileDiff d : file.getDiffs()) {
158            if (d.snapshotINode != null) {
159              final short replication = d.snapshotINode.getFileReplication();
160              if (replication > max) {
161                max = replication;
162              }
163            }
164          }
165          return max;
166        }
167    
168        /**
169         * If some blocks at the end of the block list no longer belongs to
170         * any inode, collect them and update the block list.
171         */
172        static void collectBlocksAndClear(final FileWithSnapshot file,
173            final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
174          // check if everything is deleted.
175          if (file.isCurrentFileDeleted()
176              && file.getDiffs().asList().isEmpty()) {
177            file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
178            return;
179          }
180    
181          // find max file size.
182          final long max;
183          if (file.isCurrentFileDeleted()) {
184            final FileDiff last = file.getDiffs().getLast();
185            max = last == null? 0: last.fileSize;
186          } else { 
187            max = file.asINodeFile().computeFileSize();
188          }
189    
190          collectBlocksBeyondMax(file, max, info);
191        }
192    
193        private static void collectBlocksBeyondMax(final FileWithSnapshot file,
194            final long max, final BlocksMapUpdateInfo collectedBlocks) {
195          final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
196          if (oldBlocks != null) {
197            //find the minimum n such that the size of the first n blocks > max
198            int n = 0;
199            for(long size = 0; n < oldBlocks.length && max > size; n++) {
200              size += oldBlocks[n].getNumBytes();
201            }
202            
203            // starting from block n, the data is beyond max.
204            if (n < oldBlocks.length) {
205              // resize the array.  
206              final BlockInfo[] newBlocks;
207              if (n == 0) {
208                newBlocks = null;
209              } else {
210                newBlocks = new BlockInfo[n];
211                System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
212              }
213              
214              // set new blocks
215              file.asINodeFile().setBlocks(newBlocks);
216    
217              // collect the blocks beyond max.  
218              if (collectedBlocks != null) {
219                for(; n < oldBlocks.length; n++) {
220                  collectedBlocks.addDeleteBlock(oldBlocks[n]);
221                }
222              }
223            }
224          }
225        }
226      }
227    }