/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadINodeDirectory;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadPermission;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.updateBlocksMap;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeDirectory;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeFile;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import com.google.common.collect.ImmutableList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields;
import org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry;
import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.EnumCounters;

import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;

/**
 * Serializes and deserializes the snapshot-related sections of the
 * protobuf-based FSImage: the SNAPSHOT section (snapshottable dirs and
 * snapshot roots), the SNAPSHOT_DIFF section (per-inode file/directory
 * diff lists), and the INODE_REFERENCE section (WithName/DstReference
 * nodes shared by the diff lists).
 *
 * Static utility holder: all logic lives in the nested {@link Loader}
 * and {@link Saver} classes.
 */
@InterfaceAudience.Private
public class FSImageFormatPBSnapshot {
  /**
   * Loading snapshot related information from protobuf based FSImage
   */
  public final static class Loader {
    private final FSNamesystem fsn;
    private final FSDirectory fsDir;
    // Enclosing image loader; supplies the shared LoaderContext (string
    // table and the ordered INodeReference list).
    private final FSImageFormatProtobuf.Loader parent;
    // snapshot id -> Snapshot, populated by loadSnapshots() and consumed
    // by loadDirectoryDiffList() to resolve snapshot roots.
    private final Map<Integer, Snapshot> snapshotMap;

    public Loader(FSNamesystem fsn, FSImageFormatProtobuf.Loader parent) {
      this.fsn = fsn;
      this.fsDir = fsn.getFSDirectory();
      this.snapshotMap = new HashMap<Integer, Snapshot>();
      this.parent = parent;
    }

    /**
     * The sequence of the ref node in refList must be strictly the same with
     * the sequence in fsimage
     */
    public void loadINodeReferenceSection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      // Read delimited INodeReference messages until EOF of the section
      // (parseDelimitedFrom returns null at end of stream). Appending in
      // read order preserves the save-time indices used by the diff lists.
      while (true) {
        INodeReferenceSection.INodeReference e = INodeReferenceSection
            .INodeReference.parseDelimitedFrom(in);
        if (e == null) {
          break;
        }
        INodeReference ref = loadINodeReference(e);
        refList.add(ref);
      }
    }

    /**
     * Materialize a single INodeReference from its protobuf form.
     * Reuses the referred inode's existing WithCount parent reference if
     * one was already created for an earlier reference to the same inode,
     * so all references share one WithCount.
     */
    private INodeReference loadINodeReference(
        INodeReferenceSection.INodeReference r) throws IOException {
      long referredId = r.getReferredId();
      INode referred = fsDir.getInode(referredId);
      WithCount withCount = (WithCount) referred.getParentReference();
      if (withCount == null) {
        withCount = new INodeReference.WithCount(null, referred);
      }
      final INodeReference ref;
      // Presence of dstSnapshotId distinguishes the two concrete types in
      // the image format: DstReference (rename target) vs WithName.
      if (r.hasDstSnapshotId()) { // DstReference
        ref = new INodeReference.DstReference(null, withCount,
            r.getDstSnapshotId());
      } else {
        ref = new INodeReference.WithName(null, withCount, r.getName()
            .toByteArray(), r.getLastSnapshotId());
      }
      return ref;
    }

    /**
     * Load the snapshots section from fsimage. Also add snapshottable feature
     * to snapshottable directories.
     */
    public void loadSnapshotSection(InputStream in) throws IOException {
      SnapshotManager sm = fsn.getSnapshotManager();
      SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
      int snum = section.getNumSnapshots();
      sm.setNumSnapshots(snum);
      sm.setSnapshotCounter(section.getSnapshotCounter());
      for (long sdirId : section.getSnapshottableDirList()) {
        INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
        if (!dir.isSnapshottable()) {
          dir.addSnapshottableFeature();
        } else {
          // dir is root, and admin set root to snapshottable before
          dir.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
        }
        sm.addSnapshottable(dir);
      }
      loadSnapshots(in, snum);
    }

    /**
     * Read {@code size} delimited Snapshot messages, attach each snapshot to
     * its snapshottable parent directory, and record it in snapshotMap for
     * later diff resolution.
     */
    private void loadSnapshots(InputStream in, int size) throws IOException {
      for (int i = 0; i < size; i++) {
        SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
            .parseDelimitedFrom(in);
        INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
            parent.getLoaderContext());
        int sid = pbs.getSnapshotId();
        // NOTE(review): this local deliberately resolves the snapshot's
        // parent directory; it shadows the Loader.parent field above.
        INodeDirectory parent = fsDir.getInode(root.getId()).asDirectory();
        Snapshot snapshot = new Snapshot(sid, root, parent);
        // add the snapshot to parent, since we follow the sequence of
        // snapshotsByNames when saving, we do not need to sort when loading
        parent.getDirectorySnapshottableFeature().addSnapshot(snapshot);
        snapshotMap.put(sid, snapshot);
      }
    }

    /**
     * Load the snapshot diff section from fsimage.
     */
    public void loadSnapshotDiffSection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      // Each DiffEntry header is followed inline by numOfDiff file or
      // directory diff records for the named inode; null header ends the
      // section.
      while (true) {
        SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
            .parseDelimitedFrom(in);
        if (entry == null) {
          break;
        }
        long inodeId = entry.getInodeId();
        INode inode = fsDir.getInode(inodeId);
        SnapshotDiffSection.DiffEntry.Type type = entry.getType();
        switch (type) {
        case FILEDIFF:
          loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
          break;
        case DIRECTORYDIFF:
          loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
              refList);
          break;
        }
      }
    }

    /** Load FileDiff list for a file with snapshot feature */
    private void loadFileDiffList(InputStream in, INodeFile file, int size)
        throws IOException {
      final FileDiffList diffs = new FileDiffList();
      final LoaderContext state = parent.getLoaderContext();
      final BlockManager bm = fsn.getBlockManager();
      for (int i = 0; i < size; i++) {
        SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff
            .parseDelimitedFrom(in);
        // Optional snapshot-time copy of the file's attributes (permission,
        // ACLs, xattrs, replication, ...); null if the diff carries none.
        INodeFileAttributes copy = null;
        if (pbf.hasSnapshotCopy()) {
          INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
          PermissionStatus permission = loadPermission(
              fileInPb.getPermission(), state.getStringTable());

          AclFeature acl = null;
          if (fileInPb.hasAcl()) {
            int[] entries = AclEntryStatusFormat
                .toInt(FSImageFormatPBINode.Loader.loadAclEntries(
                    fileInPb.getAcl(), state.getStringTable()));
            acl = new AclFeature(entries);
          }
          XAttrFeature xAttrs = null;
          if (fileInPb.hasXAttrs()) {
            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
                fileInPb.getXAttrs(), state.getStringTable()));
          }

          copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
              .toByteArray(), permission, acl, fileInPb.getModificationTime(),
              fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
              fileInPb.getPreferredBlockSize(),
              (byte)fileInPb.getStoragePolicyID(),
              xAttrs);
        }

        FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
            pbf.getFileSize());
        List<BlockProto> bpl = pbf.getBlocksList();
        BlockInfo[] blocks = new BlockInfo[bpl.size()];
        for(int j = 0, e = bpl.size(); j < e; ++j) {
          Block blk = PBHelperClient.convert(bpl.get(j));
          BlockInfo storedBlock = bm.getStoredBlock(blk);
          if(storedBlock == null) {
            // NOTE(review): dereferences 'copy' — assumes a diff that
            // carries blocks not yet in the blocks map always has a
            // snapshot copy; otherwise this would NPE. Verify against the
            // saver, which only records both together.
            storedBlock = bm.addBlockCollection(
                new BlockInfoContiguous(blk, copy.getFileReplication()), file);
          }
          blocks[j] = storedBlock;
        }
        if(blocks.length > 0) {
          diff.setBlocks(blocks);
        }
        // Diffs were saved newest-first, so addFirst restores the
        // oldest-to-newest list order.
        diffs.addFirst(diff);
      }
      file.addSnapshotFeature(diffs);
      // Bump stored-block replication up to the file's preferred value in
      // case an older diff registered the block with a lower replication.
      short repl = file.getPreferredBlockReplication();
      for (BlockInfo b : file.getBlocks()) {
        if (b.getReplication() < repl) {
          bm.setReplication(b.getReplication(), repl, b);
        }
      }
    }

    /** Load the created list in a DirectoryDiff */
    private List<INode> loadCreatedList(InputStream in, INodeDirectory dir,
        int size) throws IOException {
      List<INode> clist = new ArrayList<INode>(size);
      for (long c = 0; c < size; c++) {
        CreatedListEntry entry = CreatedListEntry.parseDelimitedFrom(in);
        // Only the local name is stored; the inode itself is resolved from
        // the directory's current/snapshot children.
        INode created = SnapshotFSImageFormat.loadCreated(entry.getName()
            .toByteArray(), dir);
        clist.add(created);
      }
      return clist;
    }

    /**
     * Re-parent a node from a deleted list and, for files, re-register its
     * blocks in the blocks map.
     */
    private void addToDeletedList(INode dnode, INodeDirectory parent) {
      dnode.setParent(parent);
      if (dnode.isFile()) {
        updateBlocksMap(dnode.asFile(), fsn.getBlockManager());
      }
    }

    /**
     * Load the deleted list in a DirectoryDiff
     */
    private List<INode> loadDeletedList(final List<INodeReference> refList,
        InputStream in, INodeDirectory dir, List<Long> deletedNodes,
        List<Integer> deletedRefNodes)
        throws IOException {
      List<INode> dlist = new ArrayList<INode>(deletedRefNodes.size()
          + deletedNodes.size());
      // load non-reference inodes
      for (long deletedId : deletedNodes) {
        INode deleted = fsDir.getInode(deletedId);
        dlist.add(deleted);
        addToDeletedList(deleted, dir);
      }
      // load reference nodes in the deleted list
      for (int refId : deletedRefNodes) {
        INodeReference deletedRef = refList.get(refId);
        dlist.add(deletedRef);
        addToDeletedList(deletedRef, dir);
      }

      // The in-memory deleted list must be sorted by local name (binary
      // search is used on it at runtime); the image does not store order.
      Collections.sort(dlist, new Comparator<INode>() {
        @Override
        public int compare(INode n1, INode n2) {
          return n1.compareTo(n2.getLocalNameBytes());
        }
      });
      return dlist;
    }

    /** Load DirectoryDiff list for a directory with snapshot feature */
    private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
        int size, final List<INodeReference> refList) throws IOException {
      if (!dir.isWithSnapshot()) {
        dir.addSnapshotFeature(null);
      }
      DirectoryDiffList diffs = dir.getDiffs();
      final LoaderContext state = parent.getLoaderContext();

      for (int i = 0; i < size; i++) {
        // load a directory diff
        SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection.
            DirectoryDiff.parseDelimitedFrom(in);
        final int snapshotId = diffInPb.getSnapshotId();
        final Snapshot snapshot = snapshotMap.get(snapshotId);
        int childrenSize = diffInPb.getChildrenSize();
        boolean useRoot = diffInPb.getIsSnapshotRoot();
        INodeDirectoryAttributes copy = null;
        if (useRoot) {
          // The snapshot root itself serves as the attribute copy.
          copy = snapshot.getRoot();
        } else if (diffInPb.hasSnapshotCopy()) {
          INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
          final byte[] name = diffInPb.getName().toByteArray();
          PermissionStatus permission = loadPermission(
              dirCopyInPb.getPermission(), state.getStringTable());
          AclFeature acl = null;
          if (dirCopyInPb.hasAcl()) {
            int[] entries = AclEntryStatusFormat
                .toInt(FSImageFormatPBINode.Loader.loadAclEntries(
                    dirCopyInPb.getAcl(), state.getStringTable()));
            acl = new AclFeature(entries);
          }
          XAttrFeature xAttrs = null;
          if (dirCopyInPb.hasXAttrs()) {
            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
                dirCopyInPb.getXAttrs(), state.getStringTable()));
          }

          long modTime = dirCopyInPb.getModificationTime();
          // -1 (QUOTA_RESET) on both namespace and diskspace quota plus no
          // per-type quotas means the copy needs no quota bookkeeping.
          boolean noQuota = dirCopyInPb.getNsQuota() == -1
              && dirCopyInPb.getDsQuota() == -1
              && (!dirCopyInPb.hasTypeQuotas());

          if (noQuota) {
            copy = new INodeDirectoryAttributes.SnapshotCopy(name,
                permission, acl, modTime, xAttrs);
          } else {
            EnumCounters<StorageType> typeQuotas = null;
            if (dirCopyInPb.hasTypeQuotas()) {
              ImmutableList<QuotaByStorageTypeEntry> qes =
                  FSImageFormatPBINode.Loader.loadQuotaByStorageTypeEntries(
                      dirCopyInPb.getTypeQuotas());
              typeQuotas = new EnumCounters<StorageType>(StorageType.class,
                  HdfsConstants.QUOTA_RESET);
              for (QuotaByStorageTypeEntry qe : qes) {
                // Ignore entries for unset quotas or storage types that do
                // not support type quotas.
                if (qe.getQuota() >= 0 && qe.getStorageType() != null &&
                    qe.getStorageType().supportTypeQuota()) {
                  typeQuotas.set(qe.getStorageType(), qe.getQuota());
                }
              }
            }
            copy = new INodeDirectoryAttributes.CopyWithQuota(name, permission,
                acl, modTime, dirCopyInPb.getNsQuota(),
                dirCopyInPb.getDsQuota(), typeQuotas, xAttrs);
          }
        }
        // load created list
        List<INode> clist = loadCreatedList(in, dir,
            diffInPb.getCreatedListSize());
        // load deleted list
        List<INode> dlist = loadDeletedList(refList, in, dir,
            diffInPb.getDeletedINodeList(), diffInPb.getDeletedINodeRefList());
        // create the directory diff
        DirectoryDiff diff = new DirectoryDiff(snapshotId, copy, null,
            childrenSize, clist, dlist, useRoot);
        // Saved newest-first; addFirst restores chronological order.
        diffs.addFirst(diff);
      }
    }
  }

  /**
   * Saving snapshot related information to protobuf based FSImage
   */
  public final static class Saver {
    private final FSNamesystem fsn;
    // FileSummary builder the section offsets/lengths are committed to.
    private final FileSummary.Builder headers;
    private final FSImageFormatProtobuf.Saver parent;
    // Used to honor save-namespace cancellation at regular intervals.
    private final SaveNamespaceContext context;

    public Saver(FSImageFormatProtobuf.Saver parent,
        FileSummary.Builder headers, SaveNamespaceContext context,
        FSNamesystem fsn) {
      this.parent = parent;
      this.headers = headers;
      this.context = context;
      this.fsn = fsn;
    }

    /**
     * save all the snapshottable directories and snapshots to fsimage
     */
    public void serializeSnapshotSection(OutputStream out) throws IOException {
      SnapshotManager sm = fsn.getSnapshotManager();
      SnapshotSection.Builder b = SnapshotSection.newBuilder()
          .setSnapshotCounter(sm.getSnapshotCounter())
          .setNumSnapshots(sm.getNumSnapshots());

      INodeDirectory[] snapshottables = sm.getSnapshottableDirs();
      for (INodeDirectory sdir : snapshottables) {
        b.addSnapshottableDir(sdir.getId());
      }
      b.build().writeDelimitedTo(out);
      int i = 0;
      for(INodeDirectory sdir : snapshottables) {
        // Snapshots are written in snapshotsByNames order so the loader
        // can append without re-sorting.
        for (Snapshot s : sdir.getDirectorySnapshottableFeature()
            .getSnapshotList()) {
          Root sroot = s.getRoot();
          SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
              .newBuilder().setSnapshotId(s.getId());
          INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
              parent.getSaverContext());
          INodeSection.INode r = INodeSection.INode.newBuilder()
              .setId(sroot.getId())
              .setType(INodeSection.INode.Type.DIRECTORY)
              .setName(ByteString.copyFrom(sroot.getLocalNameBytes()))
              .setDirectory(db).build();
          sb.setRoot(r).build().writeDelimitedTo(out);
          i++;
          if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
            context.checkCancelled();
          }
        }
      }
      // Sanity check: every snapshot the manager counts must be written.
      Preconditions.checkState(i == sm.getNumSnapshots());
      parent.commitSection(headers, FSImageFormatProtobuf.SectionName.SNAPSHOT);
    }

    /**
     * This can only be called after serializing both INode_Dir and SnapshotDiff
     */
    public void serializeINodeReferenceSection(OutputStream out)
        throws IOException {
      // refList was populated (in order) by serializeSnapshotDiffSection;
      // list indices become the on-disk reference ids.
      final List<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      for (INodeReference ref : refList) {
        INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
        rb.build().writeDelimitedTo(out);
      }
      parent.commitSection(headers, SectionName.INODE_REFERENCE);
    }

    /**
     * Build the protobuf form of one reference node. Only WithName carries
     * a name/lastSnapshotId; only DstReference carries a dstSnapshotId —
     * the loader relies on that distinction to pick the concrete type.
     */
    private INodeReferenceSection.INodeReference.Builder buildINodeReference(
        INodeReference ref) throws IOException {
      INodeReferenceSection.INodeReference.Builder rb =
          INodeReferenceSection.INodeReference.newBuilder().
            setReferredId(ref.getId());
      if (ref instanceof WithName) {
        rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName(
            ByteString.copyFrom(ref.getLocalNameBytes()));
      } else if (ref instanceof DstReference) {
        rb.setDstSnapshotId(ref.getDstSnapshotId());
      }
      return rb;
    }

    /**
     * save all the snapshot diff to fsimage
     */
    public void serializeSnapshotDiffSection(OutputStream out)
        throws IOException {
      INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
      final List<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      int i = 0;
      Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
      while (iter.hasNext()) {
        INodeWithAdditionalFields inode = iter.next();
        if (inode.isFile()) {
          serializeFileDiffList(inode.asFile(), out);
        } else if (inode.isDirectory()) {
          serializeDirDiffList(inode.asDirectory(), refList, out);
        }
        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(headers,
          FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
    }

    /**
     * Write one DiffEntry header plus the file's diffs (newest first —
     * see the loader's addFirst). No-op for files without snapshot data.
     */
    private void serializeFileDiffList(INodeFile file, OutputStream out)
        throws IOException {
      FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
      if (sf != null) {
        List<FileDiff> diffList = sf.getDiffs().asList();
        SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
            .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
            .setNumOfDiff(diffList.size()).build();
        entry.writeDelimitedTo(out);
        for (int i = diffList.size() - 1; i >= 0; i--) {
          FileDiff diff = diffList.get(i);
          SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
              .newBuilder().setSnapshotId(diff.getSnapshotId())
              .setFileSize(diff.getFileSize());
          if(diff.getBlocks() != null) {
            for(Block block : diff.getBlocks()) {
              fb.addBlocks(PBHelperClient.convert(block));
            }
          }
          INodeFileAttributes copy = diff.snapshotINode;
          if (copy != null) {
            fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
                .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
          }
          fb.build().writeDelimitedTo(out);
        }
      }
    }

    /**
     * Write the created-list entries (local names only) that follow a
     * DirectoryDiff record.
     */
    private void saveCreatedList(List<INode> created, OutputStream out)
        throws IOException {
      // local names of the created list member
      for (INode c : created) {
        SnapshotDiffSection.CreatedListEntry.newBuilder()
            .setName(ByteString.copyFrom(c.getLocalNameBytes())).build()
            .writeDelimitedTo(out);
      }
    }

    /**
     * Write one DiffEntry header plus the directory's diffs (newest first).
     * Deleted reference nodes are appended to refList and stored by index;
     * loadDeletedList resolves them by the same index, so refList order is
     * part of the image format.
     */
    private void serializeDirDiffList(INodeDirectory dir,
        final List<INodeReference> refList, OutputStream out)
        throws IOException {
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        List<DirectoryDiff> diffList = sf.getDiffs().asList();
        SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
            .newBuilder().setInodeId(dir.getId()).setType(Type.DIRECTORYDIFF)
            .setNumOfDiff(diffList.size()).build();
        entry.writeDelimitedTo(out);
        for (int i = diffList.size() - 1; i >= 0; i--) { // reverse order!
          DirectoryDiff diff = diffList.get(i);
          SnapshotDiffSection.DirectoryDiff.Builder db = SnapshotDiffSection.
              DirectoryDiff.newBuilder().setSnapshotId(diff.getSnapshotId())
              .setChildrenSize(diff.getChildrenSize())
              .setIsSnapshotRoot(diff.isSnapshotRoot());
          INodeDirectoryAttributes copy = diff.snapshotINode;
          if (!diff.isSnapshotRoot() && copy != null) {
            db.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
                .setSnapshotCopy(
                    buildINodeDirectory(copy, parent.getSaverContext()));
          }
          // process created list and deleted list
          List<INode> created = diff.getChildrenDiff()
              .getList(ListType.CREATED);
          db.setCreatedListSize(created.size());
          List<INode> deleted = diff.getChildrenDiff().getList(ListType.DELETED);
          for (INode d : deleted) {
            if (d.isReference()) {
              // Store the reference by its position in the shared refList;
              // the INODE_REFERENCE section is written from this list later.
              refList.add(d.asReference());
              db.addDeletedINodeRef(refList.size() - 1);
            } else {
              db.addDeletedINode(d.getId());
            }
          }
          db.build().writeDelimitedTo(out);
          saveCreatedList(created, out);
        }
      }
    }
  }

  // Utility holder; not meant to be instantiated.
  private FSImageFormatPBSnapshot(){}
}