001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements. See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership. The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License. You may obtain a copy of the License at
009 *
010 * http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018
019 package org.apache.hadoop.hdfs.server.namenode;
020
021 import java.io.IOException;
022 import java.io.InputStream;
023 import java.io.OutputStream;
024 import java.util.ArrayList;
025 import java.util.Iterator;
026 import java.util.List;
027 import java.util.Map;
028
029 import org.apache.commons.logging.Log;
030 import org.apache.commons.logging.LogFactory;
031 import org.apache.hadoop.HadoopIllegalArgumentException;
032 import org.apache.hadoop.classification.InterfaceAudience;
033 import org.apache.hadoop.fs.permission.AclEntry;
034 import org.apache.hadoop.fs.permission.AclEntryScope;
035 import org.apache.hadoop.fs.permission.AclEntryType;
036 import org.apache.hadoop.fs.permission.FsAction;
037 import org.apache.hadoop.fs.permission.FsPermission;
038 import org.apache.hadoop.fs.permission.PermissionStatus;
039 import org.apache.hadoop.fs.XAttr;
040 import org.apache.hadoop.hdfs.protocol.Block;
041 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
042 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
043 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
044 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
045 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
046 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
047 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
048 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
049 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
050 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
051 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
052 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
053 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
054 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
055 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
056 import org.apache.hadoop.hdfs.util.ReadOnlyList;
057
058 import com.google.common.base.Preconditions;
059 import com.google.common.collect.ImmutableList;
060 import com.google.protobuf.ByteString;
061
@InterfaceAudience.Private
public final class FSImageFormatPBINode {
  // Packed permission word layout (see Saver#buildPermissionStatus /
  // Loader#loadPermission): bits 0-15 hold the FS mode, bits 16-39 the
  // group string-table id, bits 40-63 the user string-table id. The same
  // 24-bit mask is applied to both string ids.
  private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
  private final static int USER_STRID_OFFSET = 40;
  private final static int GROUP_STRID_OFFSET = 16;
  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);

  // Packed ACL entry layout (see buildAclEntries / loadAclEntries):
  // bits 0-2 permission ordinal, bits 3-4 type ordinal, bit 5 scope
  // ordinal, bits 6-29 name string-table id.
  private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
  private static final int ACL_ENTRY_NAME_OFFSET = 6;
  private static final int ACL_ENTRY_TYPE_OFFSET = 3;
  private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
  private static final int ACL_ENTRY_PERM_MASK = 7;
  private static final int ACL_ENTRY_TYPE_MASK = 3;
  private static final int ACL_ENTRY_SCOPE_MASK = 1;
  // Cached enum values() arrays to avoid re-cloning them per entry.
  private static final FsAction[] FSACTION_VALUES = FsAction.values();
  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
      .values();
  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
      .values();

  // Packed xattr "name" int layout (see buildXAttrs / loadXAttrs):
  // bits 6-29 name string-table id, bits 30-31 low two namespace bits.
  private static final int XATTR_NAMESPACE_MASK = 3;
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;

  /* See the comments in fsimage.proto for an explanation of the following. */
  private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
  private static final int XATTR_NAMESPACE_EXT_MASK = 1;

  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();

094
095 public final static class Loader {
096 public static PermissionStatus loadPermission(long id,
097 final String[] stringTable) {
098 short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
099 int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
100 int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
101 return new PermissionStatus(stringTable[usid], stringTable[gsid],
102 new FsPermission(perm));
103 }
104
105 public static ImmutableList<AclEntry> loadAclEntries(
106 AclFeatureProto proto, final String[] stringTable) {
107 ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
108 for (int v : proto.getEntriesList()) {
109 int p = v & ACL_ENTRY_PERM_MASK;
110 int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
111 int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
112 int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
113 String name = stringTable[nid];
114 b.add(new AclEntry.Builder().setName(name)
115 .setPermission(FSACTION_VALUES[p])
116 .setScope(ACL_ENTRY_SCOPE_VALUES[s])
117 .setType(ACL_ENTRY_TYPE_VALUES[t]).build());
118 }
119 return b.build();
120 }
121
122 public static ImmutableList<XAttr> loadXAttrs(
123 XAttrFeatureProto proto, final String[] stringTable) {
124 ImmutableList.Builder<XAttr> b = ImmutableList.builder();
125 for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
126 int v = xAttrCompactProto.getName();
127 int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
128 int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
129 ns |=
130 ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
131 String name = stringTable[nid];
132 byte[] value = null;
133 if (xAttrCompactProto.getValue() != null) {
134 value = xAttrCompactProto.getValue().toByteArray();
135 }
136 b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
137 .setName(name).setValue(value).build());
138 }
139
140 return b.build();
141 }
142
    /**
     * Reconstruct an in-memory directory inode from its serialized form,
     * restoring the quota, ACL and xattr features when present.
     */
    public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
        LoaderContext state) {
      assert n.getType() == INodeSection.INode.Type.DIRECTORY;
      INodeSection.INodeDirectory d = n.getDirectory();

      final PermissionStatus permissions = loadPermission(d.getPermission(),
          state.getStringTable());
      final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
          .toByteArray(), permissions, d.getModificationTime());

      // A non-negative value for either quota means a quota was set on
      // this directory; attach the feature that enforces it.
      final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
      if (nsQuota >= 0 || dsQuota >= 0) {
        dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
      }

      if (d.hasAcl()) {
        dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
            state.getStringTable())));
      }
      if (d.hasXAttrs()) {
        dir.addXAttrFeature(new XAttrFeature(
            loadXAttrs(d.getXAttrs(), state.getStringTable())));
      }
      return dir;
    }
168
169 public static void updateBlocksMap(INodeFile file, BlockManager bm) {
170 // Add file->block mapping
171 final BlockInfo[] blocks = file.getBlocks();
172 if (blocks != null) {
173 for (int i = 0; i < blocks.length; i++) {
174 file.setBlock(i, bm.addBlockCollection(blocks[i], file));
175 }
176 }
177 }
178
    // Directory tree and namesystem being populated, plus the enclosing
    // protobuf loader that owns shared loading state (string table,
    // inode-reference list).
    private final FSDirectory dir;
    private final FSNamesystem fsn;
    private final FSImageFormatProtobuf.Loader parent;

    Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
      this.fsn = fsn;
      this.dir = fsn.dir;
      this.parent = parent;
    }
188
    /**
     * Load the directory section: for each serialized DirEntry, look up the
     * parent directory and re-attach its children (both regular inodes and
     * inode references) that were loaded earlier by loadINodeSection.
     */
    void loadINodeDirectorySection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      while (true) {
        INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
            .parseDelimitedFrom(in);
        // note that in is a LimitedInputStream, so a null entry marks the
        // end of this section rather than end of file
        if (e == null) {
          break;
        }
        INodeDirectory p = dir.getInode(e.getParent()).asDirectory();
        for (long id : e.getChildrenList()) {
          INode child = dir.getInode(id);
          addToParent(p, child);
        }
        // Reference children are stored as indices into the shared
        // reference list rather than as inode ids.
        for (int refId : e.getRefChildrenList()) {
          INodeReference ref = refList.get(refId);
          addToParent(p, ref);
        }
      }
    }
210
    /**
     * Load the inode section: read the declared number of inodes, adding
     * each to the inode map. The root inode is merged into the existing
     * in-memory root directory instead of being added.
     */
    void loadINodeSection(InputStream in) throws IOException {
      INodeSection s = INodeSection.parseDelimitedFrom(in);
      fsn.resetLastInodeId(s.getLastInodeId());
      LOG.info("Loading " + s.getNumInodes() + " INodes.");
      for (int i = 0; i < s.getNumInodes(); ++i) {
        INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
        if (p.getId() == INodeId.ROOT_INODE_ID) {
          loadRootINode(p);
        } else {
          INode n = loadINode(p);
          dir.addToInodeMap(n);
        }
      }
    }
225
226 /**
227 * Load the under-construction files section, and update the lease map
228 */
229 void loadFilesUnderConstructionSection(InputStream in) throws IOException {
230 while (true) {
231 FileUnderConstructionEntry entry = FileUnderConstructionEntry
232 .parseDelimitedFrom(in);
233 if (entry == null) {
234 break;
235 }
236 // update the lease manager
237 INodeFile file = dir.getInode(entry.getInodeId()).asFile();
238 FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
239 Preconditions.checkState(uc != null); // file must be under-construction
240 fsn.leaseManager.addLease(uc.getClientName(), entry.getFullPath());
241 }
242 }
243
    /**
     * Attach a loaded child inode to its parent directory. Rejects reserved
     * names directly under the root, caches the child's name, and registers
     * a file child's blocks with the block manager.
     */
    private void addToParent(INodeDirectory parent, INode child) {
      if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
        throw new HadoopIllegalArgumentException("File name \""
            + child.getLocalName() + "\" is reserved. Please "
            + " change the name of the existing file or directory to another "
            + "name before upgrading to this release.");
      }
      // NOTE: This does not update space counts for parents
      if (!parent.addChild(child)) {
        // addChild returned false (child not attached); skip further setup.
        return;
      }
      dir.cacheName(child);

      if (child.isFile()) {
        updateBlocksMap(child.asFile(), fsn.getBlockManager());
      }
    }
261
262 private INode loadINode(INodeSection.INode n) {
263 switch (n.getType()) {
264 case FILE:
265 return loadINodeFile(n);
266 case DIRECTORY:
267 return loadINodeDirectory(n, parent.getLoaderContext());
268 case SYMLINK:
269 return loadINodeSymlink(n);
270 default:
271 break;
272 }
273 return null;
274 }
275
    /**
     * Reconstruct a file inode: block list, permissions, optional ACL and
     * xattr features and, for files still open for write, the
     * under-construction state.
     */
    private INodeFile loadINodeFile(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.FILE;
      INodeSection.INodeFile f = n.getFile();
      List<BlockProto> bp = f.getBlocksList();
      short replication = (short) f.getReplication();
      LoaderContext state = parent.getLoaderContext();

      BlockInfo[] blocks = new BlockInfo[bp.size()];
      for (int i = 0, e = bp.size(); i < e; ++i) {
        blocks[i] = new BlockInfo(PBHelper.convert(bp.get(i)), replication);
      }
      final PermissionStatus permissions = loadPermission(f.getPermission(),
          parent.getLoaderContext().getStringTable());

      final INodeFile file = new INodeFile(n.getId(),
          n.getName().toByteArray(), permissions, f.getModificationTime(),
          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
          (byte)f.getStoragePolicyID());

      if (f.hasAcl()) {
        file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
            state.getStringTable())));
      }

      if (f.hasXAttrs()) {
        file.addXAttrFeature(new XAttrFeature(
            loadXAttrs(f.getXAttrs(), state.getStringTable())));
      }

      // under-construction information
      if (f.hasFileUC()) {
        INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
        file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
        if (blocks.length > 0) {
          BlockInfo lastBlk = file.getLastBlock();
          // replace the last block of file with an under-construction
          // variant, since it may still be receiving data
          file.setBlock(file.numBlocks() - 1, new BlockInfoUnderConstruction(
              lastBlk, replication));
        }
      }
      return file;
    }
318
319
320 private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
321 assert n.getType() == INodeSection.INode.Type.SYMLINK;
322 INodeSection.INodeSymlink s = n.getSymlink();
323 final PermissionStatus permissions = loadPermission(s.getPermission(),
324 parent.getLoaderContext().getStringTable());
325 INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
326 permissions, s.getModificationTime(), s.getAccessTime(),
327 s.getTarget().toStringUtf8());
328 return sym;
329 }
330
    /**
     * Merge the serialized root inode into the pre-existing in-memory root
     * directory — quota, times, permissions and xattrs — rather than
     * replacing the root object itself.
     */
    private void loadRootINode(INodeSection.INode p) {
      INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
      final Quota.Counts q = root.getQuotaCounts();
      final long nsQuota = q.get(Quota.NAMESPACE);
      final long dsQuota = q.get(Quota.DISKSPACE);
      // -1 for both means no quota was recorded for the root.
      if (nsQuota != -1 || dsQuota != -1) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
      }
      dir.rootDir.cloneModificationTime(root);
      dir.rootDir.clonePermissionStatus(root);
      // root dir supports having extended attributes according to POSIX
      final XAttrFeature f = root.getXAttrFeature();
      if (f != null) {
        dir.rootDir.addXAttrFeature(f);
      }
    }
347 }
348
349 public final static class Saver {
350 private static long buildPermissionStatus(INodeAttributes n,
351 final SaverContext.DeduplicationMap<String> stringMap) {
352 long userId = stringMap.getId(n.getUserName());
353 long groupId = stringMap.getId(n.getGroupName());
354 return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
355 | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
356 | n.getFsPermissionShort();
357 }
358
359 private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
360 final SaverContext.DeduplicationMap<String> map) {
361 AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
362 for (AclEntry e : f.getEntries()) {
363 int v = ((map.getId(e.getName()) & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
364 | (e.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET)
365 | (e.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET)
366 | (e.getPermission().ordinal());
367 b.addEntries(v);
368 }
369 return b;
370 }
371
    /**
     * Serialize an xattr feature. Each xattr's name-table id, low two
     * namespace bits, and one extension namespace bit are packed into a
     * single int (see the comments in fsimage.proto).
     */
    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
        final SaverContext.DeduplicationMap<String> stringMap) {
      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
        int nsOrd = a.getNameSpace().ordinal();
        // Only 3 bits are available for the namespace ordinal (2 regular
        // plus 1 extension bit), so at most 8 namespaces can be encoded.
        Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
                XATTR_NAME_OFFSET);
        // The third namespace bit is stored in the separate extension slot.
        v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
            XATTR_NAMESPACE_EXT_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
        }
        b.addXAttrs(xAttrCompactBuilder.build());
      }

      return b;
    }
394
    /**
     * Serialize the file-specific attributes of a file inode: times,
     * permission, preferred block size, replication, storage policy, and
     * optional ACL/xattr features. The block list is appended by the caller.
     */
    public static INodeSection.INodeFile.Builder buildINodeFile(
        INodeFileAttributes file, final SaverContext state) {
      INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
          .setAccessTime(file.getAccessTime())
          .setModificationTime(file.getModificationTime())
          .setPermission(buildPermissionStatus(file, state.getStringMap()))
          .setPreferredBlockSize(file.getPreferredBlockSize())
          .setReplication(file.getFileReplication())
          .setStoragePolicyID(file.getLocalStoragePolicyID());

      AclFeature f = file.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = file.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }
415
    /**
     * Serialize the directory-specific attributes of a directory inode:
     * modification time, namespace/diskspace quotas, permission, and
     * optional ACL/xattr features. Children are written separately in the
     * directory section.
     */
    public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
        INodeDirectoryAttributes dir, final SaverContext state) {
      Quota.Counts quota = dir.getQuotaCounts();
      INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
          .newBuilder().setModificationTime(dir.getModificationTime())
          .setNsQuota(quota.get(Quota.NAMESPACE))
          .setDsQuota(quota.get(Quota.DISKSPACE))
          .setPermission(buildPermissionStatus(dir, state.getStringMap()));

      AclFeature f = dir.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = dir.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }
435
    // Namesystem being saved, the FileSummary accumulating section
    // offsets, the save context (used for cancellation checks), and the
    // enclosing protobuf saver that owns shared state.
    private final FSNamesystem fsn;
    private final FileSummary.Builder summary;
    private final SaveNamespaceContext context;
    private final FSImageFormatProtobuf.Saver parent;

    Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
      this.parent = parent;
      this.summary = summary;
      this.context = parent.getContext();
      this.fsn = context.getSourceNamesystem();
    }
447
    /**
     * Serialize the directory section: one DirEntry per non-empty
     * directory, listing regular children by inode id and reference
     * children by their index in the shared reference list.
     */
    void serializeINodeDirectorySection(OutputStream out) throws IOException {
      Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
          .getINodeMap().getMapIterator();
      final ArrayList<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      int i = 0;
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        if (!n.isDirectory()) {
          continue;
        }

        ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
            Snapshot.CURRENT_STATE_ID);
        if (children.size() > 0) {
          INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
              DirEntry.newBuilder().setParent(n.getId());
          for (INode inode : children) {
            if (!inode.isReference()) {
              b.addChildren(inode.getId());
            } else {
              // References are appended to the shared list and recorded
              // by index so the loader can resolve them later.
              refList.add(inode.asReference());
              b.addRefChildren(refList.size() - 1);
            }
          }
          INodeDirectorySection.DirEntry e = b.build();
          e.writeDelimitedTo(out);
        }

        ++i;
        // Periodically check whether the save operation was cancelled.
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.INODE_DIR);
    }
485
    /**
     * Serialize the inode section: a header carrying the last allocated
     * inode id and the inode count, followed by one delimited INode
     * message per inode in the map.
     */
    void serializeINodeSection(OutputStream out) throws IOException {
      INodeMap inodesMap = fsn.dir.getINodeMap();

      INodeSection.Builder b = INodeSection.newBuilder()
          .setLastInodeId(fsn.getLastInodeId()).setNumInodes(inodesMap.size());
      INodeSection s = b.build();
      s.writeDelimitedTo(out);

      int i = 0;
      Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        save(out, n);
        ++i;
        // Periodically check whether the save operation was cancelled.
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
    }
506
507 void serializeFilesUCSection(OutputStream out) throws IOException {
508 Map<String, INodeFile> ucMap = fsn.getFilesUnderConstruction();
509 for (Map.Entry<String, INodeFile> entry : ucMap.entrySet()) {
510 String path = entry.getKey();
511 INodeFile file = entry.getValue();
512 FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
513 .newBuilder().setInodeId(file.getId()).setFullPath(path);
514 FileUnderConstructionEntry e = b.build();
515 e.writeDelimitedTo(out);
516 }
517 parent.commitSection(summary,
518 FSImageFormatProtobuf.SectionName.FILES_UNDERCONSTRUCTION);
519 }
520
521 private void save(OutputStream out, INode n) throws IOException {
522 if (n.isDirectory()) {
523 save(out, n.asDirectory());
524 } else if (n.isFile()) {
525 save(out, n.asFile());
526 } else if (n.isSymlink()) {
527 save(out, n.asSymlink());
528 }
529 }
530
531 private void save(OutputStream out, INodeDirectory n) throws IOException {
532 INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
533 parent.getSaverContext());
534 INodeSection.INode r = buildINodeCommon(n)
535 .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
536 r.writeDelimitedTo(out);
537 }
538
    /**
     * Serialize a file inode as one delimited INode message, including its
     * block list and, when the file is open for write, the
     * under-construction feature (lease holder's client name and machine).
     */
    private void save(OutputStream out, INodeFile n) throws IOException {
      INodeSection.INodeFile.Builder b = buildINodeFile(n,
          parent.getSaverContext());

      if (n.getBlocks() != null) {
        for (Block block : n.getBlocks()) {
          b.addBlocks(PBHelper.convert(block));
        }
      }

      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
            INodeSection.FileUnderConstructionFeature
            .newBuilder().setClientName(uc.getClientName())
            .setClientMachine(uc.getClientMachine()).build();
        b.setFileUC(f);
      }

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.FILE).setFile(b).build();
      r.writeDelimitedTo(out);
    }
562
563 private void save(OutputStream out, INodeSymlink n) throws IOException {
564 SaverContext state = parent.getSaverContext();
565 INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
566 .newBuilder()
567 .setPermission(buildPermissionStatus(n, state.getStringMap()))
568 .setTarget(ByteString.copyFrom(n.getSymlink()))
569 .setModificationTime(n.getModificationTime())
570 .setAccessTime(n.getAccessTime());
571
572 INodeSection.INode r = buildINodeCommon(n)
573 .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
574 r.writeDelimitedTo(out);
575 }
576
577 private final INodeSection.INode.Builder buildINodeCommon(INode n) {
578 return INodeSection.INode.newBuilder()
579 .setId(n.getId())
580 .setName(ByteString.copyFrom(n.getLocalNameBytes()));
581 }
582 }
583
  // Static utility holder for the nested Loader/Saver classes; never
  // instantiated.
  private FSImageFormatPBINode() {
  }
586 }