001 // Generated by the protocol buffer compiler. DO NOT EDIT!
002 // source: QJournalProtocol.proto
003
004 package org.apache.hadoop.hdfs.qjournal.protocol;
005
006 public final class QJournalProtocolProtos {
  // Container class for generated message types; never instantiated.
  private QJournalProtocolProtos() {}
  /**
   * Registers all proto2 extensions declared in this file with the given
   * registry. QJournalProtocol.proto declares no extensions, so the body
   * is intentionally empty.
   */
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  /**
   * Accessor contract shared by {@code JournalIdProto} and its Builder.
   * Generated for the message {@code hadoop.hdfs.JournalIdProto}.
   */
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string identifier = 1;
    /**
     * <code>required string identifier = 1;</code>
     *
     * Returns true if the identifier field has been explicitly set.
     */
    boolean hasIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     *
     * Returns the journal identifier as a Java String (decoded as UTF-8).
     */
    java.lang.String getIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     *
     * Returns the identifier as raw UTF-8 bytes, avoiding a String decode.
     */
    com.google.protobuf.ByteString
        getIdentifierBytes();
  }
029 /**
030 * Protobuf type {@code hadoop.hdfs.JournalIdProto}
031 */
032 public static final class JournalIdProto extends
033 com.google.protobuf.GeneratedMessage
034 implements JournalIdProtoOrBuilder {
035 // Use JournalIdProto.newBuilder() to construct.
036 private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
037 super(builder);
038 this.unknownFields = builder.getUnknownFields();
039 }
040 private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
041
042 private static final JournalIdProto defaultInstance;
043 public static JournalIdProto getDefaultInstance() {
044 return defaultInstance;
045 }
046
047 public JournalIdProto getDefaultInstanceForType() {
048 return defaultInstance;
049 }
050
051 private final com.google.protobuf.UnknownFieldSet unknownFields;
052 @java.lang.Override
053 public final com.google.protobuf.UnknownFieldSet
054 getUnknownFields() {
055 return this.unknownFields;
056 }
057 private JournalIdProto(
058 com.google.protobuf.CodedInputStream input,
059 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
060 throws com.google.protobuf.InvalidProtocolBufferException {
061 initFields();
062 int mutable_bitField0_ = 0;
063 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
064 com.google.protobuf.UnknownFieldSet.newBuilder();
065 try {
066 boolean done = false;
067 while (!done) {
068 int tag = input.readTag();
069 switch (tag) {
070 case 0:
071 done = true;
072 break;
073 default: {
074 if (!parseUnknownField(input, unknownFields,
075 extensionRegistry, tag)) {
076 done = true;
077 }
078 break;
079 }
080 case 10: {
081 bitField0_ |= 0x00000001;
082 identifier_ = input.readBytes();
083 break;
084 }
085 }
086 }
087 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
088 throw e.setUnfinishedMessage(this);
089 } catch (java.io.IOException e) {
090 throw new com.google.protobuf.InvalidProtocolBufferException(
091 e.getMessage()).setUnfinishedMessage(this);
092 } finally {
093 this.unknownFields = unknownFields.build();
094 makeExtensionsImmutable();
095 }
096 }
097 public static final com.google.protobuf.Descriptors.Descriptor
098 getDescriptor() {
099 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
100 }
101
102 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
103 internalGetFieldAccessorTable() {
104 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
105 .ensureFieldAccessorsInitialized(
106 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
107 }
108
109 public static com.google.protobuf.Parser<JournalIdProto> PARSER =
110 new com.google.protobuf.AbstractParser<JournalIdProto>() {
111 public JournalIdProto parsePartialFrom(
112 com.google.protobuf.CodedInputStream input,
113 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
114 throws com.google.protobuf.InvalidProtocolBufferException {
115 return new JournalIdProto(input, extensionRegistry);
116 }
117 };
118
119 @java.lang.Override
120 public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
121 return PARSER;
122 }
123
124 private int bitField0_;
125 // required string identifier = 1;
126 public static final int IDENTIFIER_FIELD_NUMBER = 1;
127 private java.lang.Object identifier_;
128 /**
129 * <code>required string identifier = 1;</code>
130 */
131 public boolean hasIdentifier() {
132 return ((bitField0_ & 0x00000001) == 0x00000001);
133 }
134 /**
135 * <code>required string identifier = 1;</code>
136 */
137 public java.lang.String getIdentifier() {
138 java.lang.Object ref = identifier_;
139 if (ref instanceof java.lang.String) {
140 return (java.lang.String) ref;
141 } else {
142 com.google.protobuf.ByteString bs =
143 (com.google.protobuf.ByteString) ref;
144 java.lang.String s = bs.toStringUtf8();
145 if (bs.isValidUtf8()) {
146 identifier_ = s;
147 }
148 return s;
149 }
150 }
151 /**
152 * <code>required string identifier = 1;</code>
153 */
154 public com.google.protobuf.ByteString
155 getIdentifierBytes() {
156 java.lang.Object ref = identifier_;
157 if (ref instanceof java.lang.String) {
158 com.google.protobuf.ByteString b =
159 com.google.protobuf.ByteString.copyFromUtf8(
160 (java.lang.String) ref);
161 identifier_ = b;
162 return b;
163 } else {
164 return (com.google.protobuf.ByteString) ref;
165 }
166 }
167
168 private void initFields() {
169 identifier_ = "";
170 }
171 private byte memoizedIsInitialized = -1;
172 public final boolean isInitialized() {
173 byte isInitialized = memoizedIsInitialized;
174 if (isInitialized != -1) return isInitialized == 1;
175
176 if (!hasIdentifier()) {
177 memoizedIsInitialized = 0;
178 return false;
179 }
180 memoizedIsInitialized = 1;
181 return true;
182 }
183
184 public void writeTo(com.google.protobuf.CodedOutputStream output)
185 throws java.io.IOException {
186 getSerializedSize();
187 if (((bitField0_ & 0x00000001) == 0x00000001)) {
188 output.writeBytes(1, getIdentifierBytes());
189 }
190 getUnknownFields().writeTo(output);
191 }
192
193 private int memoizedSerializedSize = -1;
194 public int getSerializedSize() {
195 int size = memoizedSerializedSize;
196 if (size != -1) return size;
197
198 size = 0;
199 if (((bitField0_ & 0x00000001) == 0x00000001)) {
200 size += com.google.protobuf.CodedOutputStream
201 .computeBytesSize(1, getIdentifierBytes());
202 }
203 size += getUnknownFields().getSerializedSize();
204 memoizedSerializedSize = size;
205 return size;
206 }
207
208 private static final long serialVersionUID = 0L;
209 @java.lang.Override
210 protected java.lang.Object writeReplace()
211 throws java.io.ObjectStreamException {
212 return super.writeReplace();
213 }
214
215 @java.lang.Override
216 public boolean equals(final java.lang.Object obj) {
217 if (obj == this) {
218 return true;
219 }
220 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
221 return super.equals(obj);
222 }
223 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;
224
225 boolean result = true;
226 result = result && (hasIdentifier() == other.hasIdentifier());
227 if (hasIdentifier()) {
228 result = result && getIdentifier()
229 .equals(other.getIdentifier());
230 }
231 result = result &&
232 getUnknownFields().equals(other.getUnknownFields());
233 return result;
234 }
235
236 private int memoizedHashCode = 0;
237 @java.lang.Override
238 public int hashCode() {
239 if (memoizedHashCode != 0) {
240 return memoizedHashCode;
241 }
242 int hash = 41;
243 hash = (19 * hash) + getDescriptorForType().hashCode();
244 if (hasIdentifier()) {
245 hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
246 hash = (53 * hash) + getIdentifier().hashCode();
247 }
248 hash = (29 * hash) + getUnknownFields().hashCode();
249 memoizedHashCode = hash;
250 return hash;
251 }
252
253 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
254 com.google.protobuf.ByteString data)
255 throws com.google.protobuf.InvalidProtocolBufferException {
256 return PARSER.parseFrom(data);
257 }
258 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
259 com.google.protobuf.ByteString data,
260 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
261 throws com.google.protobuf.InvalidProtocolBufferException {
262 return PARSER.parseFrom(data, extensionRegistry);
263 }
264 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
265 throws com.google.protobuf.InvalidProtocolBufferException {
266 return PARSER.parseFrom(data);
267 }
268 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
269 byte[] data,
270 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
271 throws com.google.protobuf.InvalidProtocolBufferException {
272 return PARSER.parseFrom(data, extensionRegistry);
273 }
274 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
275 throws java.io.IOException {
276 return PARSER.parseFrom(input);
277 }
278 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
279 java.io.InputStream input,
280 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
281 throws java.io.IOException {
282 return PARSER.parseFrom(input, extensionRegistry);
283 }
284 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
285 throws java.io.IOException {
286 return PARSER.parseDelimitedFrom(input);
287 }
288 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
289 java.io.InputStream input,
290 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
291 throws java.io.IOException {
292 return PARSER.parseDelimitedFrom(input, extensionRegistry);
293 }
294 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
295 com.google.protobuf.CodedInputStream input)
296 throws java.io.IOException {
297 return PARSER.parseFrom(input);
298 }
299 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
300 com.google.protobuf.CodedInputStream input,
301 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
302 throws java.io.IOException {
303 return PARSER.parseFrom(input, extensionRegistry);
304 }
305
306 public static Builder newBuilder() { return Builder.create(); }
307 public Builder newBuilderForType() { return newBuilder(); }
308 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
309 return newBuilder().mergeFrom(prototype);
310 }
311 public Builder toBuilder() { return newBuilder(this); }
312
313 @java.lang.Override
314 protected Builder newBuilderForType(
315 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
316 Builder builder = new Builder(parent);
317 return builder;
318 }
319 /**
320 * Protobuf type {@code hadoop.hdfs.JournalIdProto}
321 */
322 public static final class Builder extends
323 com.google.protobuf.GeneratedMessage.Builder<Builder>
324 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
325 public static final com.google.protobuf.Descriptors.Descriptor
326 getDescriptor() {
327 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
328 }
329
330 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
331 internalGetFieldAccessorTable() {
332 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
333 .ensureFieldAccessorsInitialized(
334 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
335 }
336
337 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
338 private Builder() {
339 maybeForceBuilderInitialization();
340 }
341
342 private Builder(
343 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
344 super(parent);
345 maybeForceBuilderInitialization();
346 }
347 private void maybeForceBuilderInitialization() {
348 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
349 }
350 }
351 private static Builder create() {
352 return new Builder();
353 }
354
355 public Builder clear() {
356 super.clear();
357 identifier_ = "";
358 bitField0_ = (bitField0_ & ~0x00000001);
359 return this;
360 }
361
362 public Builder clone() {
363 return create().mergeFrom(buildPartial());
364 }
365
366 public com.google.protobuf.Descriptors.Descriptor
367 getDescriptorForType() {
368 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
369 }
370
371 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
372 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
373 }
374
375 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
376 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
377 if (!result.isInitialized()) {
378 throw newUninitializedMessageException(result);
379 }
380 return result;
381 }
382
383 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
384 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
385 int from_bitField0_ = bitField0_;
386 int to_bitField0_ = 0;
387 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
388 to_bitField0_ |= 0x00000001;
389 }
390 result.identifier_ = identifier_;
391 result.bitField0_ = to_bitField0_;
392 onBuilt();
393 return result;
394 }
395
396 public Builder mergeFrom(com.google.protobuf.Message other) {
397 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
398 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
399 } else {
400 super.mergeFrom(other);
401 return this;
402 }
403 }
404
405 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
406 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
407 if (other.hasIdentifier()) {
408 bitField0_ |= 0x00000001;
409 identifier_ = other.identifier_;
410 onChanged();
411 }
412 this.mergeUnknownFields(other.getUnknownFields());
413 return this;
414 }
415
416 public final boolean isInitialized() {
417 if (!hasIdentifier()) {
418
419 return false;
420 }
421 return true;
422 }
423
424 public Builder mergeFrom(
425 com.google.protobuf.CodedInputStream input,
426 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
427 throws java.io.IOException {
428 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
429 try {
430 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
431 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
432 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
433 throw e;
434 } finally {
435 if (parsedMessage != null) {
436 mergeFrom(parsedMessage);
437 }
438 }
439 return this;
440 }
441 private int bitField0_;
442
443 // required string identifier = 1;
444 private java.lang.Object identifier_ = "";
445 /**
446 * <code>required string identifier = 1;</code>
447 */
448 public boolean hasIdentifier() {
449 return ((bitField0_ & 0x00000001) == 0x00000001);
450 }
451 /**
452 * <code>required string identifier = 1;</code>
453 */
454 public java.lang.String getIdentifier() {
455 java.lang.Object ref = identifier_;
456 if (!(ref instanceof java.lang.String)) {
457 java.lang.String s = ((com.google.protobuf.ByteString) ref)
458 .toStringUtf8();
459 identifier_ = s;
460 return s;
461 } else {
462 return (java.lang.String) ref;
463 }
464 }
465 /**
466 * <code>required string identifier = 1;</code>
467 */
468 public com.google.protobuf.ByteString
469 getIdentifierBytes() {
470 java.lang.Object ref = identifier_;
471 if (ref instanceof String) {
472 com.google.protobuf.ByteString b =
473 com.google.protobuf.ByteString.copyFromUtf8(
474 (java.lang.String) ref);
475 identifier_ = b;
476 return b;
477 } else {
478 return (com.google.protobuf.ByteString) ref;
479 }
480 }
481 /**
482 * <code>required string identifier = 1;</code>
483 */
484 public Builder setIdentifier(
485 java.lang.String value) {
486 if (value == null) {
487 throw new NullPointerException();
488 }
489 bitField0_ |= 0x00000001;
490 identifier_ = value;
491 onChanged();
492 return this;
493 }
494 /**
495 * <code>required string identifier = 1;</code>
496 */
497 public Builder clearIdentifier() {
498 bitField0_ = (bitField0_ & ~0x00000001);
499 identifier_ = getDefaultInstance().getIdentifier();
500 onChanged();
501 return this;
502 }
503 /**
504 * <code>required string identifier = 1;</code>
505 */
506 public Builder setIdentifierBytes(
507 com.google.protobuf.ByteString value) {
508 if (value == null) {
509 throw new NullPointerException();
510 }
511 bitField0_ |= 0x00000001;
512 identifier_ = value;
513 onChanged();
514 return this;
515 }
516
517 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalIdProto)
518 }
519
520 static {
521 defaultInstance = new JournalIdProto(true);
522 defaultInstance.initFields();
523 }
524
525 // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalIdProto)
526 }
527
  /**
   * Accessor contract shared by {@code RequestInfoProto} and its Builder.
   * Generated for the message {@code hadoop.hdfs.RequestInfoProto}.
   */
  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    boolean hasJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();

    // required uint64 epoch = 2;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    long getEpoch();

    // required uint64 ipcSerialNumber = 3;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    boolean hasIpcSerialNumber();
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    long getIpcSerialNumber();

    // optional uint64 committedTxId = 4;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    boolean hasCommittedTxId();
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    long getCommittedTxId();
  }
591 /**
592 * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
593 */
594 public static final class RequestInfoProto extends
595 com.google.protobuf.GeneratedMessage
596 implements RequestInfoProtoOrBuilder {
    // Use RequestInfoProto.newBuilder() to construct.
    private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only to create the singleton default instance (static block below).
    private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance; assigned in the class's static initializer.
    private static final RequestInfoProto defaultInstance;
    public static RequestInfoProto getDefaultInstance() {
      return defaultInstance;
    }

    public RequestInfoProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not in this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Parses a message directly from the wire format. Unknown fields are
     * preserved; recognized tags fill the corresponding member fields and
     * set their presence bits.
     */
    private RequestInfoProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of input.
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (journalId), wire type 2. If the field was already
              // seen, merge the new value into the existing one, matching
              // proto2 last-message-merge semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = journalId_.toBuilder();
              }
              journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(journalId_);
                journalId_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (epoch), wire type 0 (varint).
              bitField0_ |= 0x00000002;
              epoch_ = input.readUInt64();
              break;
            }
            case 24: {
              // Field 3 (ipcSerialNumber), wire type 0 (varint).
              bitField0_ |= 0x00000004;
              ipcSerialNumber_ = input.readUInt64();
              break;
            }
            case 32: {
              // Field 4 (committedTxId), wire type 0 (varint).
              bitField0_ |= 0x00000008;
              committedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture whatever was parsed so far, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Returns the descriptor for hadoop.hdfs.RequestInfoProto, defined at
    // file scope in this generated class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
    }
693
694 public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
695 new com.google.protobuf.AbstractParser<RequestInfoProto>() {
696 public RequestInfoProto parsePartialFrom(
697 com.google.protobuf.CodedInputStream input,
698 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
699 throws com.google.protobuf.InvalidProtocolBufferException {
700 return new RequestInfoProto(input, extensionRegistry);
701 }
702 };
703
704 @java.lang.Override
705 public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
706 return PARSER;
707 }
708
    // Presence bits: bit 0 journalId, bit 1 epoch, bit 2 ipcSerialNumber,
    // bit 3 committedTxId.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    public static final int JOURNALID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public boolean hasJournalId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
      return journalId_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
      return journalId_;
    }

    // required uint64 epoch = 2;
    public static final int EPOCH_FIELD_NUMBER = 2;
    private long epoch_;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public long getEpoch() {
      return epoch_;
    }

    // required uint64 ipcSerialNumber = 3;
    public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
    private long ipcSerialNumber_;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public boolean hasIpcSerialNumber() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public long getIpcSerialNumber() {
      return ipcSerialNumber_;
    }

    // optional uint64 committedTxId = 4;
    public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
    private long committedTxId_;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    public boolean hasCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    public long getCommittedTxId() {
      return committedTxId_;
    }
795
    // Resets all fields to their proto2 defaults.
    private void initFields() {
      journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      epoch_ = 0L;
      ipcSerialNumber_ = 0L;
      committedTxId_ = 0L;
    }
    // Memoized isInitialized result: -1 unknown, 0 false, 1 true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // All three required fields must be present...
      if (!hasJournalId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIpcSerialNumber()) {
        memoizedIsInitialized = 0;
        return false;
      }
      // ...and the nested journalId message must itself be initialized.
      if (!getJournalId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
826
    // Serializes set fields in field-number order, then unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt64(4, committedTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized wire size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(1, journalId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(2, epoch_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(3, ipcSerialNumber_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(4, committedTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
871
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's proxy form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
878
    // Field-by-field equality: presence bits must match, and each present
    // field must compare equal; unknown fields are compared as well.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;

      boolean result = true;
      result = result && (hasJournalId() == other.hasJournalId());
      if (hasJournalId()) {
        result = result && getJournalId()
            .equals(other.getJournalId());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
      if (hasIpcSerialNumber()) {
        result = result && (getIpcSerialNumber()
            == other.getIpcSerialNumber());
      }
      result = result && (hasCommittedTxId() == other.hasCommittedTxId());
      if (hasCommittedTxId()) {
        result = result && (getCommittedTxId()
            == other.getCommittedTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 means not yet computed. Mixes the descriptor, each
    // present field's number and value, and the unknown-field set. The
    // multiplier constants are fixed by the protobuf generator and must not
    // change, or hashes would disagree across generated classes.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJournalId()) {
        hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
        hash = (53 * hash) + getJournalId().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      if (hasIpcSerialNumber()) {
        hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getIpcSerialNumber());
      }
      if (hasCommittedTxId()) {
        hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getCommittedTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
943
    // Static parse entry points; all delegate to the shared PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
985 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
986 com.google.protobuf.CodedInputStream input)
987 throws java.io.IOException {
988 return PARSER.parseFrom(input);
989 }
990 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
991 com.google.protobuf.CodedInputStream input,
992 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
993 throws java.io.IOException {
994 return PARSER.parseFrom(input, extensionRegistry);
995 }
996
// Builder factory methods.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
// Returns a fresh builder pre-populated from the given prototype.
public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

// Creates a builder attached to a parent builder for change
// notification; used when this message is a nested field of another
// message being built.
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
1010 /**
1011 * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
1012 */
1013 public static final class Builder extends
1014 com.google.protobuf.GeneratedMessage.Builder<Builder>
1015 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
1016 public static final com.google.protobuf.Descriptors.Descriptor
1017 getDescriptor() {
1018 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1019 }
1020
1021 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1022 internalGetFieldAccessorTable() {
1023 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
1024 .ensureFieldAccessorsInitialized(
1025 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
1026 }
1027
1028 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
1029 private Builder() {
1030 maybeForceBuilderInitialization();
1031 }
1032
1033 private Builder(
1034 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1035 super(parent);
1036 maybeForceBuilderInitialization();
1037 }
1038 private void maybeForceBuilderInitialization() {
1039 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1040 getJournalIdFieldBuilder();
1041 }
1042 }
1043 private static Builder create() {
1044 return new Builder();
1045 }
1046
1047 public Builder clear() {
1048 super.clear();
1049 if (journalIdBuilder_ == null) {
1050 journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1051 } else {
1052 journalIdBuilder_.clear();
1053 }
1054 bitField0_ = (bitField0_ & ~0x00000001);
1055 epoch_ = 0L;
1056 bitField0_ = (bitField0_ & ~0x00000002);
1057 ipcSerialNumber_ = 0L;
1058 bitField0_ = (bitField0_ & ~0x00000004);
1059 committedTxId_ = 0L;
1060 bitField0_ = (bitField0_ & ~0x00000008);
1061 return this;
1062 }
1063
1064 public Builder clone() {
1065 return create().mergeFrom(buildPartial());
1066 }
1067
1068 public com.google.protobuf.Descriptors.Descriptor
1069 getDescriptorForType() {
1070 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1071 }
1072
1073 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
1074 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
1075 }
1076
1077 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
1078 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
1079 if (!result.isInitialized()) {
1080 throw newUninitializedMessageException(result);
1081 }
1082 return result;
1083 }
1084
1085 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
1086 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
1087 int from_bitField0_ = bitField0_;
1088 int to_bitField0_ = 0;
1089 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1090 to_bitField0_ |= 0x00000001;
1091 }
1092 if (journalIdBuilder_ == null) {
1093 result.journalId_ = journalId_;
1094 } else {
1095 result.journalId_ = journalIdBuilder_.build();
1096 }
1097 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1098 to_bitField0_ |= 0x00000002;
1099 }
1100 result.epoch_ = epoch_;
1101 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1102 to_bitField0_ |= 0x00000004;
1103 }
1104 result.ipcSerialNumber_ = ipcSerialNumber_;
1105 if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1106 to_bitField0_ |= 0x00000008;
1107 }
1108 result.committedTxId_ = committedTxId_;
1109 result.bitField0_ = to_bitField0_;
1110 onBuilt();
1111 return result;
1112 }
1113
1114 public Builder mergeFrom(com.google.protobuf.Message other) {
1115 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
1116 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
1117 } else {
1118 super.mergeFrom(other);
1119 return this;
1120 }
1121 }
1122
1123 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
1124 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
1125 if (other.hasJournalId()) {
1126 mergeJournalId(other.getJournalId());
1127 }
1128 if (other.hasEpoch()) {
1129 setEpoch(other.getEpoch());
1130 }
1131 if (other.hasIpcSerialNumber()) {
1132 setIpcSerialNumber(other.getIpcSerialNumber());
1133 }
1134 if (other.hasCommittedTxId()) {
1135 setCommittedTxId(other.getCommittedTxId());
1136 }
1137 this.mergeUnknownFields(other.getUnknownFields());
1138 return this;
1139 }
1140
1141 public final boolean isInitialized() {
1142 if (!hasJournalId()) {
1143
1144 return false;
1145 }
1146 if (!hasEpoch()) {
1147
1148 return false;
1149 }
1150 if (!hasIpcSerialNumber()) {
1151
1152 return false;
1153 }
1154 if (!getJournalId().isInitialized()) {
1155
1156 return false;
1157 }
1158 return true;
1159 }
1160
1161 public Builder mergeFrom(
1162 com.google.protobuf.CodedInputStream input,
1163 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1164 throws java.io.IOException {
1165 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
1166 try {
1167 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1168 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1169 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
1170 throw e;
1171 } finally {
1172 if (parsedMessage != null) {
1173 mergeFrom(parsedMessage);
1174 }
1175 }
1176 return this;
1177 }
1178 private int bitField0_;
1179
1180 // required .hadoop.hdfs.JournalIdProto journalId = 1;
1181 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1182 private com.google.protobuf.SingleFieldBuilder<
1183 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
1184 /**
1185 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1186 */
1187 public boolean hasJournalId() {
1188 return ((bitField0_ & 0x00000001) == 0x00000001);
1189 }
1190 /**
1191 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1192 */
1193 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
1194 if (journalIdBuilder_ == null) {
1195 return journalId_;
1196 } else {
1197 return journalIdBuilder_.getMessage();
1198 }
1199 }
1200 /**
1201 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1202 */
1203 public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1204 if (journalIdBuilder_ == null) {
1205 if (value == null) {
1206 throw new NullPointerException();
1207 }
1208 journalId_ = value;
1209 onChanged();
1210 } else {
1211 journalIdBuilder_.setMessage(value);
1212 }
1213 bitField0_ |= 0x00000001;
1214 return this;
1215 }
1216 /**
1217 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1218 */
1219 public Builder setJournalId(
1220 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
1221 if (journalIdBuilder_ == null) {
1222 journalId_ = builderForValue.build();
1223 onChanged();
1224 } else {
1225 journalIdBuilder_.setMessage(builderForValue.build());
1226 }
1227 bitField0_ |= 0x00000001;
1228 return this;
1229 }
1230 /**
1231 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1232 */
1233 public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1234 if (journalIdBuilder_ == null) {
1235 if (((bitField0_ & 0x00000001) == 0x00000001) &&
1236 journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
1237 journalId_ =
1238 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
1239 } else {
1240 journalId_ = value;
1241 }
1242 onChanged();
1243 } else {
1244 journalIdBuilder_.mergeFrom(value);
1245 }
1246 bitField0_ |= 0x00000001;
1247 return this;
1248 }
1249 /**
1250 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1251 */
1252 public Builder clearJournalId() {
1253 if (journalIdBuilder_ == null) {
1254 journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1255 onChanged();
1256 } else {
1257 journalIdBuilder_.clear();
1258 }
1259 bitField0_ = (bitField0_ & ~0x00000001);
1260 return this;
1261 }
1262 /**
1263 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1264 */
1265 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1266 bitField0_ |= 0x00000001;
1267 onChanged();
1268 return getJournalIdFieldBuilder().getBuilder();
1269 }
1270 /**
1271 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1272 */
1273 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1274 if (journalIdBuilder_ != null) {
1275 return journalIdBuilder_.getMessageOrBuilder();
1276 } else {
1277 return journalId_;
1278 }
1279 }
1280 /**
1281 * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1282 */
1283 private com.google.protobuf.SingleFieldBuilder<
1284 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
1285 getJournalIdFieldBuilder() {
1286 if (journalIdBuilder_ == null) {
1287 journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1288 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1289 journalId_,
1290 getParentForChildren(),
1291 isClean());
1292 journalId_ = null;
1293 }
1294 return journalIdBuilder_;
1295 }
1296
1297 // required uint64 epoch = 2;
1298 private long epoch_ ;
1299 /**
1300 * <code>required uint64 epoch = 2;</code>
1301 */
1302 public boolean hasEpoch() {
1303 return ((bitField0_ & 0x00000002) == 0x00000002);
1304 }
1305 /**
1306 * <code>required uint64 epoch = 2;</code>
1307 */
1308 public long getEpoch() {
1309 return epoch_;
1310 }
1311 /**
1312 * <code>required uint64 epoch = 2;</code>
1313 */
1314 public Builder setEpoch(long value) {
1315 bitField0_ |= 0x00000002;
1316 epoch_ = value;
1317 onChanged();
1318 return this;
1319 }
1320 /**
1321 * <code>required uint64 epoch = 2;</code>
1322 */
1323 public Builder clearEpoch() {
1324 bitField0_ = (bitField0_ & ~0x00000002);
1325 epoch_ = 0L;
1326 onChanged();
1327 return this;
1328 }
1329
1330 // required uint64 ipcSerialNumber = 3;
1331 private long ipcSerialNumber_ ;
1332 /**
1333 * <code>required uint64 ipcSerialNumber = 3;</code>
1334 */
1335 public boolean hasIpcSerialNumber() {
1336 return ((bitField0_ & 0x00000004) == 0x00000004);
1337 }
1338 /**
1339 * <code>required uint64 ipcSerialNumber = 3;</code>
1340 */
1341 public long getIpcSerialNumber() {
1342 return ipcSerialNumber_;
1343 }
1344 /**
1345 * <code>required uint64 ipcSerialNumber = 3;</code>
1346 */
1347 public Builder setIpcSerialNumber(long value) {
1348 bitField0_ |= 0x00000004;
1349 ipcSerialNumber_ = value;
1350 onChanged();
1351 return this;
1352 }
1353 /**
1354 * <code>required uint64 ipcSerialNumber = 3;</code>
1355 */
1356 public Builder clearIpcSerialNumber() {
1357 bitField0_ = (bitField0_ & ~0x00000004);
1358 ipcSerialNumber_ = 0L;
1359 onChanged();
1360 return this;
1361 }
1362
1363 // optional uint64 committedTxId = 4;
1364 private long committedTxId_ ;
1365 /**
1366 * <code>optional uint64 committedTxId = 4;</code>
1367 *
1368 * <pre>
1369 * Whenever a writer makes a request, it informs
1370 * the node of the latest committed txid. This may
1371 * be higher than the transaction data included in the
1372 * request itself, eg in the case that the node has
1373 * fallen behind.
1374 * </pre>
1375 */
1376 public boolean hasCommittedTxId() {
1377 return ((bitField0_ & 0x00000008) == 0x00000008);
1378 }
1379 /**
1380 * <code>optional uint64 committedTxId = 4;</code>
1381 *
1382 * <pre>
1383 * Whenever a writer makes a request, it informs
1384 * the node of the latest committed txid. This may
1385 * be higher than the transaction data included in the
1386 * request itself, eg in the case that the node has
1387 * fallen behind.
1388 * </pre>
1389 */
1390 public long getCommittedTxId() {
1391 return committedTxId_;
1392 }
1393 /**
1394 * <code>optional uint64 committedTxId = 4;</code>
1395 *
1396 * <pre>
1397 * Whenever a writer makes a request, it informs
1398 * the node of the latest committed txid. This may
1399 * be higher than the transaction data included in the
1400 * request itself, eg in the case that the node has
1401 * fallen behind.
1402 * </pre>
1403 */
1404 public Builder setCommittedTxId(long value) {
1405 bitField0_ |= 0x00000008;
1406 committedTxId_ = value;
1407 onChanged();
1408 return this;
1409 }
1410 /**
1411 * <code>optional uint64 committedTxId = 4;</code>
1412 *
1413 * <pre>
1414 * Whenever a writer makes a request, it informs
1415 * the node of the latest committed txid. This may
1416 * be higher than the transaction data included in the
1417 * request itself, eg in the case that the node has
1418 * fallen behind.
1419 * </pre>
1420 */
1421 public Builder clearCommittedTxId() {
1422 bitField0_ = (bitField0_ & ~0x00000008);
1423 committedTxId_ = 0L;
1424 onChanged();
1425 return this;
1426 }
1427
1428 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RequestInfoProto)
1429 }
1430
static {
  // Create the shared default instance (noInit constructor) and set its
  // fields to their proto-declared defaults.
  defaultInstance = new RequestInfoProto(true);
  defaultInstance.initFields();
}
1435
1436 // @@protoc_insertion_point(class_scope:hadoop.hdfs.RequestInfoProto)
1437 }
1438
/**
 * Read-only accessors shared by {@code SegmentStateProto} and its
 * Builder. Each {@code hasX()} reports whether the field was explicitly
 * set (proto2 presence semantics).
 */
public interface SegmentStateProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required uint64 startTxId = 1;
  /**
   * <code>required uint64 startTxId = 1;</code>
   */
  boolean hasStartTxId();
  /**
   * <code>required uint64 startTxId = 1;</code>
   */
  long getStartTxId();

  // required uint64 endTxId = 2;
  /**
   * <code>required uint64 endTxId = 2;</code>
   */
  boolean hasEndTxId();
  /**
   * <code>required uint64 endTxId = 2;</code>
   */
  long getEndTxId();

  // required bool isInProgress = 3;
  /**
   * <code>required bool isInProgress = 3;</code>
   */
  boolean hasIsInProgress();
  /**
   * <code>required bool isInProgress = 3;</code>
   */
  boolean getIsInProgress();
}
1472 /**
1473 * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1474 */
1475 public static final class SegmentStateProto extends
1476 com.google.protobuf.GeneratedMessage
1477 implements SegmentStateProtoOrBuilder {
// Use SegmentStateProto.newBuilder() to construct.
private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Constructor for the singleton default instance; carries an empty
// unknown-field set.
private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

// Immutable shared instance with all fields at their defaults
// (assigned in this class's static initializer).
private static final SegmentStateProto defaultInstance;
public static SegmentStateProto getDefaultInstance() {
  return defaultInstance;
}

public SegmentStateProto getDefaultInstanceForType() {
  return defaultInstance;
}

// Fields present on the wire but not known to this generated schema.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Stream-parsing constructor used by PARSER. Reads tag/value pairs until
// end of input (tag 0); recognized fields set their presence bit in
// bitField0_, anything unrecognized is preserved in unknownFields.
private SegmentStateProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        // Note: the default label precedes the numbered cases here; this
        // is legal Java and each arm ends with break, so there is no
        // fall-through and case order does not affect dispatch.
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 8: {
          bitField0_ |= 0x00000001;
          startTxId_ = input.readUInt64();
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          endTxId_ = input.readUInt64();
          break;
        }
        case 24: {
          bitField0_ |= 0x00000004;
          isInProgress_ = input.readBool();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Always freeze what was read, even on error, so the partial message
    // attached to the exception is usable.
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
}

protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
}

// Parser singleton; delegates to the stream-parsing constructor.
// NOTE(review): generated as a non-final public static field (protobuf
// 2.5 codegen style) — do not reassign.
public static com.google.protobuf.Parser<SegmentStateProto> PARSER =
    new com.google.protobuf.AbstractParser<SegmentStateProto>() {
  public SegmentStateProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new SegmentStateProto(input, extensionRegistry);
  }
};

@java.lang.Override
public com.google.protobuf.Parser<SegmentStateProto> getParserForType() {
  return PARSER;
}
1576
// Presence bits: 0x1 = startTxId, 0x2 = endTxId, 0x4 = isInProgress.
private int bitField0_;
// required uint64 startTxId = 1;
public static final int STARTTXID_FIELD_NUMBER = 1;
private long startTxId_;
/**
 * <code>required uint64 startTxId = 1;</code>
 */
public boolean hasStartTxId() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>required uint64 startTxId = 1;</code>
 */
public long getStartTxId() {
  return startTxId_;
}

// required uint64 endTxId = 2;
public static final int ENDTXID_FIELD_NUMBER = 2;
private long endTxId_;
/**
 * <code>required uint64 endTxId = 2;</code>
 */
public boolean hasEndTxId() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <code>required uint64 endTxId = 2;</code>
 */
public long getEndTxId() {
  return endTxId_;
}

// required bool isInProgress = 3;
public static final int ISINPROGRESS_FIELD_NUMBER = 3;
private boolean isInProgress_;
/**
 * <code>required bool isInProgress = 3;</code>
 */
public boolean hasIsInProgress() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * <code>required bool isInProgress = 3;</code>
 */
public boolean getIsInProgress() {
  return isInProgress_;
}
1625
// Sets every field to its proto-declared default (called before parsing).
private void initFields() {
  startTxId_ = 0L;
  endTxId_ = 0L;
  isInProgress_ = false;
}
// Memoized required-field check: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;

  // All three fields are required by the proto definition.
  if (!hasStartTxId()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasEndTxId()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasIsInProgress()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
1651
// Serializes only the fields whose presence bits are set, followed by
// any unknown fields. getSerializedSize() is invoked first to populate
// the size memo used by the wire format.
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeUInt64(1, startTxId_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt64(2, endTxId_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeBool(3, isInProgress_);
  }
  getUnknownFields().writeTo(output);
}

// Cached serialized size; -1 means "not yet computed".
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  // Sum the wire size of each present field plus the unknown fields.
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt64Size(1, startTxId_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt64Size(2, endTxId_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBoolSize(3, isInProgress_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
1689
private static final long serialVersionUID = 0L;
// Java serialization is delegated to GeneratedMessage.writeReplace().
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
1696
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Two messages are equal when each field agrees on presence, present
  // fields agree on value, and the unknown field sets are equal.
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;

  boolean result = true;
  result = result && (hasStartTxId() == other.hasStartTxId());
  if (hasStartTxId()) {
    result = result && (getStartTxId()
        == other.getStartTxId());
  }
  result = result && (hasEndTxId() == other.hasEndTxId());
  if (hasEndTxId()) {
    result = result && (getEndTxId()
        == other.getEndTxId());
  }
  result = result && (hasIsInProgress() == other.hasIsInProgress());
  if (hasIsInProgress()) {
    result = result && (getIsInProgress()
        == other.getIsInProgress());
  }
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
1727
// Lazily computed hashCode cache. 0 means "not yet computed"; if the real
// hash happens to be 0 it is simply recomputed on each call (benign).
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
  // Consistent with equals(): mixes the descriptor, each present field
  // (tagged with its field number), and the unknown field set.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasStartTxId()) {
    hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getStartTxId());
  }
  if (hasEndTxId()) {
    hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getEndTxId());
  }
  if (hasIsInProgress()) {
    hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
    hash = (53 * hash) + hashBoolean(getIsInProgress());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
1752
// Static parsing entry points for SegmentStateProto. All delegate to
// PARSER; the ExtensionRegistryLite overloads allow extension fields to
// be recognized while parsing. The delimited variants read a varint
// length prefix before the message bytes.
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
1805
// Builder factory methods.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
// Returns a fresh builder pre-populated from the given prototype.
public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

// Creates a builder attached to a parent builder for change
// notification; used when this message is a nested field of another
// message being built.
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
1819 /**
1820 * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1821 */
1822 public static final class Builder extends
1823 com.google.protobuf.GeneratedMessage.Builder<Builder>
1824 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
}

// Maps reflective field access onto this builder's generated accessors.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
}
1836
1837 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
1838 private Builder() {
1839 maybeForceBuilderInitialization();
1840 }
1841
1842 private Builder(
1843 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1844 super(parent);
1845 maybeForceBuilderInitialization();
1846 }
1847 private void maybeForceBuilderInitialization() {
1848 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1849 }
1850 }
1851 private static Builder create() {
1852 return new Builder();
1853 }
1854
1855 public Builder clear() {
1856 super.clear();
1857 startTxId_ = 0L;
1858 bitField0_ = (bitField0_ & ~0x00000001);
1859 endTxId_ = 0L;
1860 bitField0_ = (bitField0_ & ~0x00000002);
1861 isInProgress_ = false;
1862 bitField0_ = (bitField0_ & ~0x00000004);
1863 return this;
1864 }
1865
1866 public Builder clone() {
1867 return create().mergeFrom(buildPartial());
1868 }
1869
1870 public com.google.protobuf.Descriptors.Descriptor
1871 getDescriptorForType() {
1872 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1873 }
1874
1875 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
1876 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1877 }
1878
1879 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
1880 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1881 if (!result.isInitialized()) {
1882 throw newUninitializedMessageException(result);
1883 }
1884 return result;
1885 }
1886
1887 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
1888 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
1889 int from_bitField0_ = bitField0_;
1890 int to_bitField0_ = 0;
1891 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1892 to_bitField0_ |= 0x00000001;
1893 }
1894 result.startTxId_ = startTxId_;
1895 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1896 to_bitField0_ |= 0x00000002;
1897 }
1898 result.endTxId_ = endTxId_;
1899 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1900 to_bitField0_ |= 0x00000004;
1901 }
1902 result.isInProgress_ = isInProgress_;
1903 result.bitField0_ = to_bitField0_;
1904 onBuilt();
1905 return result;
1906 }
1907
1908 public Builder mergeFrom(com.google.protobuf.Message other) {
1909 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
1910 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
1911 } else {
1912 super.mergeFrom(other);
1913 return this;
1914 }
1915 }
1916
1917 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
1918 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
1919 if (other.hasStartTxId()) {
1920 setStartTxId(other.getStartTxId());
1921 }
1922 if (other.hasEndTxId()) {
1923 setEndTxId(other.getEndTxId());
1924 }
1925 if (other.hasIsInProgress()) {
1926 setIsInProgress(other.getIsInProgress());
1927 }
1928 this.mergeUnknownFields(other.getUnknownFields());
1929 return this;
1930 }
1931
1932 public final boolean isInitialized() {
1933 if (!hasStartTxId()) {
1934
1935 return false;
1936 }
1937 if (!hasEndTxId()) {
1938
1939 return false;
1940 }
1941 if (!hasIsInProgress()) {
1942
1943 return false;
1944 }
1945 return true;
1946 }
1947
1948 public Builder mergeFrom(
1949 com.google.protobuf.CodedInputStream input,
1950 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1951 throws java.io.IOException {
1952 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null;
1953 try {
1954 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1955 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1956 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage();
1957 throw e;
1958 } finally {
1959 if (parsedMessage != null) {
1960 mergeFrom(parsedMessage);
1961 }
1962 }
1963 return this;
1964 }
1965 private int bitField0_;
1966
1967 // required uint64 startTxId = 1;
1968 private long startTxId_ ;
1969 /**
1970 * <code>required uint64 startTxId = 1;</code>
1971 */
1972 public boolean hasStartTxId() {
1973 return ((bitField0_ & 0x00000001) == 0x00000001);
1974 }
1975 /**
1976 * <code>required uint64 startTxId = 1;</code>
1977 */
1978 public long getStartTxId() {
1979 return startTxId_;
1980 }
1981 /**
1982 * <code>required uint64 startTxId = 1;</code>
1983 */
1984 public Builder setStartTxId(long value) {
1985 bitField0_ |= 0x00000001;
1986 startTxId_ = value;
1987 onChanged();
1988 return this;
1989 }
1990 /**
1991 * <code>required uint64 startTxId = 1;</code>
1992 */
1993 public Builder clearStartTxId() {
1994 bitField0_ = (bitField0_ & ~0x00000001);
1995 startTxId_ = 0L;
1996 onChanged();
1997 return this;
1998 }
1999
2000 // required uint64 endTxId = 2;
2001 private long endTxId_ ;
2002 /**
2003 * <code>required uint64 endTxId = 2;</code>
2004 */
2005 public boolean hasEndTxId() {
2006 return ((bitField0_ & 0x00000002) == 0x00000002);
2007 }
2008 /**
2009 * <code>required uint64 endTxId = 2;</code>
2010 */
2011 public long getEndTxId() {
2012 return endTxId_;
2013 }
2014 /**
2015 * <code>required uint64 endTxId = 2;</code>
2016 */
2017 public Builder setEndTxId(long value) {
2018 bitField0_ |= 0x00000002;
2019 endTxId_ = value;
2020 onChanged();
2021 return this;
2022 }
2023 /**
2024 * <code>required uint64 endTxId = 2;</code>
2025 */
2026 public Builder clearEndTxId() {
2027 bitField0_ = (bitField0_ & ~0x00000002);
2028 endTxId_ = 0L;
2029 onChanged();
2030 return this;
2031 }
2032
2033 // required bool isInProgress = 3;
2034 private boolean isInProgress_ ;
2035 /**
2036 * <code>required bool isInProgress = 3;</code>
2037 */
2038 public boolean hasIsInProgress() {
2039 return ((bitField0_ & 0x00000004) == 0x00000004);
2040 }
2041 /**
2042 * <code>required bool isInProgress = 3;</code>
2043 */
2044 public boolean getIsInProgress() {
2045 return isInProgress_;
2046 }
2047 /**
2048 * <code>required bool isInProgress = 3;</code>
2049 */
2050 public Builder setIsInProgress(boolean value) {
2051 bitField0_ |= 0x00000004;
2052 isInProgress_ = value;
2053 onChanged();
2054 return this;
2055 }
2056 /**
2057 * <code>required bool isInProgress = 3;</code>
2058 */
2059 public Builder clearIsInProgress() {
2060 bitField0_ = (bitField0_ & ~0x00000004);
2061 isInProgress_ = false;
2062 onChanged();
2063 return this;
2064 }
2065
2066 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SegmentStateProto)
2067 }
2068
    static {
      // Eagerly create and initialize the singleton default instance so
      // getDefaultInstance() never observes a null.
      defaultInstance = new SegmentStateProto(true);
      defaultInstance.initFields();
    }
2073
2074 // @@protoc_insertion_point(class_scope:hadoop.hdfs.SegmentStateProto)
2075 }
2076
  /**
   * Accessor contract shared by {@code PersistedRecoveryPaxosData} and its
   * builder: presence checks and getters for the two required fields.
   */
  public interface PersistedRecoveryPaxosDataOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    boolean hasSegmentState();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();

    // required uint64 acceptedInEpoch = 2;
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    boolean hasAcceptedInEpoch();
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    long getAcceptedInEpoch();
  }
2104 /**
2105 * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2106 *
2107 * <pre>
2108 **
2109 * The storage format used on local disk for previously
2110 * accepted decisions.
2111 * </pre>
2112 */
2113 public static final class PersistedRecoveryPaxosData extends
2114 com.google.protobuf.GeneratedMessage
2115 implements PersistedRecoveryPaxosDataOrBuilder {
    // Use PersistedRecoveryPaxosData.newBuilder() to construct.
    private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Variant used only to create the singleton default instance below.
    private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance; created and initialized in the class's
    // static initializer (outside this view).
    private static final PersistedRecoveryPaxosData defaultInstance;
    public static PersistedRecoveryPaxosData getDefaultInstance() {
      return defaultInstance;
    }

    public PersistedRecoveryPaxosData getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: consumes tag/value pairs until end of
     * input (tag 0), preserving unrecognized fields in {@code unknownFields}.
     * Even on failure, the finally block freezes whatever was parsed so the
     * partially-built message attached to the exception is consistent.
     */
    private PersistedRecoveryPaxosData(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (segmentState, wire type 2): if the field appeared
              // earlier in the stream, merge into the prior value per proto2
              // repeated-occurrence semantics rather than overwriting.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = segmentState_.toBuilder();
              }
              segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(segmentState_);
                segmentState_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (acceptedInEpoch, varint wire type).
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
    }

    // NOTE(review): non-final by protoc 2.5 convention; treat as read-only.
    public static com.google.protobuf.Parser<PersistedRecoveryPaxosData> PARSER =
        new com.google.protobuf.AbstractParser<PersistedRecoveryPaxosData>() {
      public PersistedRecoveryPaxosData parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PersistedRecoveryPaxosData(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PersistedRecoveryPaxosData> getParserForType() {
      return PARSER;
    }
2217
    // Presence bits: 0x1 = segmentState, 0x2 = acceptedInEpoch.
    private int bitField0_;
    // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }

    // required uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 acceptedInEpoch = 2;</code>
     */
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }

    // Sets all fields to their proto defaults; called from the parsing
    // constructor and the class static initializer.
    private void initFields() {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      acceptedInEpoch_ = 0L;
    }
    // Memoized tri-state: -1 = not computed, 0 = missing required field(s), 1 = ok.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasSegmentState()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasAcceptedInEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      // Required sub-message must itself be fully initialized.
      if (!getSegmentState().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    /** Writes set fields in field-number order, then any unknown fields. */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Called for its memoization side effect so nested sizes are cached
      // before serialization begins.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, acceptedInEpoch_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, segmentState_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, acceptedInEpoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      // Java serialization is delegated to GeneratedMessage's proxy form.
      return super.writeReplace();
    }
2319
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;

      // Two messages are equal iff each field has matching presence, matching
      // value when present, and identical unknown fields.
      boolean result = true;
      result = result && (hasSegmentState() == other.hasSegmentState());
      if (hasSegmentState()) {
        result = result && getSegmentState()
            .equals(other.getSegmentState());
      }
      result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
      if (hasAcceptedInEpoch()) {
        result = result && (getAcceptedInEpoch()
            == other.getAcceptedInEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Cached hash; 0 means "not yet computed".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      // Standard protobuf-generated hash: fold in descriptor, then each
      // present field tagged by its field number.
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSegmentState()) {
        hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
        hash = (53 * hash) + getSegmentState().hashCode();
      }
      if (hasAcceptedInEpoch()) {
        hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAcceptedInEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
2366
    /** Parses a message from a serialized {@code ByteString}. */
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    /** Parses a message from a byte array containing exactly one serialized message. */
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    /** Parses a message that occupies the remainder of {@code input}. */
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    /** Parses a varint-length-prefixed message, leaving the stream positioned after it. */
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    /** Returns a fresh builder with all fields at their defaults. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Returns a builder pre-populated with the set fields of {@code prototype}. */
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** Returns a builder initialized from this message's current field values. */
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      // Parent-aware factory used internally by GeneratedMessage's
      // nested-builder plumbing.
      Builder builder = new Builder(parent);
      return builder;
    }
2433 /**
2434 * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2435 *
2436 * <pre>
2437 **
2438 * The storage format used on local disk for previously
2439 * accepted decisions.
2440 * </pre>
2441 */
2442 public static final class Builder extends
2443 com.google.protobuf.GeneratedMessage.Builder<Builder>
2444 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Pre-create the nested builder for the message-typed segmentState
        // field when nested builders are forced on.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSegmentStateFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      /** Resets both fields to defaults and clears the has-bits. */
      public Builder clear() {
        super.clear();
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        acceptedInEpoch_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
      }

      /**
       * Builds the message, failing fast if a required field (or a required
       * sub-field of segmentState) is unset.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds without required-field checks. segmentState comes from the
       * nested builder when one exists, otherwise from the plain field.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (segmentStateBuilder_ == null) {
          result.segmentState_ = segmentState_;
        } else {
          result.segmentState_ = segmentStateBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.acceptedInEpoch_ = acceptedInEpoch_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /** Merges set fields of {@code other}: segmentState merges recursively, acceptedInEpoch overwrites. */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
        if (other.hasSegmentState()) {
          mergeSegmentState(other.getSegmentState());
        }
        if (other.hasAcceptedInEpoch()) {
          setAcceptedInEpoch(other.getAcceptedInEpoch());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      /** True when both required fields are set and segmentState is itself fully initialized. */
      public final boolean isInitialized() {
        if (!hasSegmentState()) {

          return false;
        }
        if (!hasAcceptedInEpoch()) {

          return false;
        }
        if (!getSegmentState().isInitialized()) {

          return false;
        }
        return true;
      }

      /**
       * Parses from {@code input} and merges into this builder; on failure the
       * partially-parsed message (if any) is still merged before rethrowing.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits: 0x1 = segmentState, 0x2 = acceptedInEpoch.
      private int bitField0_;

      // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
      // Plain-field storage, used only until getSegmentStateFieldBuilder()
      // switches this builder over to the SingleFieldBuilder below.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public boolean hasSegmentState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
        if (segmentStateBuilder_ == null) {
          return segmentState_;
        } else {
          return segmentStateBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          segmentState_ = value;
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder setSegmentState(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
        if (segmentStateBuilder_ == null) {
          segmentState_ = builderForValue.build();
          onChanged();
        } else {
          segmentStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       *
       * <p>Merges {@code value} into the existing value when one was already
       * set; otherwise simply adopts {@code value}.
       */
      public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
        if (segmentStateBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
            segmentState_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
          } else {
            segmentState_ = value;
          }
          onChanged();
        } else {
          segmentStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public Builder clearSegmentState() {
        if (segmentStateBuilder_ == null) {
          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
          onChanged();
        } else {
          segmentStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       *
       * <p>Marks the field present and hands back a mutable nested builder.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getSegmentStateFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
        if (segmentStateBuilder_ != null) {
          return segmentStateBuilder_.getMessageOrBuilder();
        } else {
          return segmentState_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
       *
       * <p>Lazily creates the SingleFieldBuilder; once created, the plain
       * {@code segmentState_} field is nulled and all access goes through it.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
          getSegmentStateFieldBuilder() {
        if (segmentStateBuilder_ == null) {
          segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
                  segmentState_,
                  getParentForChildren(),
                  isClean());
          segmentState_ = null;
        }
        return segmentStateBuilder_;
      }

      // required uint64 acceptedInEpoch = 2;
      private long acceptedInEpoch_ ;
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public boolean hasAcceptedInEpoch() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public long getAcceptedInEpoch() {
        return acceptedInEpoch_;
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public Builder setAcceptedInEpoch(long value) {
        bitField0_ |= 0x00000002;
        acceptedInEpoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 acceptedInEpoch = 2;</code>
       */
      public Builder clearAcceptedInEpoch() {
        bitField0_ = (bitField0_ & ~0x00000002);
        acceptedInEpoch_ = 0L;
        onChanged();
        return this;
      }
2736
2737 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2738 }
2739
    static {
      // Build the shared default instance eagerly at class load. The
      // boolean-arg constructor skips normal initialization, so field
      // defaults are applied explicitly via initFields().
      defaultInstance = new PersistedRecoveryPaxosData(true);
      defaultInstance.initFields();
    }
2744
2745 // @@protoc_insertion_point(class_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2746 }
2747
  /**
   * Accessor interface implemented by both {@code JournalRequestProto} and
   * its {@code Builder}: a has-flag plus getter for each of the five
   * required fields of {@code hadoop.hdfs.JournalRequestProto}.
   */
  public interface JournalRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 firstTxnId = 2;
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    boolean hasFirstTxnId();
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    long getFirstTxnId();

    // required uint32 numTxns = 3;
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    boolean hasNumTxns();
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    int getNumTxns();

    // required bytes records = 4;
    /**
     * <code>required bytes records = 4;</code>
     */
    boolean hasRecords();
    /**
     * <code>required bytes records = 4;</code>
     */
    com.google.protobuf.ByteString getRecords();

    // required uint64 segmentTxnId = 5;
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    boolean hasSegmentTxnId();
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    long getSegmentTxnId();
  }
2805 /**
2806 * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
2807 */
2808 public static final class JournalRequestProto extends
2809 com.google.protobuf.GeneratedMessage
2810 implements JournalRequestProtoOrBuilder {
    // Use JournalRequestProto.newBuilder() to construct.
    private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      // Capture unknown fields accumulated by the builder.
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the shared default instance created in the
    // static initializer; fields are populated separately via initFields().
    private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, assigned in the class's static initializer.
    private static final JournalRequestProto defaultInstance;
    public static JournalRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that were present on the wire but not in this schema version.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Parses a serialized JournalRequestProto from {@code input}.
     *
     * Wire tags are (fieldNumber &lt;&lt; 3) | wireType: 10 = field 1
     * (length-delimited reqInfo), 16 = field 2 (varint firstTxnId),
     * 24 = field 3 (varint numTxns), 34 = field 4 (length-delimited records),
     * 40 = field 5 (varint segmentTxnId). Tag 0 marks end of input.
     * Unrecognized tags are routed to parseUnknownField. (The position of
     * the default label inside the switch has no effect on case matching.)
     */
    private JournalRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // If reqInfo already appeared, merge the newly read message
              // into the previous one via a temporary builder.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              firstTxnId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              numTxns_ = input.readUInt32();
              break;
            }
            case 34: {
              bitField0_ |= 0x00000008;
              records_ = input.readBytes();
              break;
            }
            case 40: {
              bitField0_ |= 0x00000010;
              segmentTxnId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was read, even on error, so the partial
        // message attached to the thrown exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
    }

    // Shared parser; delegates to the CodedInputStream constructor above.
    public static com.google.protobuf.Parser<JournalRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalRequestProto>() {
      public JournalRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalRequestProto> getParserForType() {
      return PARSER;
    }
2927
    // Packed has-bits for the five required fields (0x01 .. 0x10).
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 firstTxnId = 2;
    public static final int FIRSTTXNID_FIELD_NUMBER = 2;
    private long firstTxnId_;
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    public boolean hasFirstTxnId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 firstTxnId = 2;</code>
     */
    public long getFirstTxnId() {
      return firstTxnId_;
    }

    // required uint32 numTxns = 3;
    public static final int NUMTXNS_FIELD_NUMBER = 3;
    private int numTxns_;
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    public boolean hasNumTxns() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint32 numTxns = 3;</code>
     */
    public int getNumTxns() {
      return numTxns_;
    }

    // required bytes records = 4;
    public static final int RECORDS_FIELD_NUMBER = 4;
    private com.google.protobuf.ByteString records_;
    /**
     * <code>required bytes records = 4;</code>
     */
    public boolean hasRecords() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>required bytes records = 4;</code>
     */
    public com.google.protobuf.ByteString getRecords() {
      return records_;
    }

    // required uint64 segmentTxnId = 5;
    public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
    private long segmentTxnId_;
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    public boolean hasSegmentTxnId() {
      return ((bitField0_ & 0x00000010) == 0x00000010);
    }
    /**
     * <code>required uint64 segmentTxnId = 5;</code>
     */
    public long getSegmentTxnId() {
      return segmentTxnId_;
    }
3014
    // Applies proto2 field defaults; called by both constructors.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      firstTxnId_ = 0L;
      numTxns_ = 0;
      records_ = com.google.protobuf.ByteString.EMPTY;
      segmentTxnId_ = 0L;
    }
    // Memoized result: -1 = not computed, 0 = invalid, 1 = valid.
    private byte memoizedIsInitialized = -1;
    /**
     * A message is initialized when all five required fields are present
     * and the nested reqInfo message is itself initialized.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFirstTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumTxns()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasRecords()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxnId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
3054
    /**
     * Serializes the set fields, in field-number order, followed by any
     * unknown fields. getSerializedSize() is called first for its side
     * effect of priming memoized sizes used during writing.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBytes(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        output.writeUInt64(5, segmentTxnId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    /**
     * Returns (and memoizes) the exact number of bytes writeTo will emit.
     */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, firstTxnId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, numTxns_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(4, records_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(5, segmentTxnId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
3106
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    /**
     * Value equality: two messages are equal when each field's presence
     * matches and, for present fields, the values match, and their unknown
     * field sets are equal.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasFirstTxnId() == other.hasFirstTxnId());
      if (hasFirstTxnId()) {
        result = result && (getFirstTxnId()
            == other.getFirstTxnId());
      }
      result = result && (hasNumTxns() == other.hasNumTxns());
      if (hasNumTxns()) {
        result = result && (getNumTxns()
            == other.getNumTxns());
      }
      result = result && (hasRecords() == other.hasRecords());
      if (hasRecords()) {
        result = result && getRecords()
            .equals(other.getRecords());
      }
      result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
      if (hasSegmentTxnId()) {
        result = result && (getSegmentTxnId()
            == other.getSegmentTxnId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
3154
    // Cached hash; 0 means not yet computed.
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with equals: folds in the field number and value hash
     * of every present field, plus the descriptor and unknown fields.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasFirstTxnId()) {
        hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getFirstTxnId());
      }
      if (hasNumTxns()) {
        hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
        hash = (53 * hash) + getNumTxns();
      }
      if (hasRecords()) {
        hash = (37 * hash) + RECORDS_FIELD_NUMBER;
        hash = (53 * hash) + getRecords().hashCode();
      }
      if (hasSegmentTxnId()) {
        hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxnId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
3187
    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a builder pre-populated from an existing message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
3254 /**
3255 * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
3256 */
3257 public static final class Builder extends
3258 com.google.protobuf.GeneratedMessage.Builder<Builder>
3259 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Pre-creates nested field builders when the runtime requires it.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
3290
      /**
       * Resets every field to its default and clears all has-bits.
       */
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        firstTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        numTxns_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        records_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000008);
        segmentTxnId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }

      // Deep copy via round-trip through a partially built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
      }
3322
      /**
       * Builds the message, throwing if any required field is missing.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds without the required-field check: copies each field value
       * and translates the builder's has-bits into the message's has-bits.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.firstTxnId_ = firstTxnId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.numTxns_ = numTxns_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.records_ = records_;
        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
          to_bitField0_ |= 0x00000010;
        }
        result.segmentTxnId_ = segmentTxnId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
3363
      /**
       * Type-dispatching merge: uses the field-aware overload for
       * JournalRequestProto, otherwise falls back to reflective merging.
       */
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Copies every field that is set on {@code other} into this builder;
       * fields unset on {@code other} are left untouched. Merging the
       * default instance is a no-op.
       */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasFirstTxnId()) {
          setFirstTxnId(other.getFirstTxnId());
        }
        if (other.hasNumTxns()) {
          setNumTxns(other.getNumTxns());
        }
        if (other.hasRecords()) {
          setRecords(other.getRecords());
        }
        if (other.hasSegmentTxnId()) {
          setSegmentTxnId(other.getSegmentTxnId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
3393
3394 public final boolean isInitialized() {
3395 if (!hasReqInfo()) {
3396
3397 return false;
3398 }
3399 if (!hasFirstTxnId()) {
3400
3401 return false;
3402 }
3403 if (!hasNumTxns()) {
3404
3405 return false;
3406 }
3407 if (!hasRecords()) {
3408
3409 return false;
3410 }
3411 if (!hasSegmentTxnId()) {
3412
3413 return false;
3414 }
3415 if (!getReqInfo().isInitialized()) {
3416
3417 return false;
3418 }
3419 return true;
3420 }
3421
      /**
       * Parses from {@code input} and merges the result into this builder.
       * On parse failure the partially parsed message attached to the
       * exception is still merged (in the finally block) before rethrowing.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Has-bits for the builder's five fields (0x01 .. 0x10).
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      // Lazily created; once non-null it owns the field's state.
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Replaces reqInfo with {@code value} and sets its has-bit.
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Replaces reqInfo with the message built from {@code builderForValue}.
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Merges {@code value} into reqInfo: an already-set, non-default
       * message is combined field-by-field; otherwise {@code value}
       * replaces the field. Delegates to the nested builder when active.
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Resets reqInfo to the default instance and clears its has-bit.
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Returns a mutable builder for reqInfo; marks the field as set.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Read-only view: the nested builder's message view if one exists,
       * otherwise the plain field.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Lazily creates the SingleFieldBuilder for reqInfo; afterwards
       * reqInfo_ is nulled because the builder owns the state.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
3557
      // required uint64 firstTxnId = 2;
      // Presence is tracked by has-bit 0x2 in bitField0_.
      private long firstTxnId_ ;
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      public boolean hasFirstTxnId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      public long getFirstTxnId() {
        return firstTxnId_;
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      public Builder setFirstTxnId(long value) {
        bitField0_ |= 0x00000002;
        firstTxnId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 firstTxnId = 2;</code>
       */
      // Clears the has-bit and restores the default value (0L).
      public Builder clearFirstTxnId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        firstTxnId_ = 0L;
        onChanged();
        return this;
      }
3590
      // required uint32 numTxns = 3;
      // Presence is tracked by has-bit 0x4 in bitField0_.
      private int numTxns_ ;
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      public boolean hasNumTxns() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      public int getNumTxns() {
        return numTxns_;
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      public Builder setNumTxns(int value) {
        bitField0_ |= 0x00000004;
        numTxns_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 numTxns = 3;</code>
       */
      // Clears the has-bit and restores the default value (0).
      public Builder clearNumTxns() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numTxns_ = 0;
        onChanged();
        return this;
      }
3623
      // required bytes records = 4;
      // Presence is tracked by has-bit 0x8 in bitField0_.
      private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes records = 4;</code>
       */
      public boolean hasRecords() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>required bytes records = 4;</code>
       */
      public com.google.protobuf.ByteString getRecords() {
        return records_;
      }
      /**
       * <code>required bytes records = 4;</code>
       *
       * @throws NullPointerException if {@code value} is null; bytes fields
       *     are null-hostile and must use ByteString.EMPTY for "empty".
       */
      public Builder setRecords(com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;
        records_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes records = 4;</code>
       */
      // Clears the has-bit and resets to the message's default (empty) bytes.
      public Builder clearRecords() {
        bitField0_ = (bitField0_ & ~0x00000008);
        records_ = getDefaultInstance().getRecords();
        onChanged();
        return this;
      }
3659
      // required uint64 segmentTxnId = 5;
      // Presence is tracked by has-bit 0x10 in bitField0_.
      private long segmentTxnId_ ;
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      public boolean hasSegmentTxnId() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      public long getSegmentTxnId() {
        return segmentTxnId_;
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      public Builder setSegmentTxnId(long value) {
        bitField0_ |= 0x00000010;
        segmentTxnId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 segmentTxnId = 5;</code>
       */
      // Clears the has-bit and restores the default value (0L).
      public Builder clearSegmentTxnId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        segmentTxnId_ = 0L;
        onChanged();
        return this;
      }
3692
3693 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalRequestProto)
3694 }
3695
    // Class initializer: builds the singleton default instance eagerly.
    static {
      defaultInstance = new JournalRequestProto(true);
      defaultInstance.initFields();
    }
3700
3701 // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalRequestProto)
3702 }
3703
  // Read-only view of JournalResponseProto; the message declares no fields,
  // so only the inherited MessageOrBuilder contract applies.
  public interface JournalResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
3707 /**
3708 * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3709 */
3710 public static final class JournalResponseProto extends
3711 com.google.protobuf.GeneratedMessage
3712 implements JournalResponseProtoOrBuilder {
3713 // Use JournalResponseProto.newBuilder() to construct.
3714 private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
3715 super(builder);
3716 this.unknownFields = builder.getUnknownFields();
3717 }
3718 private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
3719
3720 private static final JournalResponseProto defaultInstance;
3721 public static JournalResponseProto getDefaultInstance() {
3722 return defaultInstance;
3723 }
3724
3725 public JournalResponseProto getDefaultInstanceForType() {
3726 return defaultInstance;
3727 }
3728
3729 private final com.google.protobuf.UnknownFieldSet unknownFields;
3730 @java.lang.Override
3731 public final com.google.protobuf.UnknownFieldSet
3732 getUnknownFields() {
3733 return this.unknownFields;
3734 }
3735 private JournalResponseProto(
3736 com.google.protobuf.CodedInputStream input,
3737 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3738 throws com.google.protobuf.InvalidProtocolBufferException {
3739 initFields();
3740 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3741 com.google.protobuf.UnknownFieldSet.newBuilder();
3742 try {
3743 boolean done = false;
3744 while (!done) {
3745 int tag = input.readTag();
3746 switch (tag) {
3747 case 0:
3748 done = true;
3749 break;
3750 default: {
3751 if (!parseUnknownField(input, unknownFields,
3752 extensionRegistry, tag)) {
3753 done = true;
3754 }
3755 break;
3756 }
3757 }
3758 }
3759 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3760 throw e.setUnfinishedMessage(this);
3761 } catch (java.io.IOException e) {
3762 throw new com.google.protobuf.InvalidProtocolBufferException(
3763 e.getMessage()).setUnfinishedMessage(this);
3764 } finally {
3765 this.unknownFields = unknownFields.build();
3766 makeExtensionsImmutable();
3767 }
3768 }
3769 public static final com.google.protobuf.Descriptors.Descriptor
3770 getDescriptor() {
3771 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3772 }
3773
3774 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3775 internalGetFieldAccessorTable() {
3776 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3777 .ensureFieldAccessorsInitialized(
3778 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3779 }
3780
3781 public static com.google.protobuf.Parser<JournalResponseProto> PARSER =
3782 new com.google.protobuf.AbstractParser<JournalResponseProto>() {
3783 public JournalResponseProto parsePartialFrom(
3784 com.google.protobuf.CodedInputStream input,
3785 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3786 throws com.google.protobuf.InvalidProtocolBufferException {
3787 return new JournalResponseProto(input, extensionRegistry);
3788 }
3789 };
3790
3791 @java.lang.Override
3792 public com.google.protobuf.Parser<JournalResponseProto> getParserForType() {
3793 return PARSER;
3794 }
3795
3796 private void initFields() {
3797 }
3798 private byte memoizedIsInitialized = -1;
3799 public final boolean isInitialized() {
3800 byte isInitialized = memoizedIsInitialized;
3801 if (isInitialized != -1) return isInitialized == 1;
3802
3803 memoizedIsInitialized = 1;
3804 return true;
3805 }
3806
3807 public void writeTo(com.google.protobuf.CodedOutputStream output)
3808 throws java.io.IOException {
3809 getSerializedSize();
3810 getUnknownFields().writeTo(output);
3811 }
3812
3813 private int memoizedSerializedSize = -1;
3814 public int getSerializedSize() {
3815 int size = memoizedSerializedSize;
3816 if (size != -1) return size;
3817
3818 size = 0;
3819 size += getUnknownFields().getSerializedSize();
3820 memoizedSerializedSize = size;
3821 return size;
3822 }
3823
3824 private static final long serialVersionUID = 0L;
3825 @java.lang.Override
3826 protected java.lang.Object writeReplace()
3827 throws java.io.ObjectStreamException {
3828 return super.writeReplace();
3829 }
3830
3831 @java.lang.Override
3832 public boolean equals(final java.lang.Object obj) {
3833 if (obj == this) {
3834 return true;
3835 }
3836 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
3837 return super.equals(obj);
3838 }
3839 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
3840
3841 boolean result = true;
3842 result = result &&
3843 getUnknownFields().equals(other.getUnknownFields());
3844 return result;
3845 }
3846
3847 private int memoizedHashCode = 0;
3848 @java.lang.Override
3849 public int hashCode() {
3850 if (memoizedHashCode != 0) {
3851 return memoizedHashCode;
3852 }
3853 int hash = 41;
3854 hash = (19 * hash) + getDescriptorForType().hashCode();
3855 hash = (29 * hash) + getUnknownFields().hashCode();
3856 memoizedHashCode = hash;
3857 return hash;
3858 }
3859
3860 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3861 com.google.protobuf.ByteString data)
3862 throws com.google.protobuf.InvalidProtocolBufferException {
3863 return PARSER.parseFrom(data);
3864 }
3865 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3866 com.google.protobuf.ByteString data,
3867 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3868 throws com.google.protobuf.InvalidProtocolBufferException {
3869 return PARSER.parseFrom(data, extensionRegistry);
3870 }
3871 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
3872 throws com.google.protobuf.InvalidProtocolBufferException {
3873 return PARSER.parseFrom(data);
3874 }
3875 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3876 byte[] data,
3877 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3878 throws com.google.protobuf.InvalidProtocolBufferException {
3879 return PARSER.parseFrom(data, extensionRegistry);
3880 }
3881 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
3882 throws java.io.IOException {
3883 return PARSER.parseFrom(input);
3884 }
3885 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3886 java.io.InputStream input,
3887 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3888 throws java.io.IOException {
3889 return PARSER.parseFrom(input, extensionRegistry);
3890 }
3891 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
3892 throws java.io.IOException {
3893 return PARSER.parseDelimitedFrom(input);
3894 }
3895 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
3896 java.io.InputStream input,
3897 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3898 throws java.io.IOException {
3899 return PARSER.parseDelimitedFrom(input, extensionRegistry);
3900 }
3901 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3902 com.google.protobuf.CodedInputStream input)
3903 throws java.io.IOException {
3904 return PARSER.parseFrom(input);
3905 }
3906 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3907 com.google.protobuf.CodedInputStream input,
3908 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3909 throws java.io.IOException {
3910 return PARSER.parseFrom(input, extensionRegistry);
3911 }
3912
3913 public static Builder newBuilder() { return Builder.create(); }
3914 public Builder newBuilderForType() { return newBuilder(); }
3915 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
3916 return newBuilder().mergeFrom(prototype);
3917 }
3918 public Builder toBuilder() { return newBuilder(this); }
3919
3920 @java.lang.Override
3921 protected Builder newBuilderForType(
3922 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3923 Builder builder = new Builder(parent);
3924 return builder;
3925 }
3926 /**
3927 * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3928 */
3929 public static final class Builder extends
3930 com.google.protobuf.GeneratedMessage.Builder<Builder>
3931 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
3932 public static final com.google.protobuf.Descriptors.Descriptor
3933 getDescriptor() {
3934 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3935 }
3936
3937 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3938 internalGetFieldAccessorTable() {
3939 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3940 .ensureFieldAccessorsInitialized(
3941 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3942 }
3943
3944 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
3945 private Builder() {
3946 maybeForceBuilderInitialization();
3947 }
3948
3949 private Builder(
3950 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3951 super(parent);
3952 maybeForceBuilderInitialization();
3953 }
3954 private void maybeForceBuilderInitialization() {
3955 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3956 }
3957 }
3958 private static Builder create() {
3959 return new Builder();
3960 }
3961
3962 public Builder clear() {
3963 super.clear();
3964 return this;
3965 }
3966
3967 public Builder clone() {
3968 return create().mergeFrom(buildPartial());
3969 }
3970
3971 public com.google.protobuf.Descriptors.Descriptor
3972 getDescriptorForType() {
3973 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3974 }
3975
3976 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
3977 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
3978 }
3979
3980 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
3981 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3982 if (!result.isInitialized()) {
3983 throw newUninitializedMessageException(result);
3984 }
3985 return result;
3986 }
3987
3988 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
3989 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
3990 onBuilt();
3991 return result;
3992 }
3993
3994 public Builder mergeFrom(com.google.protobuf.Message other) {
3995 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
3996 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
3997 } else {
3998 super.mergeFrom(other);
3999 return this;
4000 }
4001 }
4002
4003 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
4004 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
4005 this.mergeUnknownFields(other.getUnknownFields());
4006 return this;
4007 }
4008
4009 public final boolean isInitialized() {
4010 return true;
4011 }
4012
4013 public Builder mergeFrom(
4014 com.google.protobuf.CodedInputStream input,
4015 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4016 throws java.io.IOException {
4017 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null;
4018 try {
4019 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4020 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4021 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage();
4022 throw e;
4023 } finally {
4024 if (parsedMessage != null) {
4025 mergeFrom(parsedMessage);
4026 }
4027 }
4028 return this;
4029 }
4030
4031 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalResponseProto)
4032 }
4033
4034 static {
4035 defaultInstance = new JournalResponseProto(true);
4036 defaultInstance.initFields();
4037 }
4038
4039 // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalResponseProto)
4040 }
4041
  // Read-only accessors for HeartbeatRequestProto's single field, reqInfo.
  public interface HeartbeatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
  }
4059 /**
4060 * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4061 */
4062 public static final class HeartbeatRequestProto extends
4063 com.google.protobuf.GeneratedMessage
4064 implements HeartbeatRequestProtoOrBuilder {
    // Use HeartbeatRequestProto.newBuilder() to construct.
    private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor used only for the default instance below.
    private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final HeartbeatRequestProto defaultInstance;
    public static HeartbeatRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
4086 }
    // Parsing constructor: reads reqInfo (tag 10 = field 1, wire type 2) and
    // preserves everything else as unknown fields.  Note the generator emits
    // the default branch before case 10; switch matching is by value, so the
    // order has no effect on behavior.
    private HeartbeatRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              // If reqInfo appeared earlier in the stream, merge into it
              // rather than overwriting (standard proto2 merge semantics).
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always attach whatever was parsed so far, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
    }

    // NOTE(review): generated as a non-final public static field by this
    // protoc version; later generator versions make PARSER final.
    public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
      public HeartbeatRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
      return PARSER;
    }
4161
    // Bitmask of has-bits; bit 0x1 tracks presence of reqInfo.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // Initializes fields to their proto defaults (reqInfo -> default instance).
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
    }
    // Memoized initialization check: -1 unknown, 0 false, 1 true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // reqInfo is a required field: it must be present and itself initialized.
      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
4204
    // Serializes reqInfo (if set) followed by any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 means "not computed yet".
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
4228
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;

      // Equal iff both have (or lack) reqInfo, the values match, and the
      // unknown-field sets match.
      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code; 0 means "not computed yet".
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
4273
    // Static parse helpers delegating to PARSER for every supported input kind.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
4326
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
4340 /**
4341 * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4342 */
4343 public static final class Builder extends
4344 com.google.protobuf.GeneratedMessage.Builder<Builder>
4345 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested field builders when the runtime requests it.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
4376
      // Resets reqInfo (direct value or nested builder) and its has-bit.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
      }
4400
      // build() enforces that all required fields are set; buildPartial() does not.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        // Copy the reqInfo has-bit, then the value from whichever side owns it
        // (the raw field or the nested builder).
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
4425
      // Generic merge entry point: dispatches to the typed overload when the
      // argument is a HeartbeatRequestProto, otherwise falls back to the
      // reflective merge in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
4434
      // Typed merge: a no-op for the default instance; otherwise merges the
      // set reqInfo field and the other message's unknown fields.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
4443
      // reqInfo is a required message field: it must be set AND itself be
      // fully initialized (its own required fields present).
      public final boolean isInitialized() {
        if (!hasReqInfo()) {

          return false;
        }
        if (!getReqInfo().isInitialized()) {

          return false;
        }
        return true;
      }
4455
      // Parses from a wire stream via PARSER and merges the result into this
      // builder. On parse failure, any partially-parsed message is still
      // merged (in the finally block) before the exception propagates, so no
      // successfully-read data is lost.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits: bit 0x00000001 tracks whether reqInfo has been set.
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // reqInfo_ holds the plain message value until a nested builder is
      // requested; after getReqInfoFieldBuilder() runs, reqInfoBuilder_ owns
      // the value and reqInfo_ is nulled out. Exactly one of the two is live.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * @return true if reqInfo has been set on this builder
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Returns the current value from whichever holder is live: the plain
       * field, or the nested builder's message snapshot.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Replaces reqInfo with the given message and marks it present.
       * Rejects null explicitly (required field).
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Convenience overload: builds the given sub-builder and stores the
       * resulting message, marking the field present.
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Proto merge semantics: if reqInfo is already set to a non-default
       * value, field-merge the incoming message into it; otherwise just
       * replace it. Either way the field ends up marked present.
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Resets reqInfo to its default instance and clears the has-bit.
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Returns a mutable sub-builder for reqInfo. Note this eagerly marks
       * the field present and signals a change, since the caller is assumed
       * to be about to mutate it.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Read-only view of reqInfo without forcing creation of a nested
       * builder: prefers the live builder's view, else the plain field.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Lazily creates the SingleFieldBuilder for reqInfo, seeding it with
       * the current field value. After creation, reqInfo_ is nulled: the
       * builder becomes the single source of truth for the field.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
4591
4592 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatRequestProto)
4593 }
4594
    // Eagerly builds the shared immutable default instance returned by
    // getDefaultInstance(); initFields() applies the field defaults.
    static {
      defaultInstance = new HeartbeatRequestProto(true);
      defaultInstance.initFields();
    }
4599
4600 // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatRequestProto)
4601 }
4602
  /**
   * OrBuilder view for {@code hadoop.hdfs.HeartbeatResponseProto}. The
   * message declares no fields, so this interface adds nothing beyond
   * {@link com.google.protobuf.MessageOrBuilder}.
   */
  public interface HeartbeatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
   *
   * <pre>
   * void response
   * </pre>
   *
   * Field-less ("void") response message. Any content a newer peer might
   * send is preserved in unknownFields for forward compatibility.
   */
  public static final class HeartbeatResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements HeartbeatResponseProtoOrBuilder {
    // Use HeartbeatResponseProto.newBuilder() to construct.
    private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Bootstrap constructor used only to create the singleton defaultInstance.
    private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final HeartbeatResponseProto defaultInstance;
    public static HeartbeatResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor. Since the message has no declared
    // fields, every tag is either end-of-message (0) or routed to
    // parseUnknownField() and retained.
    private HeartbeatResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Publish whatever was read even when an exception is propagating.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
    }

    // Shared stateless parser; delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
      public HeartbeatResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
      return PARSER;
    }

    // No fields to initialize.
    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    // Always initialized: no required fields exist.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    // Serialization writes only the preserved unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // With no declared fields, equality reduces to comparing unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    // Hash over the descriptor and unknown fields; memoized (0 = not yet
    // computed).
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Standard generated parseFrom/parseDelimitedFrom overloads; all delegate
    // to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
     *
     * <pre>
     * void response
     * </pre>
     *
     * Builder for the field-less response; the only state it carries is the
     * inherited unknown-field set.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message-typed fields, so nothing to force even on the full runtime.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Only unknown fields can be merged; there is nothing else to copy.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      // Parse-and-merge; partially-parsed data is merged in the finally block
      // even when the parse throws.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatResponseProto)
    }

    // Eagerly builds the shared immutable default instance.
    static {
      defaultInstance = new HeartbeatResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatResponseProto)
  }
4948
  /**
   * OrBuilder view for {@code hadoop.hdfs.StartLogSegmentRequestProto}:
   * read-only accessors for the two required fields, reqInfo and txid.
   */
  public interface StartLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 txid = 2;
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    boolean hasTxid();
    /**
     * <code>required uint64 txid = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    long getTxid();
  }
4984 /**
4985 * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
4986 *
4987 * <pre>
4988 **
4989 * startLogSegment()
4990 * </pre>
4991 */
4992 public static final class StartLogSegmentRequestProto extends
4993 com.google.protobuf.GeneratedMessage
4994 implements StartLogSegmentRequestProtoOrBuilder {
4995 // Use StartLogSegmentRequestProto.newBuilder() to construct.
4996 private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4997 super(builder);
4998 this.unknownFields = builder.getUnknownFields();
4999 }
5000 private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5001
5002 private static final StartLogSegmentRequestProto defaultInstance;
5003 public static StartLogSegmentRequestProto getDefaultInstance() {
5004 return defaultInstance;
5005 }
5006
5007 public StartLogSegmentRequestProto getDefaultInstanceForType() {
5008 return defaultInstance;
5009 }
5010
5011 private final com.google.protobuf.UnknownFieldSet unknownFields;
5012 @java.lang.Override
5013 public final com.google.protobuf.UnknownFieldSet
5014 getUnknownFields() {
5015 return this.unknownFields;
5016 }
5017 private StartLogSegmentRequestProto(
5018 com.google.protobuf.CodedInputStream input,
5019 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5020 throws com.google.protobuf.InvalidProtocolBufferException {
5021 initFields();
5022 int mutable_bitField0_ = 0;
5023 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5024 com.google.protobuf.UnknownFieldSet.newBuilder();
5025 try {
5026 boolean done = false;
5027 while (!done) {
5028 int tag = input.readTag();
5029 switch (tag) {
5030 case 0:
5031 done = true;
5032 break;
5033 default: {
5034 if (!parseUnknownField(input, unknownFields,
5035 extensionRegistry, tag)) {
5036 done = true;
5037 }
5038 break;
5039 }
5040 case 10: {
5041 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
5042 if (((bitField0_ & 0x00000001) == 0x00000001)) {
5043 subBuilder = reqInfo_.toBuilder();
5044 }
5045 reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
5046 if (subBuilder != null) {
5047 subBuilder.mergeFrom(reqInfo_);
5048 reqInfo_ = subBuilder.buildPartial();
5049 }
5050 bitField0_ |= 0x00000001;
5051 break;
5052 }
5053 case 16: {
5054 bitField0_ |= 0x00000002;
5055 txid_ = input.readUInt64();
5056 break;
5057 }
5058 }
5059 }
5060 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5061 throw e.setUnfinishedMessage(this);
5062 } catch (java.io.IOException e) {
5063 throw new com.google.protobuf.InvalidProtocolBufferException(
5064 e.getMessage()).setUnfinishedMessage(this);
5065 } finally {
5066 this.unknownFields = unknownFields.build();
5067 makeExtensionsImmutable();
5068 }
5069 }
5070 public static final com.google.protobuf.Descriptors.Descriptor
5071 getDescriptor() {
5072 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5073 }
5074
5075 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5076 internalGetFieldAccessorTable() {
5077 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5078 .ensureFieldAccessorsInitialized(
5079 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5080 }
5081
5082 public static com.google.protobuf.Parser<StartLogSegmentRequestProto> PARSER =
5083 new com.google.protobuf.AbstractParser<StartLogSegmentRequestProto>() {
5084 public StartLogSegmentRequestProto parsePartialFrom(
5085 com.google.protobuf.CodedInputStream input,
5086 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5087 throws com.google.protobuf.InvalidProtocolBufferException {
5088 return new StartLogSegmentRequestProto(input, extensionRegistry);
5089 }
5090 };
5091
5092 @java.lang.Override
5093 public com.google.protobuf.Parser<StartLogSegmentRequestProto> getParserForType() {
5094 return PARSER;
5095 }
5096
5097 private int bitField0_;
5098 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5099 public static final int REQINFO_FIELD_NUMBER = 1;
5100 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
5101 /**
5102 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5103 */
5104 public boolean hasReqInfo() {
5105 return ((bitField0_ & 0x00000001) == 0x00000001);
5106 }
5107 /**
5108 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5109 */
5110 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5111 return reqInfo_;
5112 }
5113 /**
5114 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5115 */
5116 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5117 return reqInfo_;
5118 }
5119
5120 // required uint64 txid = 2;
5121 public static final int TXID_FIELD_NUMBER = 2;
5122 private long txid_;
5123 /**
5124 * <code>required uint64 txid = 2;</code>
5125 *
5126 * <pre>
5127 * Transaction ID
5128 * </pre>
5129 */
5130 public boolean hasTxid() {
5131 return ((bitField0_ & 0x00000002) == 0x00000002);
5132 }
5133 /**
5134 * <code>required uint64 txid = 2;</code>
5135 *
5136 * <pre>
5137 * Transaction ID
5138 * </pre>
5139 */
5140 public long getTxid() {
5141 return txid_;
5142 }
5143
5144 private void initFields() {
5145 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5146 txid_ = 0L;
5147 }
5148 private byte memoizedIsInitialized = -1;
5149 public final boolean isInitialized() {
5150 byte isInitialized = memoizedIsInitialized;
5151 if (isInitialized != -1) return isInitialized == 1;
5152
5153 if (!hasReqInfo()) {
5154 memoizedIsInitialized = 0;
5155 return false;
5156 }
5157 if (!hasTxid()) {
5158 memoizedIsInitialized = 0;
5159 return false;
5160 }
5161 if (!getReqInfo().isInitialized()) {
5162 memoizedIsInitialized = 0;
5163 return false;
5164 }
5165 memoizedIsInitialized = 1;
5166 return true;
5167 }
5168
5169 public void writeTo(com.google.protobuf.CodedOutputStream output)
5170 throws java.io.IOException {
5171 getSerializedSize();
5172 if (((bitField0_ & 0x00000001) == 0x00000001)) {
5173 output.writeMessage(1, reqInfo_);
5174 }
5175 if (((bitField0_ & 0x00000002) == 0x00000002)) {
5176 output.writeUInt64(2, txid_);
5177 }
5178 getUnknownFields().writeTo(output);
5179 }
5180
5181 private int memoizedSerializedSize = -1;
5182 public int getSerializedSize() {
5183 int size = memoizedSerializedSize;
5184 if (size != -1) return size;
5185
5186 size = 0;
5187 if (((bitField0_ & 0x00000001) == 0x00000001)) {
5188 size += com.google.protobuf.CodedOutputStream
5189 .computeMessageSize(1, reqInfo_);
5190 }
5191 if (((bitField0_ & 0x00000002) == 0x00000002)) {
5192 size += com.google.protobuf.CodedOutputStream
5193 .computeUInt64Size(2, txid_);
5194 }
5195 size += getUnknownFields().getSerializedSize();
5196 memoizedSerializedSize = size;
5197 return size;
5198 }
5199
5200 private static final long serialVersionUID = 0L;
5201 @java.lang.Override
5202 protected java.lang.Object writeReplace()
5203 throws java.io.ObjectStreamException {
5204 return super.writeReplace();
5205 }
5206
5207 @java.lang.Override
5208 public boolean equals(final java.lang.Object obj) {
5209 if (obj == this) {
5210 return true;
5211 }
5212 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
5213 return super.equals(obj);
5214 }
5215 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
5216
5217 boolean result = true;
5218 result = result && (hasReqInfo() == other.hasReqInfo());
5219 if (hasReqInfo()) {
5220 result = result && getReqInfo()
5221 .equals(other.getReqInfo());
5222 }
5223 result = result && (hasTxid() == other.hasTxid());
5224 if (hasTxid()) {
5225 result = result && (getTxid()
5226 == other.getTxid());
5227 }
5228 result = result &&
5229 getUnknownFields().equals(other.getUnknownFields());
5230 return result;
5231 }
5232
5233 private int memoizedHashCode = 0;
5234 @java.lang.Override
5235 public int hashCode() {
5236 if (memoizedHashCode != 0) {
5237 return memoizedHashCode;
5238 }
5239 int hash = 41;
5240 hash = (19 * hash) + getDescriptorForType().hashCode();
5241 if (hasReqInfo()) {
5242 hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5243 hash = (53 * hash) + getReqInfo().hashCode();
5244 }
5245 if (hasTxid()) {
5246 hash = (37 * hash) + TXID_FIELD_NUMBER;
5247 hash = (53 * hash) + hashLong(getTxid());
5248 }
5249 hash = (29 * hash) + getUnknownFields().hashCode();
5250 memoizedHashCode = hash;
5251 return hash;
5252 }
5253
    // ------------------------------------------------------------------
    // Static parsing entry points. Every overload delegates to PARSER,
    // which performs the actual wire-format decoding; the *delimited*
    // variants first read a varint length prefix from the stream.
    // ------------------------------------------------------------------
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
5306
    /** Returns a fresh builder with all fields cleared. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Returns a new builder pre-populated by merging {@code prototype}. */
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** Returns a builder initialized with this message's current field values. */
    public Builder toBuilder() { return newBuilder(this); }

    /**
     * Framework hook: creates a builder attached to {@code parent} so that
     * nested-builder changes propagate upward through the builder tree.
     */
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
5320 /**
5321 * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
5322 *
5323 * <pre>
5324 **
5325 * startLogSegment()
5326 * </pre>
5327 */
5328 public static final class Builder extends
5329 com.google.protobuf.GeneratedMessage.Builder<Builder>
5330 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
5331 public static final com.google.protobuf.Descriptors.Descriptor
5332 getDescriptor() {
5333 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5334 }
5335
5336 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5337 internalGetFieldAccessorTable() {
5338 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5339 .ensureFieldAccessorsInitialized(
5340 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5341 }
5342
5343 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
5344 private Builder() {
5345 maybeForceBuilderInitialization();
5346 }
5347
5348 private Builder(
5349 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5350 super(parent);
5351 maybeForceBuilderInitialization();
5352 }
5353 private void maybeForceBuilderInitialization() {
5354 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5355 getReqInfoFieldBuilder();
5356 }
5357 }
5358 private static Builder create() {
5359 return new Builder();
5360 }
5361
5362 public Builder clear() {
5363 super.clear();
5364 if (reqInfoBuilder_ == null) {
5365 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5366 } else {
5367 reqInfoBuilder_.clear();
5368 }
5369 bitField0_ = (bitField0_ & ~0x00000001);
5370 txid_ = 0L;
5371 bitField0_ = (bitField0_ & ~0x00000002);
5372 return this;
5373 }
5374
5375 public Builder clone() {
5376 return create().mergeFrom(buildPartial());
5377 }
5378
5379 public com.google.protobuf.Descriptors.Descriptor
5380 getDescriptorForType() {
5381 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5382 }
5383
5384 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
5385 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
5386 }
5387
5388 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
5389 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
5390 if (!result.isInitialized()) {
5391 throw newUninitializedMessageException(result);
5392 }
5393 return result;
5394 }
5395
5396 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
5397 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
5398 int from_bitField0_ = bitField0_;
5399 int to_bitField0_ = 0;
5400 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
5401 to_bitField0_ |= 0x00000001;
5402 }
5403 if (reqInfoBuilder_ == null) {
5404 result.reqInfo_ = reqInfo_;
5405 } else {
5406 result.reqInfo_ = reqInfoBuilder_.build();
5407 }
5408 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
5409 to_bitField0_ |= 0x00000002;
5410 }
5411 result.txid_ = txid_;
5412 result.bitField0_ = to_bitField0_;
5413 onBuilt();
5414 return result;
5415 }
5416
5417 public Builder mergeFrom(com.google.protobuf.Message other) {
5418 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
5419 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
5420 } else {
5421 super.mergeFrom(other);
5422 return this;
5423 }
5424 }
5425
5426 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
5427 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
5428 if (other.hasReqInfo()) {
5429 mergeReqInfo(other.getReqInfo());
5430 }
5431 if (other.hasTxid()) {
5432 setTxid(other.getTxid());
5433 }
5434 this.mergeUnknownFields(other.getUnknownFields());
5435 return this;
5436 }
5437
5438 public final boolean isInitialized() {
5439 if (!hasReqInfo()) {
5440
5441 return false;
5442 }
5443 if (!hasTxid()) {
5444
5445 return false;
5446 }
5447 if (!getReqInfo().isInitialized()) {
5448
5449 return false;
5450 }
5451 return true;
5452 }
5453
5454 public Builder mergeFrom(
5455 com.google.protobuf.CodedInputStream input,
5456 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5457 throws java.io.IOException {
5458 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null;
5459 try {
5460 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5461 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5462 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage();
5463 throw e;
5464 } finally {
5465 if (parsedMessage != null) {
5466 mergeFrom(parsedMessage);
5467 }
5468 }
5469 return this;
5470 }
5471 private int bitField0_;
5472
5473 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5474 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5475 private com.google.protobuf.SingleFieldBuilder<
5476 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
5477 /**
5478 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5479 */
5480 public boolean hasReqInfo() {
5481 return ((bitField0_ & 0x00000001) == 0x00000001);
5482 }
5483 /**
5484 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5485 */
5486 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5487 if (reqInfoBuilder_ == null) {
5488 return reqInfo_;
5489 } else {
5490 return reqInfoBuilder_.getMessage();
5491 }
5492 }
5493 /**
5494 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5495 */
5496 public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5497 if (reqInfoBuilder_ == null) {
5498 if (value == null) {
5499 throw new NullPointerException();
5500 }
5501 reqInfo_ = value;
5502 onChanged();
5503 } else {
5504 reqInfoBuilder_.setMessage(value);
5505 }
5506 bitField0_ |= 0x00000001;
5507 return this;
5508 }
5509 /**
5510 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5511 */
5512 public Builder setReqInfo(
5513 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5514 if (reqInfoBuilder_ == null) {
5515 reqInfo_ = builderForValue.build();
5516 onChanged();
5517 } else {
5518 reqInfoBuilder_.setMessage(builderForValue.build());
5519 }
5520 bitField0_ |= 0x00000001;
5521 return this;
5522 }
5523 /**
5524 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5525 */
5526 public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5527 if (reqInfoBuilder_ == null) {
5528 if (((bitField0_ & 0x00000001) == 0x00000001) &&
5529 reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5530 reqInfo_ =
5531 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5532 } else {
5533 reqInfo_ = value;
5534 }
5535 onChanged();
5536 } else {
5537 reqInfoBuilder_.mergeFrom(value);
5538 }
5539 bitField0_ |= 0x00000001;
5540 return this;
5541 }
5542 /**
5543 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5544 */
5545 public Builder clearReqInfo() {
5546 if (reqInfoBuilder_ == null) {
5547 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5548 onChanged();
5549 } else {
5550 reqInfoBuilder_.clear();
5551 }
5552 bitField0_ = (bitField0_ & ~0x00000001);
5553 return this;
5554 }
5555 /**
5556 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5557 */
5558 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5559 bitField0_ |= 0x00000001;
5560 onChanged();
5561 return getReqInfoFieldBuilder().getBuilder();
5562 }
5563 /**
5564 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5565 */
5566 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5567 if (reqInfoBuilder_ != null) {
5568 return reqInfoBuilder_.getMessageOrBuilder();
5569 } else {
5570 return reqInfo_;
5571 }
5572 }
5573 /**
5574 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5575 */
5576 private com.google.protobuf.SingleFieldBuilder<
5577 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
5578 getReqInfoFieldBuilder() {
5579 if (reqInfoBuilder_ == null) {
5580 reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5581 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5582 reqInfo_,
5583 getParentForChildren(),
5584 isClean());
5585 reqInfo_ = null;
5586 }
5587 return reqInfoBuilder_;
5588 }
5589
5590 // required uint64 txid = 2;
5591 private long txid_ ;
5592 /**
5593 * <code>required uint64 txid = 2;</code>
5594 *
5595 * <pre>
5596 * Transaction ID
5597 * </pre>
5598 */
5599 public boolean hasTxid() {
5600 return ((bitField0_ & 0x00000002) == 0x00000002);
5601 }
5602 /**
5603 * <code>required uint64 txid = 2;</code>
5604 *
5605 * <pre>
5606 * Transaction ID
5607 * </pre>
5608 */
5609 public long getTxid() {
5610 return txid_;
5611 }
5612 /**
5613 * <code>required uint64 txid = 2;</code>
5614 *
5615 * <pre>
5616 * Transaction ID
5617 * </pre>
5618 */
5619 public Builder setTxid(long value) {
5620 bitField0_ |= 0x00000002;
5621 txid_ = value;
5622 onChanged();
5623 return this;
5624 }
5625 /**
5626 * <code>required uint64 txid = 2;</code>
5627 *
5628 * <pre>
5629 * Transaction ID
5630 * </pre>
5631 */
5632 public Builder clearTxid() {
5633 bitField0_ = (bitField0_ & ~0x00000002);
5634 txid_ = 0L;
5635 onChanged();
5636 return this;
5637 }
5638
5639 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5640 }
5641
    static {
      // Eagerly create the singleton default (empty) instance for this type.
      defaultInstance = new StartLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
5646
5647 // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5648 }
5649
  /**
   * Read-only accessor view shared by {@code StartLogSegmentResponseProto} and
   * its builder; the message declares no fields, so no accessors are needed.
   */
  public interface StartLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
5653 /**
5654 * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5655 */
5656 public static final class StartLogSegmentResponseProto extends
5657 com.google.protobuf.GeneratedMessage
5658 implements StartLogSegmentResponseProtoOrBuilder {
5659 // Use StartLogSegmentResponseProto.newBuilder() to construct.
5660 private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5661 super(builder);
5662 this.unknownFields = builder.getUnknownFields();
5663 }
5664 private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5665
5666 private static final StartLogSegmentResponseProto defaultInstance;
5667 public static StartLogSegmentResponseProto getDefaultInstance() {
5668 return defaultInstance;
5669 }
5670
5671 public StartLogSegmentResponseProto getDefaultInstanceForType() {
5672 return defaultInstance;
5673 }
5674
5675 private final com.google.protobuf.UnknownFieldSet unknownFields;
5676 @java.lang.Override
5677 public final com.google.protobuf.UnknownFieldSet
5678 getUnknownFields() {
5679 return this.unknownFields;
5680 }
5681 private StartLogSegmentResponseProto(
5682 com.google.protobuf.CodedInputStream input,
5683 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5684 throws com.google.protobuf.InvalidProtocolBufferException {
5685 initFields();
5686 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5687 com.google.protobuf.UnknownFieldSet.newBuilder();
5688 try {
5689 boolean done = false;
5690 while (!done) {
5691 int tag = input.readTag();
5692 switch (tag) {
5693 case 0:
5694 done = true;
5695 break;
5696 default: {
5697 if (!parseUnknownField(input, unknownFields,
5698 extensionRegistry, tag)) {
5699 done = true;
5700 }
5701 break;
5702 }
5703 }
5704 }
5705 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5706 throw e.setUnfinishedMessage(this);
5707 } catch (java.io.IOException e) {
5708 throw new com.google.protobuf.InvalidProtocolBufferException(
5709 e.getMessage()).setUnfinishedMessage(this);
5710 } finally {
5711 this.unknownFields = unknownFields.build();
5712 makeExtensionsImmutable();
5713 }
5714 }
5715 public static final com.google.protobuf.Descriptors.Descriptor
5716 getDescriptor() {
5717 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5718 }
5719
5720 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5721 internalGetFieldAccessorTable() {
5722 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5723 .ensureFieldAccessorsInitialized(
5724 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5725 }
5726
5727 public static com.google.protobuf.Parser<StartLogSegmentResponseProto> PARSER =
5728 new com.google.protobuf.AbstractParser<StartLogSegmentResponseProto>() {
5729 public StartLogSegmentResponseProto parsePartialFrom(
5730 com.google.protobuf.CodedInputStream input,
5731 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5732 throws com.google.protobuf.InvalidProtocolBufferException {
5733 return new StartLogSegmentResponseProto(input, extensionRegistry);
5734 }
5735 };
5736
5737 @java.lang.Override
5738 public com.google.protobuf.Parser<StartLogSegmentResponseProto> getParserForType() {
5739 return PARSER;
5740 }
5741
5742 private void initFields() {
5743 }
5744 private byte memoizedIsInitialized = -1;
5745 public final boolean isInitialized() {
5746 byte isInitialized = memoizedIsInitialized;
5747 if (isInitialized != -1) return isInitialized == 1;
5748
5749 memoizedIsInitialized = 1;
5750 return true;
5751 }
5752
5753 public void writeTo(com.google.protobuf.CodedOutputStream output)
5754 throws java.io.IOException {
5755 getSerializedSize();
5756 getUnknownFields().writeTo(output);
5757 }
5758
5759 private int memoizedSerializedSize = -1;
5760 public int getSerializedSize() {
5761 int size = memoizedSerializedSize;
5762 if (size != -1) return size;
5763
5764 size = 0;
5765 size += getUnknownFields().getSerializedSize();
5766 memoizedSerializedSize = size;
5767 return size;
5768 }
5769
5770 private static final long serialVersionUID = 0L;
5771 @java.lang.Override
5772 protected java.lang.Object writeReplace()
5773 throws java.io.ObjectStreamException {
5774 return super.writeReplace();
5775 }
5776
5777 @java.lang.Override
5778 public boolean equals(final java.lang.Object obj) {
5779 if (obj == this) {
5780 return true;
5781 }
5782 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
5783 return super.equals(obj);
5784 }
5785 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
5786
5787 boolean result = true;
5788 result = result &&
5789 getUnknownFields().equals(other.getUnknownFields());
5790 return result;
5791 }
5792
5793 private int memoizedHashCode = 0;
5794 @java.lang.Override
5795 public int hashCode() {
5796 if (memoizedHashCode != 0) {
5797 return memoizedHashCode;
5798 }
5799 int hash = 41;
5800 hash = (19 * hash) + getDescriptorForType().hashCode();
5801 hash = (29 * hash) + getUnknownFields().hashCode();
5802 memoizedHashCode = hash;
5803 return hash;
5804 }
5805
5806 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5807 com.google.protobuf.ByteString data)
5808 throws com.google.protobuf.InvalidProtocolBufferException {
5809 return PARSER.parseFrom(data);
5810 }
5811 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5812 com.google.protobuf.ByteString data,
5813 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5814 throws com.google.protobuf.InvalidProtocolBufferException {
5815 return PARSER.parseFrom(data, extensionRegistry);
5816 }
5817 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
5818 throws com.google.protobuf.InvalidProtocolBufferException {
5819 return PARSER.parseFrom(data);
5820 }
5821 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5822 byte[] data,
5823 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5824 throws com.google.protobuf.InvalidProtocolBufferException {
5825 return PARSER.parseFrom(data, extensionRegistry);
5826 }
5827 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
5828 throws java.io.IOException {
5829 return PARSER.parseFrom(input);
5830 }
5831 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5832 java.io.InputStream input,
5833 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5834 throws java.io.IOException {
5835 return PARSER.parseFrom(input, extensionRegistry);
5836 }
5837 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5838 throws java.io.IOException {
5839 return PARSER.parseDelimitedFrom(input);
5840 }
5841 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
5842 java.io.InputStream input,
5843 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5844 throws java.io.IOException {
5845 return PARSER.parseDelimitedFrom(input, extensionRegistry);
5846 }
5847 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5848 com.google.protobuf.CodedInputStream input)
5849 throws java.io.IOException {
5850 return PARSER.parseFrom(input);
5851 }
5852 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5853 com.google.protobuf.CodedInputStream input,
5854 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5855 throws java.io.IOException {
5856 return PARSER.parseFrom(input, extensionRegistry);
5857 }
5858
5859 public static Builder newBuilder() { return Builder.create(); }
5860 public Builder newBuilderForType() { return newBuilder(); }
5861 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
5862 return newBuilder().mergeFrom(prototype);
5863 }
5864 public Builder toBuilder() { return newBuilder(this); }
5865
5866 @java.lang.Override
5867 protected Builder newBuilderForType(
5868 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5869 Builder builder = new Builder(parent);
5870 return builder;
5871 }
5872 /**
5873 * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5874 */
5875 public static final class Builder extends
5876 com.google.protobuf.GeneratedMessage.Builder<Builder>
5877 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
5878 public static final com.google.protobuf.Descriptors.Descriptor
5879 getDescriptor() {
5880 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5881 }
5882
5883 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5884 internalGetFieldAccessorTable() {
5885 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5886 .ensureFieldAccessorsInitialized(
5887 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5888 }
5889
5890 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
5891 private Builder() {
5892 maybeForceBuilderInitialization();
5893 }
5894
5895 private Builder(
5896 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5897 super(parent);
5898 maybeForceBuilderInitialization();
5899 }
5900 private void maybeForceBuilderInitialization() {
5901 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5902 }
5903 }
5904 private static Builder create() {
5905 return new Builder();
5906 }
5907
5908 public Builder clear() {
5909 super.clear();
5910 return this;
5911 }
5912
5913 public Builder clone() {
5914 return create().mergeFrom(buildPartial());
5915 }
5916
5917 public com.google.protobuf.Descriptors.Descriptor
5918 getDescriptorForType() {
5919 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5920 }
5921
5922 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
5923 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
5924 }
5925
5926 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
5927 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
5928 if (!result.isInitialized()) {
5929 throw newUninitializedMessageException(result);
5930 }
5931 return result;
5932 }
5933
5934 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
5935 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
5936 onBuilt();
5937 return result;
5938 }
5939
5940 public Builder mergeFrom(com.google.protobuf.Message other) {
5941 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
5942 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
5943 } else {
5944 super.mergeFrom(other);
5945 return this;
5946 }
5947 }
5948
5949 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
5950 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
5951 this.mergeUnknownFields(other.getUnknownFields());
5952 return this;
5953 }
5954
5955 public final boolean isInitialized() {
5956 return true;
5957 }
5958
5959 public Builder mergeFrom(
5960 com.google.protobuf.CodedInputStream input,
5961 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5962 throws java.io.IOException {
5963 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null;
5964 try {
5965 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5966 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5967 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage();
5968 throw e;
5969 } finally {
5970 if (parsedMessage != null) {
5971 mergeFrom(parsedMessage);
5972 }
5973 }
5974 return this;
5975 }
5976
5977 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5978 }
5979
5980 static {
5981 defaultInstance = new StartLogSegmentResponseProto(true);
5982 defaultInstance.initFields();
5983 }
5984
5985 // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5986 }
5987
  /**
   * Read-only accessor view shared by {@code FinalizeLogSegmentRequestProto}
   * and its builder: request metadata plus the [startTxId, endTxId] range of
   * the segment being finalized.
   */
  public interface FinalizeLogSegmentRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 startTxId = 2;
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    boolean hasStartTxId();
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    long getStartTxId();

    // required uint64 endTxId = 3;
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    boolean hasEndTxId();
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    long getEndTxId();
  }
6025 /**
6026 * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6027 *
6028 * <pre>
6029 **
6030 * finalizeLogSegment()
6031 * </pre>
6032 */
6033 public static final class FinalizeLogSegmentRequestProto extends
6034 com.google.protobuf.GeneratedMessage
6035 implements FinalizeLogSegmentRequestProtoOrBuilder {
    // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
    private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // "noInit" constructor: used only for the singleton default instance.
    private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6042
    // Singleton empty instance, created in the class's static initializer.
    private static final FinalizeLogSegmentRequestProto defaultInstance;
    public static FinalizeLogSegmentRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
6051
    // Fields received on the wire that this generated class does not know;
    // preserved for round-tripping.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads fields from {@code input} until
     * end of stream (tag 0), collecting unrecognized fields into this
     * message's {@code unknownFields}.
     */
    private FinalizeLogSegmentRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE: in a Java switch the position of "default" does not affect
          // matching, so the generator's placement of it ahead of the concrete
          // cases below is harmless.
          switch (tag) {
            case 0:
              // Tag 0 signals end of the input stream.
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Tag 10 = field 1 (reqInfo), length-delimited.  If the field was
              // already seen, merge the repeated occurrences per protobuf
              // duplicated-message-field semantics.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Tag 16 = field 2 (startTxId), varint.
              bitField0_ |= 0x00000002;
              startTxId_ = input.readUInt64();
              break;
            }
            case 24: {
              // Tag 24 = field 3 (endTxId), varint.
              bitField0_ |= 0x00000004;
              endTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        // Attach the partially-parsed message so callers can inspect it.
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        // Wrap plain I/O failures so callers see a single exception type.
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was read — even on failure — so the unfinished
        // message attached above is safe to use.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
6116 public static final com.google.protobuf.Descriptors.Descriptor
6117 getDescriptor() {
6118 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6119 }
6120
6121 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6122 internalGetFieldAccessorTable() {
6123 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
6124 .ensureFieldAccessorsInitialized(
6125 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6126 }
6127
6128 public static com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> PARSER =
6129 new com.google.protobuf.AbstractParser<FinalizeLogSegmentRequestProto>() {
6130 public FinalizeLogSegmentRequestProto parsePartialFrom(
6131 com.google.protobuf.CodedInputStream input,
6132 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6133 throws com.google.protobuf.InvalidProtocolBufferException {
6134 return new FinalizeLogSegmentRequestProto(input, extensionRegistry);
6135 }
6136 };
6137
6138 @java.lang.Override
6139 public com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> getParserForType() {
6140 return PARSER;
6141 }
6142
6143 private int bitField0_;
6144 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6145 public static final int REQINFO_FIELD_NUMBER = 1;
6146 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
6147 /**
6148 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6149 */
6150 public boolean hasReqInfo() {
6151 return ((bitField0_ & 0x00000001) == 0x00000001);
6152 }
6153 /**
6154 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6155 */
6156 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6157 return reqInfo_;
6158 }
6159 /**
6160 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6161 */
6162 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6163 return reqInfo_;
6164 }
6165
6166 // required uint64 startTxId = 2;
6167 public static final int STARTTXID_FIELD_NUMBER = 2;
6168 private long startTxId_;
6169 /**
6170 * <code>required uint64 startTxId = 2;</code>
6171 */
6172 public boolean hasStartTxId() {
6173 return ((bitField0_ & 0x00000002) == 0x00000002);
6174 }
6175 /**
6176 * <code>required uint64 startTxId = 2;</code>
6177 */
6178 public long getStartTxId() {
6179 return startTxId_;
6180 }
6181
6182 // required uint64 endTxId = 3;
6183 public static final int ENDTXID_FIELD_NUMBER = 3;
6184 private long endTxId_;
6185 /**
6186 * <code>required uint64 endTxId = 3;</code>
6187 */
6188 public boolean hasEndTxId() {
6189 return ((bitField0_ & 0x00000004) == 0x00000004);
6190 }
6191 /**
6192 * <code>required uint64 endTxId = 3;</code>
6193 */
6194 public long getEndTxId() {
6195 return endTxId_;
6196 }
6197
    // Resets every field to its proto default; invoked before parsing and when
    // constructing the singleton default instance.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      startTxId_ = 0L;
      endTxId_ = 0L;
    }
    // Memoized isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    /**
     * True only when all three required fields (reqInfo, startTxId, endTxId)
     * are present and the nested reqInfo is itself fully initialized.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
6227
    /**
     * Serializes only the fields whose presence bit is set, in ascending field
     * number order, followed by any unknown fields.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Ensures memoizedSerializedSize is populated before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, endTxId_);
      }
      getUnknownFields().writeTo(output);
    }
6242
    // Cached serialized size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    /**
     * Computes (and memoizes) the byte size of the wire encoding, mirroring
     * writeTo(): only fields with their presence bit set plus unknown fields.
     */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, endTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
6265
6266 private static final long serialVersionUID = 0L;
6267 @java.lang.Override
6268 protected java.lang.Object writeReplace()
6269 throws java.io.ObjectStreamException {
6270 return super.writeReplace();
6271 }
6272
    /**
     * Field-wise equality: two messages are equal when each field's presence
     * flag matches and, where present, the values match — plus equal unknown
     * field sets.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
6303
    // Cached hash; 0 is the "not yet computed" sentinel, so a message whose
    // hash genuinely computes to 0 is simply recomputed on every call.
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with equals(): mixes the descriptor, each present
     * field (keyed by its field number), and the unknown fields.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
6328
6329 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6330 com.google.protobuf.ByteString data)
6331 throws com.google.protobuf.InvalidProtocolBufferException {
6332 return PARSER.parseFrom(data);
6333 }
6334 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6335 com.google.protobuf.ByteString data,
6336 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6337 throws com.google.protobuf.InvalidProtocolBufferException {
6338 return PARSER.parseFrom(data, extensionRegistry);
6339 }
6340 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
6341 throws com.google.protobuf.InvalidProtocolBufferException {
6342 return PARSER.parseFrom(data);
6343 }
6344 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6345 byte[] data,
6346 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6347 throws com.google.protobuf.InvalidProtocolBufferException {
6348 return PARSER.parseFrom(data, extensionRegistry);
6349 }
6350 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
6351 throws java.io.IOException {
6352 return PARSER.parseFrom(input);
6353 }
6354 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6355 java.io.InputStream input,
6356 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6357 throws java.io.IOException {
6358 return PARSER.parseFrom(input, extensionRegistry);
6359 }
6360 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
6361 throws java.io.IOException {
6362 return PARSER.parseDelimitedFrom(input);
6363 }
6364 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
6365 java.io.InputStream input,
6366 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6367 throws java.io.IOException {
6368 return PARSER.parseDelimitedFrom(input, extensionRegistry);
6369 }
6370 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6371 com.google.protobuf.CodedInputStream input)
6372 throws java.io.IOException {
6373 return PARSER.parseFrom(input);
6374 }
6375 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6376 com.google.protobuf.CodedInputStream input,
6377 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6378 throws java.io.IOException {
6379 return PARSER.parseFrom(input, extensionRegistry);
6380 }
6381
6382 public static Builder newBuilder() { return Builder.create(); }
6383 public Builder newBuilderForType() { return newBuilder(); }
6384 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
6385 return newBuilder().mergeFrom(prototype);
6386 }
6387 public Builder toBuilder() { return newBuilder(this); }
6388
6389 @java.lang.Override
6390 protected Builder newBuilderForType(
6391 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6392 Builder builder = new Builder(parent);
6393 return builder;
6394 }
6395 /**
6396 * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6397 *
6398 * <pre>
6399 **
6400 * finalizeLogSegment()
6401 * </pre>
6402 */
6403 public static final class Builder extends
6404 com.google.protobuf.GeneratedMessage.Builder<Builder>
6405 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
6406 public static final com.google.protobuf.Descriptors.Descriptor
6407 getDescriptor() {
6408 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6409 }
6410
6411 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6412 internalGetFieldAccessorTable() {
6413 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
6414 .ensureFieldAccessorsInitialized(
6415 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6416 }
6417
6418 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
6419 private Builder() {
6420 maybeForceBuilderInitialization();
6421 }
6422
6423 private Builder(
6424 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6425 super(parent);
6426 maybeForceBuilderInitialization();
6427 }
6428 private void maybeForceBuilderInitialization() {
6429 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6430 getReqInfoFieldBuilder();
6431 }
6432 }
6433 private static Builder create() {
6434 return new Builder();
6435 }
6436
      /**
       * Resets all three fields to their proto defaults and clears their
       * presence bits; reqInfo is reset either directly or via its nested
       * field builder, whichever is active.
       */
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        startTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        endTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
6451
6452 public Builder clone() {
6453 return create().mergeFrom(buildPartial());
6454 }
6455
6456 public com.google.protobuf.Descriptors.Descriptor
6457 getDescriptorForType() {
6458 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6459 }
6460
6461 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6462 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
6463 }
6464
6465 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
6466 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
6467 if (!result.isInitialized()) {
6468 throw newUninitializedMessageException(result);
6469 }
6470 return result;
6471 }
6472
      /**
       * Builds the message without enforcing required-field checks: copies the
       * builder's field values and translates the builder's presence bits into
       * the message's bitField0_.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        // reqInfo may live either in the plain field or in the nested builder.
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.startTxId_ = startTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.endTxId_ = endTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
6497
6498 public Builder mergeFrom(com.google.protobuf.Message other) {
6499 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
6500 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
6501 } else {
6502 super.mergeFrom(other);
6503 return this;
6504 }
6505 }
6506
      /**
       * Merges another FinalizeLogSegmentRequestProto into this builder:
       * only fields present on {@code other} overwrite/merge into this one,
       * and its unknown fields are appended.  Merging the default instance
       * is a no-op.
       */
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasStartTxId()) {
          setStartTxId(other.getStartTxId());
        }
        if (other.hasEndTxId()) {
          setEndTxId(other.getEndTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
6521
      /**
       * Unmemoized required-field check mirroring the message's
       * isInitialized(): all three required fields must be set and the
       * nested reqInfo must itself be initialized.
       */
      public final boolean isInitialized() {
        if (!hasReqInfo()) {

          return false;
        }
        if (!hasStartTxId()) {

          return false;
        }
        if (!hasEndTxId()) {

          return false;
        }
        if (!getReqInfo().isInitialized()) {

          return false;
        }
        return true;
      }
6541
      /**
       * Parses a message from {@code input} via PARSER and merges it into this
       * builder.  On parse failure the partially parsed message (attached to
       * the exception) is still merged in the finally block before rethrowing.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
6559 private int bitField0_;
6560
6561 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6562 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6563 private com.google.protobuf.SingleFieldBuilder<
6564 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
6565 /**
6566 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6567 */
6568 public boolean hasReqInfo() {
6569 return ((bitField0_ & 0x00000001) == 0x00000001);
6570 }
6571 /**
6572 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6573 */
6574 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6575 if (reqInfoBuilder_ == null) {
6576 return reqInfo_;
6577 } else {
6578 return reqInfoBuilder_.getMessage();
6579 }
6580 }
6581 /**
6582 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6583 */
6584 public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6585 if (reqInfoBuilder_ == null) {
6586 if (value == null) {
6587 throw new NullPointerException();
6588 }
6589 reqInfo_ = value;
6590 onChanged();
6591 } else {
6592 reqInfoBuilder_.setMessage(value);
6593 }
6594 bitField0_ |= 0x00000001;
6595 return this;
6596 }
6597 /**
6598 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6599 */
6600 public Builder setReqInfo(
6601 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
6602 if (reqInfoBuilder_ == null) {
6603 reqInfo_ = builderForValue.build();
6604 onChanged();
6605 } else {
6606 reqInfoBuilder_.setMessage(builderForValue.build());
6607 }
6608 bitField0_ |= 0x00000001;
6609 return this;
6610 }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Merges {@code value} into the current reqInfo.  If reqInfo is already
       * set to a non-default value, the two messages are field-merged;
       * otherwise {@code value} simply replaces it.  Delegated to the nested
       * field builder when one is active.
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
6630 /**
6631 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6632 */
6633 public Builder clearReqInfo() {
6634 if (reqInfoBuilder_ == null) {
6635 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6636 onChanged();
6637 } else {
6638 reqInfoBuilder_.clear();
6639 }
6640 bitField0_ = (bitField0_ & ~0x00000001);
6641 return this;
6642 }
6643 /**
6644 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6645 */
6646 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
6647 bitField0_ |= 0x00000001;
6648 onChanged();
6649 return getReqInfoFieldBuilder().getBuilder();
6650 }
6651 /**
6652 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6653 */
6654 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6655 if (reqInfoBuilder_ != null) {
6656 return reqInfoBuilder_.getMessageOrBuilder();
6657 } else {
6658 return reqInfo_;
6659 }
6660 }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * Lazily creates the nested field builder for reqInfo.  Once created,
       * the builder takes ownership of the current value and the plain
       * {@code reqInfo_} field is nulled out — all access goes through the
       * builder from then on.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
6677
6678 // required uint64 startTxId = 2;
6679 private long startTxId_ ;
6680 /**
6681 * <code>required uint64 startTxId = 2;</code>
6682 */
6683 public boolean hasStartTxId() {
6684 return ((bitField0_ & 0x00000002) == 0x00000002);
6685 }
6686 /**
6687 * <code>required uint64 startTxId = 2;</code>
6688 */
6689 public long getStartTxId() {
6690 return startTxId_;
6691 }
6692 /**
6693 * <code>required uint64 startTxId = 2;</code>
6694 */
6695 public Builder setStartTxId(long value) {
6696 bitField0_ |= 0x00000002;
6697 startTxId_ = value;
6698 onChanged();
6699 return this;
6700 }
6701 /**
6702 * <code>required uint64 startTxId = 2;</code>
6703 */
6704 public Builder clearStartTxId() {
6705 bitField0_ = (bitField0_ & ~0x00000002);
6706 startTxId_ = 0L;
6707 onChanged();
6708 return this;
6709 }
6710
6711 // required uint64 endTxId = 3;
6712 private long endTxId_ ;
6713 /**
6714 * <code>required uint64 endTxId = 3;</code>
6715 */
6716 public boolean hasEndTxId() {
6717 return ((bitField0_ & 0x00000004) == 0x00000004);
6718 }
6719 /**
6720 * <code>required uint64 endTxId = 3;</code>
6721 */
6722 public long getEndTxId() {
6723 return endTxId_;
6724 }
6725 /**
6726 * <code>required uint64 endTxId = 3;</code>
6727 */
6728 public Builder setEndTxId(long value) {
6729 bitField0_ |= 0x00000004;
6730 endTxId_ = value;
6731 onChanged();
6732 return this;
6733 }
6734 /**
6735 * <code>required uint64 endTxId = 3;</code>
6736 */
6737 public Builder clearEndTxId() {
6738 bitField0_ = (bitField0_ & ~0x00000004);
6739 endTxId_ = 0L;
6740 onChanged();
6741 return this;
6742 }
6743
6744 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6745 }
6746
    // Eagerly constructs the shared default instance (the noInit=true ctor
    // skips stream parsing), then populates its default field values.
    static {
      defaultInstance = new FinalizeLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }
6751
6752 // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6753 }
6754
  /**
   * Read interface for {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}.
   * The message declares no fields, so nothing is added beyond the base
   * MessageOrBuilder contract.
   */
  public interface FinalizeLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
6758 /**
6759 * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6760 */
6761 public static final class FinalizeLogSegmentResponseProto extends
6762 com.google.protobuf.GeneratedMessage
6763 implements FinalizeLogSegmentResponseProtoOrBuilder {
6764 // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
6765 private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6766 super(builder);
6767 this.unknownFields = builder.getUnknownFields();
6768 }
6769 private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6770
6771 private static final FinalizeLogSegmentResponseProto defaultInstance;
6772 public static FinalizeLogSegmentResponseProto getDefaultInstance() {
6773 return defaultInstance;
6774 }
6775
6776 public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
6777 return defaultInstance;
6778 }
6779
6780 private final com.google.protobuf.UnknownFieldSet unknownFields;
6781 @java.lang.Override
6782 public final com.google.protobuf.UnknownFieldSet
6783 getUnknownFields() {
6784 return this.unknownFields;
6785 }
    /**
     * Wire-format parsing constructor for the field-less response message:
     * every non-zero tag is routed to the unknown-field set; tag 0 ends the
     * stream.
     */
    private FinalizeLogSegmentResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        // Wrap plain I/O failures so callers see a single exception type.
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Freeze collected unknown fields even if parsing failed.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
6820 public static final com.google.protobuf.Descriptors.Descriptor
6821 getDescriptor() {
6822 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
6823 }
6824
6825 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6826 internalGetFieldAccessorTable() {
6827 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
6828 .ensureFieldAccessorsInitialized(
6829 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
6830 }
6831
6832 public static com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> PARSER =
6833 new com.google.protobuf.AbstractParser<FinalizeLogSegmentResponseProto>() {
6834 public FinalizeLogSegmentResponseProto parsePartialFrom(
6835 com.google.protobuf.CodedInputStream input,
6836 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6837 throws com.google.protobuf.InvalidProtocolBufferException {
6838 return new FinalizeLogSegmentResponseProto(input, extensionRegistry);
6839 }
6840 };
6841
6842 @java.lang.Override
6843 public com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> getParserForType() {
6844 return PARSER;
6845 }
6846
6847 private void initFields() {
6848 }
6849 private byte memoizedIsInitialized = -1;
6850 public final boolean isInitialized() {
6851 byte isInitialized = memoizedIsInitialized;
6852 if (isInitialized != -1) return isInitialized == 1;
6853
6854 memoizedIsInitialized = 1;
6855 return true;
6856 }
6857
6858 public void writeTo(com.google.protobuf.CodedOutputStream output)
6859 throws java.io.IOException {
6860 getSerializedSize();
6861 getUnknownFields().writeTo(output);
6862 }
6863
6864 private int memoizedSerializedSize = -1;
6865 public int getSerializedSize() {
6866 int size = memoizedSerializedSize;
6867 if (size != -1) return size;
6868
6869 size = 0;
6870 size += getUnknownFields().getSerializedSize();
6871 memoizedSerializedSize = size;
6872 return size;
6873 }
6874
6875 private static final long serialVersionUID = 0L;
6876 @java.lang.Override
6877 protected java.lang.Object writeReplace()
6878 throws java.io.ObjectStreamException {
6879 return super.writeReplace();
6880 }
6881
6882 @java.lang.Override
6883 public boolean equals(final java.lang.Object obj) {
6884 if (obj == this) {
6885 return true;
6886 }
6887 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
6888 return super.equals(obj);
6889 }
6890 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;
6891
6892 boolean result = true;
6893 result = result &&
6894 getUnknownFields().equals(other.getUnknownFields());
6895 return result;
6896 }
6897
6898 private int memoizedHashCode = 0;
6899 @java.lang.Override
6900 public int hashCode() {
6901 if (memoizedHashCode != 0) {
6902 return memoizedHashCode;
6903 }
6904 int hash = 41;
6905 hash = (19 * hash) + getDescriptorForType().hashCode();
6906 hash = (29 * hash) + getUnknownFields().hashCode();
6907 memoizedHashCode = hash;
6908 return hash;
6909 }
6910
    // -----------------------------------------------------------------------
    // Static parse entry points.  Every overload delegates to PARSER; they
    // differ only in input source (ByteString, byte[], InputStream,
    // CodedInputStream), whether an ExtensionRegistry is supplied, and
    // whether the message is length-delimited (parseDelimitedFrom).
    // -----------------------------------------------------------------------
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
6963
    // Builder factories: a fresh builder, or one pre-populated from a prototype
    // message (mergeFrom copies the prototype's fields into the new builder).
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Invoked by GeneratedMessage internals; creates a builder attached to the
    // given parent so nested-builder changes propagate upward.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
6977 /**
6978 * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6979 */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Intentionally empty: this message declares no fields, so there are
        // no nested field builders to pre-create even when
        // alwaysUseFieldBuilders is set.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        // Copy by building a partial snapshot and merging it into a new builder.
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
      }

      // build() enforces required-field initialization; buildPartial() does not.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
        // Merging the default instance is a no-op; otherwise only unknown
        // fields can carry data for this field-less message.
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        // No required fields, so any instance is initialized.
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Keep whatever was parsed before the failure, then rethrow so the
          // caller still sees the error.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
    }
7084
    // Eagerly create and initialize the singleton default instance when this
    // message class is loaded.
    static {
      defaultInstance = new FinalizeLogSegmentResponseProto(true);
      defaultInstance.initFields();
    }
7089
7090 // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
7091 }
7092
  /**
   * Accessor interface implemented by both {@code PurgeLogsRequestProto} and
   * its {@code Builder}: a presence check plus a getter for each declared
   * field, so read-only code can accept either form.
   */
  public interface PurgeLogsRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 minTxIdToKeep = 2;
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    boolean hasMinTxIdToKeep();
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    long getMinTxIdToKeep();
  }
7120 /**
7121 * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7122 *
7123 * <pre>
7124 **
7125 * purgeLogs()
7126 * </pre>
7127 */
7128 public static final class PurgeLogsRequestProto extends
7129 com.google.protobuf.GeneratedMessage
7130 implements PurgeLogsRequestProtoOrBuilder {
7131 // Use PurgeLogsRequestProto.newBuilder() to construct.
7132 private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7133 super(builder);
7134 this.unknownFields = builder.getUnknownFields();
7135 }
7136 private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7137
7138 private static final PurgeLogsRequestProto defaultInstance;
7139 public static PurgeLogsRequestProto getDefaultInstance() {
7140 return defaultInstance;
7141 }
7142
7143 public PurgeLogsRequestProto getDefaultInstanceForType() {
7144 return defaultInstance;
7145 }
7146
7147 private final com.google.protobuf.UnknownFieldSet unknownFields;
7148 @java.lang.Override
7149 public final com.google.protobuf.UnknownFieldSet
7150 getUnknownFields() {
7151 return this.unknownFields;
7152 }
7153 private PurgeLogsRequestProto(
7154 com.google.protobuf.CodedInputStream input,
7155 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7156 throws com.google.protobuf.InvalidProtocolBufferException {
7157 initFields();
7158 int mutable_bitField0_ = 0;
7159 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7160 com.google.protobuf.UnknownFieldSet.newBuilder();
7161 try {
7162 boolean done = false;
7163 while (!done) {
7164 int tag = input.readTag();
7165 switch (tag) {
7166 case 0:
7167 done = true;
7168 break;
7169 default: {
7170 if (!parseUnknownField(input, unknownFields,
7171 extensionRegistry, tag)) {
7172 done = true;
7173 }
7174 break;
7175 }
7176 case 10: {
7177 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
7178 if (((bitField0_ & 0x00000001) == 0x00000001)) {
7179 subBuilder = reqInfo_.toBuilder();
7180 }
7181 reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
7182 if (subBuilder != null) {
7183 subBuilder.mergeFrom(reqInfo_);
7184 reqInfo_ = subBuilder.buildPartial();
7185 }
7186 bitField0_ |= 0x00000001;
7187 break;
7188 }
7189 case 16: {
7190 bitField0_ |= 0x00000002;
7191 minTxIdToKeep_ = input.readUInt64();
7192 break;
7193 }
7194 }
7195 }
7196 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7197 throw e.setUnfinishedMessage(this);
7198 } catch (java.io.IOException e) {
7199 throw new com.google.protobuf.InvalidProtocolBufferException(
7200 e.getMessage()).setUnfinishedMessage(this);
7201 } finally {
7202 this.unknownFields = unknownFields.build();
7203 makeExtensionsImmutable();
7204 }
7205 }
7206 public static final com.google.protobuf.Descriptors.Descriptor
7207 getDescriptor() {
7208 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7209 }
7210
7211 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7212 internalGetFieldAccessorTable() {
7213 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7214 .ensureFieldAccessorsInitialized(
7215 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7216 }
7217
7218 public static com.google.protobuf.Parser<PurgeLogsRequestProto> PARSER =
7219 new com.google.protobuf.AbstractParser<PurgeLogsRequestProto>() {
7220 public PurgeLogsRequestProto parsePartialFrom(
7221 com.google.protobuf.CodedInputStream input,
7222 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7223 throws com.google.protobuf.InvalidProtocolBufferException {
7224 return new PurgeLogsRequestProto(input, extensionRegistry);
7225 }
7226 };
7227
7228 @java.lang.Override
7229 public com.google.protobuf.Parser<PurgeLogsRequestProto> getParserForType() {
7230 return PARSER;
7231 }
7232
7233 private int bitField0_;
7234 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7235 public static final int REQINFO_FIELD_NUMBER = 1;
7236 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
7237 /**
7238 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7239 */
7240 public boolean hasReqInfo() {
7241 return ((bitField0_ & 0x00000001) == 0x00000001);
7242 }
7243 /**
7244 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7245 */
7246 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7247 return reqInfo_;
7248 }
7249 /**
7250 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7251 */
7252 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7253 return reqInfo_;
7254 }
7255
7256 // required uint64 minTxIdToKeep = 2;
7257 public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
7258 private long minTxIdToKeep_;
7259 /**
7260 * <code>required uint64 minTxIdToKeep = 2;</code>
7261 */
7262 public boolean hasMinTxIdToKeep() {
7263 return ((bitField0_ & 0x00000002) == 0x00000002);
7264 }
7265 /**
7266 * <code>required uint64 minTxIdToKeep = 2;</code>
7267 */
7268 public long getMinTxIdToKeep() {
7269 return minTxIdToKeep_;
7270 }
7271
7272 private void initFields() {
7273 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7274 minTxIdToKeep_ = 0L;
7275 }
7276 private byte memoizedIsInitialized = -1;
7277 public final boolean isInitialized() {
7278 byte isInitialized = memoizedIsInitialized;
7279 if (isInitialized != -1) return isInitialized == 1;
7280
7281 if (!hasReqInfo()) {
7282 memoizedIsInitialized = 0;
7283 return false;
7284 }
7285 if (!hasMinTxIdToKeep()) {
7286 memoizedIsInitialized = 0;
7287 return false;
7288 }
7289 if (!getReqInfo().isInitialized()) {
7290 memoizedIsInitialized = 0;
7291 return false;
7292 }
7293 memoizedIsInitialized = 1;
7294 return true;
7295 }
7296
7297 public void writeTo(com.google.protobuf.CodedOutputStream output)
7298 throws java.io.IOException {
7299 getSerializedSize();
7300 if (((bitField0_ & 0x00000001) == 0x00000001)) {
7301 output.writeMessage(1, reqInfo_);
7302 }
7303 if (((bitField0_ & 0x00000002) == 0x00000002)) {
7304 output.writeUInt64(2, minTxIdToKeep_);
7305 }
7306 getUnknownFields().writeTo(output);
7307 }
7308
7309 private int memoizedSerializedSize = -1;
7310 public int getSerializedSize() {
7311 int size = memoizedSerializedSize;
7312 if (size != -1) return size;
7313
7314 size = 0;
7315 if (((bitField0_ & 0x00000001) == 0x00000001)) {
7316 size += com.google.protobuf.CodedOutputStream
7317 .computeMessageSize(1, reqInfo_);
7318 }
7319 if (((bitField0_ & 0x00000002) == 0x00000002)) {
7320 size += com.google.protobuf.CodedOutputStream
7321 .computeUInt64Size(2, minTxIdToKeep_);
7322 }
7323 size += getUnknownFields().getSerializedSize();
7324 memoizedSerializedSize = size;
7325 return size;
7326 }
7327
7328 private static final long serialVersionUID = 0L;
7329 @java.lang.Override
7330 protected java.lang.Object writeReplace()
7331 throws java.io.ObjectStreamException {
7332 return super.writeReplace();
7333 }
7334
7335 @java.lang.Override
7336 public boolean equals(final java.lang.Object obj) {
7337 if (obj == this) {
7338 return true;
7339 }
7340 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
7341 return super.equals(obj);
7342 }
7343 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
7344
7345 boolean result = true;
7346 result = result && (hasReqInfo() == other.hasReqInfo());
7347 if (hasReqInfo()) {
7348 result = result && getReqInfo()
7349 .equals(other.getReqInfo());
7350 }
7351 result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
7352 if (hasMinTxIdToKeep()) {
7353 result = result && (getMinTxIdToKeep()
7354 == other.getMinTxIdToKeep());
7355 }
7356 result = result &&
7357 getUnknownFields().equals(other.getUnknownFields());
7358 return result;
7359 }
7360
7361 private int memoizedHashCode = 0;
7362 @java.lang.Override
7363 public int hashCode() {
7364 if (memoizedHashCode != 0) {
7365 return memoizedHashCode;
7366 }
7367 int hash = 41;
7368 hash = (19 * hash) + getDescriptorForType().hashCode();
7369 if (hasReqInfo()) {
7370 hash = (37 * hash) + REQINFO_FIELD_NUMBER;
7371 hash = (53 * hash) + getReqInfo().hashCode();
7372 }
7373 if (hasMinTxIdToKeep()) {
7374 hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
7375 hash = (53 * hash) + hashLong(getMinTxIdToKeep());
7376 }
7377 hash = (29 * hash) + getUnknownFields().hashCode();
7378 memoizedHashCode = hash;
7379 return hash;
7380 }
7381
7382 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7383 com.google.protobuf.ByteString data)
7384 throws com.google.protobuf.InvalidProtocolBufferException {
7385 return PARSER.parseFrom(data);
7386 }
7387 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7388 com.google.protobuf.ByteString data,
7389 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7390 throws com.google.protobuf.InvalidProtocolBufferException {
7391 return PARSER.parseFrom(data, extensionRegistry);
7392 }
7393 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
7394 throws com.google.protobuf.InvalidProtocolBufferException {
7395 return PARSER.parseFrom(data);
7396 }
7397 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7398 byte[] data,
7399 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7400 throws com.google.protobuf.InvalidProtocolBufferException {
7401 return PARSER.parseFrom(data, extensionRegistry);
7402 }
7403 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
7404 throws java.io.IOException {
7405 return PARSER.parseFrom(input);
7406 }
7407 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7408 java.io.InputStream input,
7409 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7410 throws java.io.IOException {
7411 return PARSER.parseFrom(input, extensionRegistry);
7412 }
7413 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
7414 throws java.io.IOException {
7415 return PARSER.parseDelimitedFrom(input);
7416 }
7417 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
7418 java.io.InputStream input,
7419 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7420 throws java.io.IOException {
7421 return PARSER.parseDelimitedFrom(input, extensionRegistry);
7422 }
7423 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7424 com.google.protobuf.CodedInputStream input)
7425 throws java.io.IOException {
7426 return PARSER.parseFrom(input);
7427 }
7428 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7429 com.google.protobuf.CodedInputStream input,
7430 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7431 throws java.io.IOException {
7432 return PARSER.parseFrom(input, extensionRegistry);
7433 }
7434
7435 public static Builder newBuilder() { return Builder.create(); }
7436 public Builder newBuilderForType() { return newBuilder(); }
7437 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
7438 return newBuilder().mergeFrom(prototype);
7439 }
7440 public Builder toBuilder() { return newBuilder(this); }
7441
7442 @java.lang.Override
7443 protected Builder newBuilderForType(
7444 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7445 Builder builder = new Builder(parent);
7446 return builder;
7447 }
7448 /**
7449 * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7450 *
7451 * <pre>
7452 **
7453 * purgeLogs()
7454 * </pre>
7455 */
7456 public static final class Builder extends
7457 com.google.protobuf.GeneratedMessage.Builder<Builder>
7458 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
7459 public static final com.google.protobuf.Descriptors.Descriptor
7460 getDescriptor() {
7461 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7462 }
7463
7464 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7465 internalGetFieldAccessorTable() {
7466 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7467 .ensureFieldAccessorsInitialized(
7468 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7469 }
7470
7471 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
7472 private Builder() {
7473 maybeForceBuilderInitialization();
7474 }
7475
7476 private Builder(
7477 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7478 super(parent);
7479 maybeForceBuilderInitialization();
7480 }
7481 private void maybeForceBuilderInitialization() {
7482 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7483 getReqInfoFieldBuilder();
7484 }
7485 }
7486 private static Builder create() {
7487 return new Builder();
7488 }
7489
7490 public Builder clear() {
7491 super.clear();
7492 if (reqInfoBuilder_ == null) {
7493 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7494 } else {
7495 reqInfoBuilder_.clear();
7496 }
7497 bitField0_ = (bitField0_ & ~0x00000001);
7498 minTxIdToKeep_ = 0L;
7499 bitField0_ = (bitField0_ & ~0x00000002);
7500 return this;
7501 }
7502
7503 public Builder clone() {
7504 return create().mergeFrom(buildPartial());
7505 }
7506
7507 public com.google.protobuf.Descriptors.Descriptor
7508 getDescriptorForType() {
7509 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7510 }
7511
7512 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
7513 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
7514 }
7515
7516 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
7517 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
7518 if (!result.isInitialized()) {
7519 throw newUninitializedMessageException(result);
7520 }
7521 return result;
7522 }
7523
7524 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
7525 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
7526 int from_bitField0_ = bitField0_;
7527 int to_bitField0_ = 0;
7528 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7529 to_bitField0_ |= 0x00000001;
7530 }
7531 if (reqInfoBuilder_ == null) {
7532 result.reqInfo_ = reqInfo_;
7533 } else {
7534 result.reqInfo_ = reqInfoBuilder_.build();
7535 }
7536 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7537 to_bitField0_ |= 0x00000002;
7538 }
7539 result.minTxIdToKeep_ = minTxIdToKeep_;
7540 result.bitField0_ = to_bitField0_;
7541 onBuilt();
7542 return result;
7543 }
7544
7545 public Builder mergeFrom(com.google.protobuf.Message other) {
7546 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
7547 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
7548 } else {
7549 super.mergeFrom(other);
7550 return this;
7551 }
7552 }
7553
7554 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
7555 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
7556 if (other.hasReqInfo()) {
7557 mergeReqInfo(other.getReqInfo());
7558 }
7559 if (other.hasMinTxIdToKeep()) {
7560 setMinTxIdToKeep(other.getMinTxIdToKeep());
7561 }
7562 this.mergeUnknownFields(other.getUnknownFields());
7563 return this;
7564 }
7565
7566 public final boolean isInitialized() {
7567 if (!hasReqInfo()) {
7568
7569 return false;
7570 }
7571 if (!hasMinTxIdToKeep()) {
7572
7573 return false;
7574 }
7575 if (!getReqInfo().isInitialized()) {
7576
7577 return false;
7578 }
7579 return true;
7580 }
7581
7582 public Builder mergeFrom(
7583 com.google.protobuf.CodedInputStream input,
7584 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7585 throws java.io.IOException {
7586 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null;
7587 try {
7588 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7589 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7590 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage();
7591 throw e;
7592 } finally {
7593 if (parsedMessage != null) {
7594 mergeFrom(parsedMessage);
7595 }
7596 }
7597 return this;
7598 }
7599 private int bitField0_;
7600
7601 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7602 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7603 private com.google.protobuf.SingleFieldBuilder<
7604 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
7605 /**
7606 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7607 */
7608 public boolean hasReqInfo() {
7609 return ((bitField0_ & 0x00000001) == 0x00000001);
7610 }
7611 /**
7612 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7613 */
7614 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7615 if (reqInfoBuilder_ == null) {
7616 return reqInfo_;
7617 } else {
7618 return reqInfoBuilder_.getMessage();
7619 }
7620 }
7621 /**
7622 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7623 */
7624 public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7625 if (reqInfoBuilder_ == null) {
7626 if (value == null) {
7627 throw new NullPointerException();
7628 }
7629 reqInfo_ = value;
7630 onChanged();
7631 } else {
7632 reqInfoBuilder_.setMessage(value);
7633 }
7634 bitField0_ |= 0x00000001;
7635 return this;
7636 }
7637 /**
7638 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7639 */
7640 public Builder setReqInfo(
7641 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
7642 if (reqInfoBuilder_ == null) {
7643 reqInfo_ = builderForValue.build();
7644 onChanged();
7645 } else {
7646 reqInfoBuilder_.setMessage(builderForValue.build());
7647 }
7648 bitField0_ |= 0x00000001;
7649 return this;
7650 }
7651 /**
7652 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7653 */
7654 public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7655 if (reqInfoBuilder_ == null) {
7656 if (((bitField0_ & 0x00000001) == 0x00000001) &&
7657 reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
7658 reqInfo_ =
7659 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
7660 } else {
7661 reqInfo_ = value;
7662 }
7663 onChanged();
7664 } else {
7665 reqInfoBuilder_.mergeFrom(value);
7666 }
7667 bitField0_ |= 0x00000001;
7668 return this;
7669 }
7670 /**
7671 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7672 */
7673 public Builder clearReqInfo() {
7674 if (reqInfoBuilder_ == null) {
7675 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7676 onChanged();
7677 } else {
7678 reqInfoBuilder_.clear();
7679 }
7680 bitField0_ = (bitField0_ & ~0x00000001);
7681 return this;
7682 }
7683 /**
7684 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7685 */
7686 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
7687 bitField0_ |= 0x00000001;
7688 onChanged();
7689 return getReqInfoFieldBuilder().getBuilder();
7690 }
7691 /**
7692 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7693 */
7694 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7695 if (reqInfoBuilder_ != null) {
7696 return reqInfoBuilder_.getMessageOrBuilder();
7697 } else {
7698 return reqInfo_;
7699 }
7700 }
7701 /**
7702 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7703 */
7704 private com.google.protobuf.SingleFieldBuilder<
7705 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
7706 getReqInfoFieldBuilder() {
7707 if (reqInfoBuilder_ == null) {
7708 reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7709 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
7710 reqInfo_,
7711 getParentForChildren(),
7712 isClean());
7713 reqInfo_ = null;
7714 }
7715 return reqInfoBuilder_;
7716 }
7717
      // required uint64 minTxIdToKeep = 2;
      // Lowest transaction id that must be retained; older log data may be purged.
      private long minTxIdToKeep_ ;
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public boolean hasMinTxIdToKeep() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 minTxIdToKeep = 2;</code>
       */
      public long getMinTxIdToKeep() {
        return minTxIdToKeep_;
      }
7732 /**
7733 * <code>required uint64 minTxIdToKeep = 2;</code>
7734 */
7735 public Builder setMinTxIdToKeep(long value) {
7736 bitField0_ |= 0x00000002;
7737 minTxIdToKeep_ = value;
7738 onChanged();
7739 return this;
7740 }
7741 /**
7742 * <code>required uint64 minTxIdToKeep = 2;</code>
7743 */
7744 public Builder clearMinTxIdToKeep() {
7745 bitField0_ = (bitField0_ & ~0x00000002);
7746 minTxIdToKeep_ = 0L;
7747 onChanged();
7748 return this;
7749 }
7750
7751 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsRequestProto)
7752 }
7753
    static {
      // Eagerly build the singleton returned by getDefaultInstance().
      defaultInstance = new PurgeLogsRequestProto(true);
      defaultInstance.initFields();
    }
7758
7759 // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsRequestProto)
7760 }
7761
  /**
   * Accessor interface implemented by {@code PurgeLogsResponseProto} and its
   * builder. The message declares no fields, so no accessors appear here.
   */
  public interface PurgeLogsResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
7765 /**
7766 * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7767 */
7768 public static final class PurgeLogsResponseProto extends
7769 com.google.protobuf.GeneratedMessage
7770 implements PurgeLogsResponseProtoOrBuilder {
7771 // Use PurgeLogsResponseProto.newBuilder() to construct.
7772 private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7773 super(builder);
7774 this.unknownFields = builder.getUnknownFields();
7775 }
7776 private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7777
7778 private static final PurgeLogsResponseProto defaultInstance;
7779 public static PurgeLogsResponseProto getDefaultInstance() {
7780 return defaultInstance;
7781 }
7782
7783 public PurgeLogsResponseProto getDefaultInstanceForType() {
7784 return defaultInstance;
7785 }
7786
7787 private final com.google.protobuf.UnknownFieldSet unknownFields;
7788 @java.lang.Override
7789 public final com.google.protobuf.UnknownFieldSet
7790 getUnknownFields() {
7791 return this.unknownFields;
7792 }
7793 private PurgeLogsResponseProto(
7794 com.google.protobuf.CodedInputStream input,
7795 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7796 throws com.google.protobuf.InvalidProtocolBufferException {
7797 initFields();
7798 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7799 com.google.protobuf.UnknownFieldSet.newBuilder();
7800 try {
7801 boolean done = false;
7802 while (!done) {
7803 int tag = input.readTag();
7804 switch (tag) {
7805 case 0:
7806 done = true;
7807 break;
7808 default: {
7809 if (!parseUnknownField(input, unknownFields,
7810 extensionRegistry, tag)) {
7811 done = true;
7812 }
7813 break;
7814 }
7815 }
7816 }
7817 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7818 throw e.setUnfinishedMessage(this);
7819 } catch (java.io.IOException e) {
7820 throw new com.google.protobuf.InvalidProtocolBufferException(
7821 e.getMessage()).setUnfinishedMessage(this);
7822 } finally {
7823 this.unknownFields = unknownFields.build();
7824 makeExtensionsImmutable();
7825 }
7826 }
7827 public static final com.google.protobuf.Descriptors.Descriptor
7828 getDescriptor() {
7829 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7830 }
7831
7832 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7833 internalGetFieldAccessorTable() {
7834 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7835 .ensureFieldAccessorsInitialized(
7836 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
7837 }
7838
7839 public static com.google.protobuf.Parser<PurgeLogsResponseProto> PARSER =
7840 new com.google.protobuf.AbstractParser<PurgeLogsResponseProto>() {
7841 public PurgeLogsResponseProto parsePartialFrom(
7842 com.google.protobuf.CodedInputStream input,
7843 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7844 throws com.google.protobuf.InvalidProtocolBufferException {
7845 return new PurgeLogsResponseProto(input, extensionRegistry);
7846 }
7847 };
7848
7849 @java.lang.Override
7850 public com.google.protobuf.Parser<PurgeLogsResponseProto> getParserForType() {
7851 return PARSER;
7852 }
7853
7854 private void initFields() {
7855 }
7856 private byte memoizedIsInitialized = -1;
7857 public final boolean isInitialized() {
7858 byte isInitialized = memoizedIsInitialized;
7859 if (isInitialized != -1) return isInitialized == 1;
7860
7861 memoizedIsInitialized = 1;
7862 return true;
7863 }
7864
7865 public void writeTo(com.google.protobuf.CodedOutputStream output)
7866 throws java.io.IOException {
7867 getSerializedSize();
7868 getUnknownFields().writeTo(output);
7869 }
7870
7871 private int memoizedSerializedSize = -1;
7872 public int getSerializedSize() {
7873 int size = memoizedSerializedSize;
7874 if (size != -1) return size;
7875
7876 size = 0;
7877 size += getUnknownFields().getSerializedSize();
7878 memoizedSerializedSize = size;
7879 return size;
7880 }
7881
7882 private static final long serialVersionUID = 0L;
7883 @java.lang.Override
7884 protected java.lang.Object writeReplace()
7885 throws java.io.ObjectStreamException {
7886 return super.writeReplace();
7887 }
7888
7889 @java.lang.Override
7890 public boolean equals(final java.lang.Object obj) {
7891 if (obj == this) {
7892 return true;
7893 }
7894 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
7895 return super.equals(obj);
7896 }
7897 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
7898
7899 boolean result = true;
7900 result = result &&
7901 getUnknownFields().equals(other.getUnknownFields());
7902 return result;
7903 }
7904
7905 private int memoizedHashCode = 0;
7906 @java.lang.Override
7907 public int hashCode() {
7908 if (memoizedHashCode != 0) {
7909 return memoizedHashCode;
7910 }
7911 int hash = 41;
7912 hash = (19 * hash) + getDescriptorForType().hashCode();
7913 hash = (29 * hash) + getUnknownFields().hashCode();
7914 memoizedHashCode = hash;
7915 return hash;
7916 }
7917
7918 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7919 com.google.protobuf.ByteString data)
7920 throws com.google.protobuf.InvalidProtocolBufferException {
7921 return PARSER.parseFrom(data);
7922 }
7923 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7924 com.google.protobuf.ByteString data,
7925 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7926 throws com.google.protobuf.InvalidProtocolBufferException {
7927 return PARSER.parseFrom(data, extensionRegistry);
7928 }
7929 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
7930 throws com.google.protobuf.InvalidProtocolBufferException {
7931 return PARSER.parseFrom(data);
7932 }
7933 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7934 byte[] data,
7935 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7936 throws com.google.protobuf.InvalidProtocolBufferException {
7937 return PARSER.parseFrom(data, extensionRegistry);
7938 }
7939 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
7940 throws java.io.IOException {
7941 return PARSER.parseFrom(input);
7942 }
7943 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7944 java.io.InputStream input,
7945 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7946 throws java.io.IOException {
7947 return PARSER.parseFrom(input, extensionRegistry);
7948 }
7949 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
7950 throws java.io.IOException {
7951 return PARSER.parseDelimitedFrom(input);
7952 }
7953 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
7954 java.io.InputStream input,
7955 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7956 throws java.io.IOException {
7957 return PARSER.parseDelimitedFrom(input, extensionRegistry);
7958 }
7959 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7960 com.google.protobuf.CodedInputStream input)
7961 throws java.io.IOException {
7962 return PARSER.parseFrom(input);
7963 }
7964 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7965 com.google.protobuf.CodedInputStream input,
7966 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7967 throws java.io.IOException {
7968 return PARSER.parseFrom(input, extensionRegistry);
7969 }
7970
7971 public static Builder newBuilder() { return Builder.create(); }
7972 public Builder newBuilderForType() { return newBuilder(); }
7973 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
7974 return newBuilder().mergeFrom(prototype);
7975 }
7976 public Builder toBuilder() { return newBuilder(this); }
7977
7978 @java.lang.Override
7979 protected Builder newBuilderForType(
7980 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7981 Builder builder = new Builder(parent);
7982 return builder;
7983 }
7984 /**
7985 * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7986 */
7987 public static final class Builder extends
7988 com.google.protobuf.GeneratedMessage.Builder<Builder>
7989 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
7990 public static final com.google.protobuf.Descriptors.Descriptor
7991 getDescriptor() {
7992 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7993 }
7994
7995 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7996 internalGetFieldAccessorTable() {
7997 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7998 .ensureFieldAccessorsInitialized(
7999 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
8000 }
8001
8002 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
8003 private Builder() {
8004 maybeForceBuilderInitialization();
8005 }
8006
8007 private Builder(
8008 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8009 super(parent);
8010 maybeForceBuilderInitialization();
8011 }
8012 private void maybeForceBuilderInitialization() {
8013 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8014 }
8015 }
8016 private static Builder create() {
8017 return new Builder();
8018 }
8019
8020 public Builder clear() {
8021 super.clear();
8022 return this;
8023 }
8024
8025 public Builder clone() {
8026 return create().mergeFrom(buildPartial());
8027 }
8028
8029 public com.google.protobuf.Descriptors.Descriptor
8030 getDescriptorForType() {
8031 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
8032 }
8033
8034 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
8035 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
8036 }
8037
8038 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
8039 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
8040 if (!result.isInitialized()) {
8041 throw newUninitializedMessageException(result);
8042 }
8043 return result;
8044 }
8045
8046 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
8047 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
8048 onBuilt();
8049 return result;
8050 }
8051
8052 public Builder mergeFrom(com.google.protobuf.Message other) {
8053 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
8054 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
8055 } else {
8056 super.mergeFrom(other);
8057 return this;
8058 }
8059 }
8060
8061 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
8062 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
8063 this.mergeUnknownFields(other.getUnknownFields());
8064 return this;
8065 }
8066
8067 public final boolean isInitialized() {
8068 return true;
8069 }
8070
8071 public Builder mergeFrom(
8072 com.google.protobuf.CodedInputStream input,
8073 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8074 throws java.io.IOException {
8075 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null;
8076 try {
8077 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8078 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8079 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage();
8080 throw e;
8081 } finally {
8082 if (parsedMessage != null) {
8083 mergeFrom(parsedMessage);
8084 }
8085 }
8086 return this;
8087 }
8088
8089 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsResponseProto)
8090 }
8091
8092 static {
8093 defaultInstance = new PurgeLogsResponseProto(true);
8094 defaultInstance.initFields();
8095 }
8096
8097 // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsResponseProto)
8098 }
8099
  /**
   * Accessor interface implemented by {@code IsFormattedRequestProto} and its
   * builder.
   */
  public interface IsFormattedRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
8117 /**
8118 * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8119 *
8120 * <pre>
8121 **
8122 * isFormatted()
8123 * </pre>
8124 */
8125 public static final class IsFormattedRequestProto extends
8126 com.google.protobuf.GeneratedMessage
8127 implements IsFormattedRequestProtoOrBuilder {
    // Use IsFormattedRequestProto.newBuilder() to construct.
    private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Internal constructor used only for the shared default instance.
    private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final IsFormattedRequestProto defaultInstance;
    public static IsFormattedRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public IsFormattedRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
8149 }
    // Wire-format constructor: reads fields until end-of-message (tag 0).
    // Note: the 'default' label appearing before 'case 10' is legal Java and
    // does not change dispatch; 'default' only matches when no case does.
    private IsFormattedRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            // Field 1 (jid), wire type 2 (length-delimited): tag = (1 << 3) | 2.
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // jid already seen on the wire: merge the new value into it.
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always record whatever was read, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
    }

    // Stateless parser delegating to the wire-format constructor above.
    public static com.google.protobuf.Parser<IsFormattedRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<IsFormattedRequestProto>() {
      public IsFormattedRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new IsFormattedRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<IsFormattedRequestProto> getParserForType() {
      return PARSER;
    }
8224
    // Presence bits for optional/required fields; bit 0 tracks jid.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // jid is a required field and must itself be fully initialized.
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
8267
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // getSerializedSize() also populates the memoized size used below.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
8298
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;

      // Equal iff jid presence and value match and unknown fields match.
      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
8336
    // Static parse entry points, all delegating to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
8403 /**
8404 * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8405 *
8406 * <pre>
8407 **
8408 * isFormatted()
8409 * </pre>
8410 */
8411 public static final class Builder extends
8412 com.google.protobuf.GeneratedMessage.Builder<Builder>
8413 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Eagerly create the jid field builder when field builders are enabled
        // (i.e. when this builder has a parent to notify of changes).
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
8444
      public Builder clear() {
        super.clear();
        // Reset jid to its default and drop its has-bit.
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
      }

      // Like buildPartial(), but rejects messages missing required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
8476
      // Builds the message without enforcing required-field initialization;
      // copies the builder's bit flags and jid value into the result.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
8511
      /**
       * Returns true only when the required {@code jid} field is set and the
       * jid sub-message is itself fully initialized.
       */
      public final boolean isInitialized() {
        if (!hasJid()) {

          return false;
        }
        if (!getJid().isInitialized()) {

          return false;
        }
        return true;
      }
8523
      /**
       * Parses a message from the stream and merges it into this builder.
       * On a parse error, any partially parsed data attached to the exception
       * is still merged (in the finally block) before the exception escapes,
       * so the builder retains whatever was read successfully.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Bit 0 records whether jid has been explicitly set on this builder.
      private int bitField0_;

      // required .hadoop.hdfs.JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      // Lazily created by getJidFieldBuilder(); once non-null it owns the
      // jid state and jid_ is nulled out (see getJidFieldBuilder).
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8547 /**
8548 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8549 */
8550 public boolean hasJid() {
8551 return ((bitField0_ & 0x00000001) == 0x00000001);
8552 }
8553 /**
8554 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8555 */
8556 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8557 if (jidBuilder_ == null) {
8558 return jid_;
8559 } else {
8560 return jidBuilder_.getMessage();
8561 }
8562 }
8563 /**
8564 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8565 */
8566 public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8567 if (jidBuilder_ == null) {
8568 if (value == null) {
8569 throw new NullPointerException();
8570 }
8571 jid_ = value;
8572 onChanged();
8573 } else {
8574 jidBuilder_.setMessage(value);
8575 }
8576 bitField0_ |= 0x00000001;
8577 return this;
8578 }
8579 /**
8580 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8581 */
8582 public Builder setJid(
8583 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8584 if (jidBuilder_ == null) {
8585 jid_ = builderForValue.build();
8586 onChanged();
8587 } else {
8588 jidBuilder_.setMessage(builderForValue.build());
8589 }
8590 bitField0_ |= 0x00000001;
8591 return this;
8592 }
8593 /**
8594 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8595 */
8596 public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8597 if (jidBuilder_ == null) {
8598 if (((bitField0_ & 0x00000001) == 0x00000001) &&
8599 jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8600 jid_ =
8601 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8602 } else {
8603 jid_ = value;
8604 }
8605 onChanged();
8606 } else {
8607 jidBuilder_.mergeFrom(value);
8608 }
8609 bitField0_ |= 0x00000001;
8610 return this;
8611 }
8612 /**
8613 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8614 */
8615 public Builder clearJid() {
8616 if (jidBuilder_ == null) {
8617 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8618 onChanged();
8619 } else {
8620 jidBuilder_.clear();
8621 }
8622 bitField0_ = (bitField0_ & ~0x00000001);
8623 return this;
8624 }
8625 /**
8626 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8627 */
8628 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8629 bitField0_ |= 0x00000001;
8630 onChanged();
8631 return getJidFieldBuilder().getBuilder();
8632 }
8633 /**
8634 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8635 */
8636 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8637 if (jidBuilder_ != null) {
8638 return jidBuilder_.getMessageOrBuilder();
8639 } else {
8640 return jid_;
8641 }
8642 }
8643 /**
8644 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8645 */
8646 private com.google.protobuf.SingleFieldBuilder<
8647 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
8648 getJidFieldBuilder() {
8649 if (jidBuilder_ == null) {
8650 jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8651 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8652 jid_,
8653 getParentForChildren(),
8654 isClean());
8655 jid_ = null;
8656 }
8657 return jidBuilder_;
8658 }
8659
8660 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedRequestProto)
8661 }
8662
    // Creates the shared immutable default instance (noInit constructor skips
    // parsing) and initializes its fields to their proto defaults.
    static {
      defaultInstance = new IsFormattedRequestProto(true);
      defaultInstance.initFields();
    }
8667
8668 // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedRequestProto)
8669 }
8670
  /**
   * Read-only accessor interface implemented by both
   * {@code IsFormattedResponseProto} and its Builder.
   */
  public interface IsFormattedResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required bool isFormatted = 1;
    /**
     * <code>required bool isFormatted = 1;</code>
     *
     * @return true if the isFormatted field has been set.
     */
    boolean hasIsFormatted();
    /**
     * <code>required bool isFormatted = 1;</code>
     *
     * @return the field value; false if unset.
     */
    boolean getIsFormatted();
  }
8684 /**
8685 * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8686 */
8687 public static final class IsFormattedResponseProto extends
8688 com.google.protobuf.GeneratedMessage
8689 implements IsFormattedResponseProtoOrBuilder {
8690 // Use IsFormattedResponseProto.newBuilder() to construct.
8691 private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8692 super(builder);
8693 this.unknownFields = builder.getUnknownFields();
8694 }
8695 private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8696
8697 private static final IsFormattedResponseProto defaultInstance;
8698 public static IsFormattedResponseProto getDefaultInstance() {
8699 return defaultInstance;
8700 }
8701
8702 public IsFormattedResponseProto getDefaultInstanceForType() {
8703 return defaultInstance;
8704 }
8705
8706 private final com.google.protobuf.UnknownFieldSet unknownFields;
8707 @java.lang.Override
8708 public final com.google.protobuf.UnknownFieldSet
8709 getUnknownFields() {
8710 return this.unknownFields;
8711 }
8712 private IsFormattedResponseProto(
8713 com.google.protobuf.CodedInputStream input,
8714 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8715 throws com.google.protobuf.InvalidProtocolBufferException {
8716 initFields();
8717 int mutable_bitField0_ = 0;
8718 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8719 com.google.protobuf.UnknownFieldSet.newBuilder();
8720 try {
8721 boolean done = false;
8722 while (!done) {
8723 int tag = input.readTag();
8724 switch (tag) {
8725 case 0:
8726 done = true;
8727 break;
8728 default: {
8729 if (!parseUnknownField(input, unknownFields,
8730 extensionRegistry, tag)) {
8731 done = true;
8732 }
8733 break;
8734 }
8735 case 8: {
8736 bitField0_ |= 0x00000001;
8737 isFormatted_ = input.readBool();
8738 break;
8739 }
8740 }
8741 }
8742 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8743 throw e.setUnfinishedMessage(this);
8744 } catch (java.io.IOException e) {
8745 throw new com.google.protobuf.InvalidProtocolBufferException(
8746 e.getMessage()).setUnfinishedMessage(this);
8747 } finally {
8748 this.unknownFields = unknownFields.build();
8749 makeExtensionsImmutable();
8750 }
8751 }
8752 public static final com.google.protobuf.Descriptors.Descriptor
8753 getDescriptor() {
8754 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8755 }
8756
8757 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8758 internalGetFieldAccessorTable() {
8759 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8760 .ensureFieldAccessorsInitialized(
8761 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8762 }
8763
8764 public static com.google.protobuf.Parser<IsFormattedResponseProto> PARSER =
8765 new com.google.protobuf.AbstractParser<IsFormattedResponseProto>() {
8766 public IsFormattedResponseProto parsePartialFrom(
8767 com.google.protobuf.CodedInputStream input,
8768 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8769 throws com.google.protobuf.InvalidProtocolBufferException {
8770 return new IsFormattedResponseProto(input, extensionRegistry);
8771 }
8772 };
8773
8774 @java.lang.Override
8775 public com.google.protobuf.Parser<IsFormattedResponseProto> getParserForType() {
8776 return PARSER;
8777 }
8778
8779 private int bitField0_;
8780 // required bool isFormatted = 1;
8781 public static final int ISFORMATTED_FIELD_NUMBER = 1;
8782 private boolean isFormatted_;
8783 /**
8784 * <code>required bool isFormatted = 1;</code>
8785 */
8786 public boolean hasIsFormatted() {
8787 return ((bitField0_ & 0x00000001) == 0x00000001);
8788 }
8789 /**
8790 * <code>required bool isFormatted = 1;</code>
8791 */
8792 public boolean getIsFormatted() {
8793 return isFormatted_;
8794 }
8795
8796 private void initFields() {
8797 isFormatted_ = false;
8798 }
8799 private byte memoizedIsInitialized = -1;
8800 public final boolean isInitialized() {
8801 byte isInitialized = memoizedIsInitialized;
8802 if (isInitialized != -1) return isInitialized == 1;
8803
8804 if (!hasIsFormatted()) {
8805 memoizedIsInitialized = 0;
8806 return false;
8807 }
8808 memoizedIsInitialized = 1;
8809 return true;
8810 }
8811
8812 public void writeTo(com.google.protobuf.CodedOutputStream output)
8813 throws java.io.IOException {
8814 getSerializedSize();
8815 if (((bitField0_ & 0x00000001) == 0x00000001)) {
8816 output.writeBool(1, isFormatted_);
8817 }
8818 getUnknownFields().writeTo(output);
8819 }
8820
8821 private int memoizedSerializedSize = -1;
8822 public int getSerializedSize() {
8823 int size = memoizedSerializedSize;
8824 if (size != -1) return size;
8825
8826 size = 0;
8827 if (((bitField0_ & 0x00000001) == 0x00000001)) {
8828 size += com.google.protobuf.CodedOutputStream
8829 .computeBoolSize(1, isFormatted_);
8830 }
8831 size += getUnknownFields().getSerializedSize();
8832 memoizedSerializedSize = size;
8833 return size;
8834 }
8835
8836 private static final long serialVersionUID = 0L;
8837 @java.lang.Override
8838 protected java.lang.Object writeReplace()
8839 throws java.io.ObjectStreamException {
8840 return super.writeReplace();
8841 }
8842
8843 @java.lang.Override
8844 public boolean equals(final java.lang.Object obj) {
8845 if (obj == this) {
8846 return true;
8847 }
8848 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
8849 return super.equals(obj);
8850 }
8851 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
8852
8853 boolean result = true;
8854 result = result && (hasIsFormatted() == other.hasIsFormatted());
8855 if (hasIsFormatted()) {
8856 result = result && (getIsFormatted()
8857 == other.getIsFormatted());
8858 }
8859 result = result &&
8860 getUnknownFields().equals(other.getUnknownFields());
8861 return result;
8862 }
8863
8864 private int memoizedHashCode = 0;
8865 @java.lang.Override
8866 public int hashCode() {
8867 if (memoizedHashCode != 0) {
8868 return memoizedHashCode;
8869 }
8870 int hash = 41;
8871 hash = (19 * hash) + getDescriptorForType().hashCode();
8872 if (hasIsFormatted()) {
8873 hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
8874 hash = (53 * hash) + hashBoolean(getIsFormatted());
8875 }
8876 hash = (29 * hash) + getUnknownFields().hashCode();
8877 memoizedHashCode = hash;
8878 return hash;
8879 }
8880
8881 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8882 com.google.protobuf.ByteString data)
8883 throws com.google.protobuf.InvalidProtocolBufferException {
8884 return PARSER.parseFrom(data);
8885 }
8886 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8887 com.google.protobuf.ByteString data,
8888 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8889 throws com.google.protobuf.InvalidProtocolBufferException {
8890 return PARSER.parseFrom(data, extensionRegistry);
8891 }
8892 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
8893 throws com.google.protobuf.InvalidProtocolBufferException {
8894 return PARSER.parseFrom(data);
8895 }
8896 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8897 byte[] data,
8898 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8899 throws com.google.protobuf.InvalidProtocolBufferException {
8900 return PARSER.parseFrom(data, extensionRegistry);
8901 }
8902 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
8903 throws java.io.IOException {
8904 return PARSER.parseFrom(input);
8905 }
8906 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8907 java.io.InputStream input,
8908 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8909 throws java.io.IOException {
8910 return PARSER.parseFrom(input, extensionRegistry);
8911 }
8912 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
8913 throws java.io.IOException {
8914 return PARSER.parseDelimitedFrom(input);
8915 }
8916 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
8917 java.io.InputStream input,
8918 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8919 throws java.io.IOException {
8920 return PARSER.parseDelimitedFrom(input, extensionRegistry);
8921 }
8922 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8923 com.google.protobuf.CodedInputStream input)
8924 throws java.io.IOException {
8925 return PARSER.parseFrom(input);
8926 }
8927 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8928 com.google.protobuf.CodedInputStream input,
8929 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8930 throws java.io.IOException {
8931 return PARSER.parseFrom(input, extensionRegistry);
8932 }
8933
8934 public static Builder newBuilder() { return Builder.create(); }
8935 public Builder newBuilderForType() { return newBuilder(); }
8936 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
8937 return newBuilder().mergeFrom(prototype);
8938 }
8939 public Builder toBuilder() { return newBuilder(this); }
8940
8941 @java.lang.Override
8942 protected Builder newBuilderForType(
8943 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8944 Builder builder = new Builder(parent);
8945 return builder;
8946 }
8947 /**
8948 * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8949 */
8950 public static final class Builder extends
8951 com.google.protobuf.GeneratedMessage.Builder<Builder>
8952 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
8953 public static final com.google.protobuf.Descriptors.Descriptor
8954 getDescriptor() {
8955 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8956 }
8957
8958 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8959 internalGetFieldAccessorTable() {
8960 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8961 .ensureFieldAccessorsInitialized(
8962 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8963 }
8964
8965 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
8966 private Builder() {
8967 maybeForceBuilderInitialization();
8968 }
8969
8970 private Builder(
8971 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8972 super(parent);
8973 maybeForceBuilderInitialization();
8974 }
8975 private void maybeForceBuilderInitialization() {
8976 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8977 }
8978 }
8979 private static Builder create() {
8980 return new Builder();
8981 }
8982
8983 public Builder clear() {
8984 super.clear();
8985 isFormatted_ = false;
8986 bitField0_ = (bitField0_ & ~0x00000001);
8987 return this;
8988 }
8989
8990 public Builder clone() {
8991 return create().mergeFrom(buildPartial());
8992 }
8993
8994 public com.google.protobuf.Descriptors.Descriptor
8995 getDescriptorForType() {
8996 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8997 }
8998
8999 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
9000 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
9001 }
9002
9003 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
9004 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
9005 if (!result.isInitialized()) {
9006 throw newUninitializedMessageException(result);
9007 }
9008 return result;
9009 }
9010
9011 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
9012 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
9013 int from_bitField0_ = bitField0_;
9014 int to_bitField0_ = 0;
9015 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9016 to_bitField0_ |= 0x00000001;
9017 }
9018 result.isFormatted_ = isFormatted_;
9019 result.bitField0_ = to_bitField0_;
9020 onBuilt();
9021 return result;
9022 }
9023
9024 public Builder mergeFrom(com.google.protobuf.Message other) {
9025 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
9026 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
9027 } else {
9028 super.mergeFrom(other);
9029 return this;
9030 }
9031 }
9032
9033 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
9034 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
9035 if (other.hasIsFormatted()) {
9036 setIsFormatted(other.getIsFormatted());
9037 }
9038 this.mergeUnknownFields(other.getUnknownFields());
9039 return this;
9040 }
9041
9042 public final boolean isInitialized() {
9043 if (!hasIsFormatted()) {
9044
9045 return false;
9046 }
9047 return true;
9048 }
9049
9050 public Builder mergeFrom(
9051 com.google.protobuf.CodedInputStream input,
9052 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9053 throws java.io.IOException {
9054 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null;
9055 try {
9056 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9057 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9058 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage();
9059 throw e;
9060 } finally {
9061 if (parsedMessage != null) {
9062 mergeFrom(parsedMessage);
9063 }
9064 }
9065 return this;
9066 }
9067 private int bitField0_;
9068
9069 // required bool isFormatted = 1;
9070 private boolean isFormatted_ ;
9071 /**
9072 * <code>required bool isFormatted = 1;</code>
9073 */
9074 public boolean hasIsFormatted() {
9075 return ((bitField0_ & 0x00000001) == 0x00000001);
9076 }
9077 /**
9078 * <code>required bool isFormatted = 1;</code>
9079 */
9080 public boolean getIsFormatted() {
9081 return isFormatted_;
9082 }
9083 /**
9084 * <code>required bool isFormatted = 1;</code>
9085 */
9086 public Builder setIsFormatted(boolean value) {
9087 bitField0_ |= 0x00000001;
9088 isFormatted_ = value;
9089 onChanged();
9090 return this;
9091 }
9092 /**
9093 * <code>required bool isFormatted = 1;</code>
9094 */
9095 public Builder clearIsFormatted() {
9096 bitField0_ = (bitField0_ & ~0x00000001);
9097 isFormatted_ = false;
9098 onChanged();
9099 return this;
9100 }
9101
9102 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedResponseProto)
9103 }
9104
9105 static {
9106 defaultInstance = new IsFormattedResponseProto(true);
9107 defaultInstance.initFields();
9108 }
9109
9110 // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedResponseProto)
9111 }
9112
  /**
   * Read-only accessor interface implemented by both
   * {@code GetJournalStateRequestProto} and its Builder.
   */
  public interface GetJournalStateRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     *
     * @return true if the jid field has been set.
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     *
     * <p>Lightweight view; avoids copying when only reads are needed.
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
9130 /**
9131 * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9132 *
9133 * <pre>
9134 **
9135 * getJournalState()
9136 * </pre>
9137 */
9138 public static final class GetJournalStateRequestProto extends
9139 com.google.protobuf.GeneratedMessage
9140 implements GetJournalStateRequestProtoOrBuilder {
    // Use GetJournalStateRequestProto.newBuilder() to construct.
    private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only to create the singleton default instance; skips parsing.
    private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9147
    // Singleton default instance, created in the static initializer below.
    private static final GetJournalStateRequestProto defaultInstance;
    public static GetJournalStateRequestProto getDefaultInstance() {
      return defaultInstance;
    }
9152
    // Instance-level accessor for the shared default instance.
    public GetJournalStateRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }
9156
    // Fields from the wire that this generated class does not recognize;
    // preserved so round-tripping does not lose data.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs until end of
     * input (tag 0). A repeated occurrence of field 1 is merged into the
     * previously parsed jid via a sub-builder, per proto2 last-message-merge
     * semantics.
     */
    private GetJournalStateRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // tag 10 = field 1, wire type 2 (length-delimited): jid
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed, even on error.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor for reflection; backed by the file-level descriptor table.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
    }
9215
    // Reflection support: maps descriptor fields to the generated accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
    }
9222
    // Stateless parser that delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<GetJournalStateRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<GetJournalStateRequestProto>() {
      public GetJournalStateRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new GetJournalStateRequestProto(input, extensionRegistry);
      }
    };
9232
    @java.lang.Override
    public com.google.protobuf.Parser<GetJournalStateRequestProto> getParserForType() {
      return PARSER;
    }
9237
    // Bit 0 records presence of jid.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     *
     * <p>On the immutable message the field itself serves as the view.
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }
9260
    // Resets fields to their proto defaults; called before parsing.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    // -1 = not computed, 0 = not initialized, 1 = initialized.
    private byte memoizedIsInitialized = -1;
    /**
     * True only when the required {@code jid} field is set and the jid
     * sub-message is itself fully initialized; result is memoized.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
9280
    /**
     * Serializes jid (if set) and any unknown fields to the wire.
     * getSerializedSize() is invoked first so nested message sizes are
     * memoized before writing.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }
9289
    // -1 = not yet computed; cached because the message is immutable.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
9304
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's proxy form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
9311
    /**
     * Value equality: same presence and value of jid, and equal
     * unknown-field sets.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
9332
    // 0 = not yet computed; safe to cache since the message is immutable.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
9349
    // ---- Standard static parse entry points; all delegate to PARSER. ----
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length before the message.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
9396 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9397 com.google.protobuf.CodedInputStream input,
9398 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9399 throws java.io.IOException {
9400 return PARSER.parseFrom(input, extensionRegistry);
9401 }
9402
    // Builder factory methods: newBuilder() creates an empty builder,
    // newBuilder(prototype) / toBuilder() seed a builder from an existing
    // message via mergeFrom.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: creates a builder wired to a parent for nested-builder
    // change notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
9416 /**
9417 * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9418 *
9419 * <pre>
9420 **
9421 * getJournalState()
9422 * </pre>
9423 */
9424 public static final class Builder extends
9425 com.google.protobuf.GeneratedMessage.Builder<Builder>
9426 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
9427 public static final com.google.protobuf.Descriptors.Descriptor
9428 getDescriptor() {
9429 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9430 }
9431
9432 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9433 internalGetFieldAccessorTable() {
9434 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
9435 .ensureFieldAccessorsInitialized(
9436 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
9437 }
9438
9439 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
9440 private Builder() {
9441 maybeForceBuilderInitialization();
9442 }
9443
9444 private Builder(
9445 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9446 super(parent);
9447 maybeForceBuilderInitialization();
9448 }
9449 private void maybeForceBuilderInitialization() {
9450 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9451 getJidFieldBuilder();
9452 }
9453 }
9454 private static Builder create() {
9455 return new Builder();
9456 }
9457
9458 public Builder clear() {
9459 super.clear();
9460 if (jidBuilder_ == null) {
9461 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9462 } else {
9463 jidBuilder_.clear();
9464 }
9465 bitField0_ = (bitField0_ & ~0x00000001);
9466 return this;
9467 }
9468
9469 public Builder clone() {
9470 return create().mergeFrom(buildPartial());
9471 }
9472
9473 public com.google.protobuf.Descriptors.Descriptor
9474 getDescriptorForType() {
9475 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9476 }
9477
9478 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
9479 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
9480 }
9481
9482 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
9483 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
9484 if (!result.isInitialized()) {
9485 throw newUninitializedMessageException(result);
9486 }
9487 return result;
9488 }
9489
9490 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
9491 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
9492 int from_bitField0_ = bitField0_;
9493 int to_bitField0_ = 0;
9494 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9495 to_bitField0_ |= 0x00000001;
9496 }
9497 if (jidBuilder_ == null) {
9498 result.jid_ = jid_;
9499 } else {
9500 result.jid_ = jidBuilder_.build();
9501 }
9502 result.bitField0_ = to_bitField0_;
9503 onBuilt();
9504 return result;
9505 }
9506
9507 public Builder mergeFrom(com.google.protobuf.Message other) {
9508 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
9509 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
9510 } else {
9511 super.mergeFrom(other);
9512 return this;
9513 }
9514 }
9515
9516 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
9517 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
9518 if (other.hasJid()) {
9519 mergeJid(other.getJid());
9520 }
9521 this.mergeUnknownFields(other.getUnknownFields());
9522 return this;
9523 }
9524
9525 public final boolean isInitialized() {
9526 if (!hasJid()) {
9527
9528 return false;
9529 }
9530 if (!getJid().isInitialized()) {
9531
9532 return false;
9533 }
9534 return true;
9535 }
9536
9537 public Builder mergeFrom(
9538 com.google.protobuf.CodedInputStream input,
9539 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9540 throws java.io.IOException {
9541 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null;
9542 try {
9543 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9544 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9545 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage();
9546 throw e;
9547 } finally {
9548 if (parsedMessage != null) {
9549 mergeFrom(parsedMessage);
9550 }
9551 }
9552 return this;
9553 }
9554 private int bitField0_;
9555
9556 // required .hadoop.hdfs.JournalIdProto jid = 1;
9557 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9558 private com.google.protobuf.SingleFieldBuilder<
9559 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
9560 /**
9561 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9562 */
9563 public boolean hasJid() {
9564 return ((bitField0_ & 0x00000001) == 0x00000001);
9565 }
9566 /**
9567 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9568 */
9569 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9570 if (jidBuilder_ == null) {
9571 return jid_;
9572 } else {
9573 return jidBuilder_.getMessage();
9574 }
9575 }
9576 /**
9577 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9578 */
9579 public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9580 if (jidBuilder_ == null) {
9581 if (value == null) {
9582 throw new NullPointerException();
9583 }
9584 jid_ = value;
9585 onChanged();
9586 } else {
9587 jidBuilder_.setMessage(value);
9588 }
9589 bitField0_ |= 0x00000001;
9590 return this;
9591 }
9592 /**
9593 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9594 */
9595 public Builder setJid(
9596 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
9597 if (jidBuilder_ == null) {
9598 jid_ = builderForValue.build();
9599 onChanged();
9600 } else {
9601 jidBuilder_.setMessage(builderForValue.build());
9602 }
9603 bitField0_ |= 0x00000001;
9604 return this;
9605 }
9606 /**
9607 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9608 */
9609 public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9610 if (jidBuilder_ == null) {
9611 if (((bitField0_ & 0x00000001) == 0x00000001) &&
9612 jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
9613 jid_ =
9614 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
9615 } else {
9616 jid_ = value;
9617 }
9618 onChanged();
9619 } else {
9620 jidBuilder_.mergeFrom(value);
9621 }
9622 bitField0_ |= 0x00000001;
9623 return this;
9624 }
9625 /**
9626 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9627 */
9628 public Builder clearJid() {
9629 if (jidBuilder_ == null) {
9630 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9631 onChanged();
9632 } else {
9633 jidBuilder_.clear();
9634 }
9635 bitField0_ = (bitField0_ & ~0x00000001);
9636 return this;
9637 }
9638 /**
9639 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9640 */
9641 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
9642 bitField0_ |= 0x00000001;
9643 onChanged();
9644 return getJidFieldBuilder().getBuilder();
9645 }
9646 /**
9647 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9648 */
9649 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9650 if (jidBuilder_ != null) {
9651 return jidBuilder_.getMessageOrBuilder();
9652 } else {
9653 return jid_;
9654 }
9655 }
9656 /**
9657 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9658 */
9659 private com.google.protobuf.SingleFieldBuilder<
9660 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
9661 getJidFieldBuilder() {
9662 if (jidBuilder_ == null) {
9663 jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9664 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
9665 jid_,
9666 getParentForChildren(),
9667 isClean());
9668 jid_ = null;
9669 }
9670 return jidBuilder_;
9671 }
9672
9673 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateRequestProto)
9674 }
9675
    static {
      // Eagerly build the shared singleton default instance at class-load
      // time, using the no-parse constructor.
      defaultInstance = new GetJournalStateRequestProto(true);
      defaultInstance.initFields();
    }
9680
9681 // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateRequestProto)
9682 }
9683
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.GetJournalStateResponseProto},
   * implemented by both the message and its Builder.
   */
  public interface GetJournalStateResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 lastPromisedEpoch = 1;
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    boolean hasLastPromisedEpoch();
    /**
     * <code>required uint64 lastPromisedEpoch = 1;</code>
     */
    long getLastPromisedEpoch();

    // required uint32 httpPort = 2;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    boolean hasHttpPort();
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    int getHttpPort();
  }
9707 /**
9708 * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
9709 */
9710 public static final class GetJournalStateResponseProto extends
9711 com.google.protobuf.GeneratedMessage
9712 implements GetJournalStateResponseProtoOrBuilder {
9713 // Use GetJournalStateResponseProto.newBuilder() to construct.
9714 private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9715 super(builder);
9716 this.unknownFields = builder.getUnknownFields();
9717 }
9718 private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9719
9720 private static final GetJournalStateResponseProto defaultInstance;
9721 public static GetJournalStateResponseProto getDefaultInstance() {
9722 return defaultInstance;
9723 }
9724
9725 public GetJournalStateResponseProto getDefaultInstanceForType() {
9726 return defaultInstance;
9727 }
9728
9729 private final com.google.protobuf.UnknownFieldSet unknownFields;
9730 @java.lang.Override
9731 public final com.google.protobuf.UnknownFieldSet
9732 getUnknownFields() {
9733 return this.unknownFields;
9734 }
9735 private GetJournalStateResponseProto(
9736 com.google.protobuf.CodedInputStream input,
9737 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9738 throws com.google.protobuf.InvalidProtocolBufferException {
9739 initFields();
9740 int mutable_bitField0_ = 0;
9741 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9742 com.google.protobuf.UnknownFieldSet.newBuilder();
9743 try {
9744 boolean done = false;
9745 while (!done) {
9746 int tag = input.readTag();
9747 switch (tag) {
9748 case 0:
9749 done = true;
9750 break;
9751 default: {
9752 if (!parseUnknownField(input, unknownFields,
9753 extensionRegistry, tag)) {
9754 done = true;
9755 }
9756 break;
9757 }
9758 case 8: {
9759 bitField0_ |= 0x00000001;
9760 lastPromisedEpoch_ = input.readUInt64();
9761 break;
9762 }
9763 case 16: {
9764 bitField0_ |= 0x00000002;
9765 httpPort_ = input.readUInt32();
9766 break;
9767 }
9768 }
9769 }
9770 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9771 throw e.setUnfinishedMessage(this);
9772 } catch (java.io.IOException e) {
9773 throw new com.google.protobuf.InvalidProtocolBufferException(
9774 e.getMessage()).setUnfinishedMessage(this);
9775 } finally {
9776 this.unknownFields = unknownFields.build();
9777 makeExtensionsImmutable();
9778 }
9779 }
9780 public static final com.google.protobuf.Descriptors.Descriptor
9781 getDescriptor() {
9782 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
9783 }
9784
9785 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9786 internalGetFieldAccessorTable() {
9787 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
9788 .ensureFieldAccessorsInitialized(
9789 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
9790 }
9791
9792 public static com.google.protobuf.Parser<GetJournalStateResponseProto> PARSER =
9793 new com.google.protobuf.AbstractParser<GetJournalStateResponseProto>() {
9794 public GetJournalStateResponseProto parsePartialFrom(
9795 com.google.protobuf.CodedInputStream input,
9796 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9797 throws com.google.protobuf.InvalidProtocolBufferException {
9798 return new GetJournalStateResponseProto(input, extensionRegistry);
9799 }
9800 };
9801
9802 @java.lang.Override
9803 public com.google.protobuf.Parser<GetJournalStateResponseProto> getParserForType() {
9804 return PARSER;
9805 }
9806
9807 private int bitField0_;
9808 // required uint64 lastPromisedEpoch = 1;
9809 public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
9810 private long lastPromisedEpoch_;
9811 /**
9812 * <code>required uint64 lastPromisedEpoch = 1;</code>
9813 */
9814 public boolean hasLastPromisedEpoch() {
9815 return ((bitField0_ & 0x00000001) == 0x00000001);
9816 }
9817 /**
9818 * <code>required uint64 lastPromisedEpoch = 1;</code>
9819 */
9820 public long getLastPromisedEpoch() {
9821 return lastPromisedEpoch_;
9822 }
9823
9824 // required uint32 httpPort = 2;
9825 public static final int HTTPPORT_FIELD_NUMBER = 2;
9826 private int httpPort_;
9827 /**
9828 * <code>required uint32 httpPort = 2;</code>
9829 */
9830 public boolean hasHttpPort() {
9831 return ((bitField0_ & 0x00000002) == 0x00000002);
9832 }
9833 /**
9834 * <code>required uint32 httpPort = 2;</code>
9835 */
9836 public int getHttpPort() {
9837 return httpPort_;
9838 }
9839
9840 private void initFields() {
9841 lastPromisedEpoch_ = 0L;
9842 httpPort_ = 0;
9843 }
9844 private byte memoizedIsInitialized = -1;
9845 public final boolean isInitialized() {
9846 byte isInitialized = memoizedIsInitialized;
9847 if (isInitialized != -1) return isInitialized == 1;
9848
9849 if (!hasLastPromisedEpoch()) {
9850 memoizedIsInitialized = 0;
9851 return false;
9852 }
9853 if (!hasHttpPort()) {
9854 memoizedIsInitialized = 0;
9855 return false;
9856 }
9857 memoizedIsInitialized = 1;
9858 return true;
9859 }
9860
9861 public void writeTo(com.google.protobuf.CodedOutputStream output)
9862 throws java.io.IOException {
9863 getSerializedSize();
9864 if (((bitField0_ & 0x00000001) == 0x00000001)) {
9865 output.writeUInt64(1, lastPromisedEpoch_);
9866 }
9867 if (((bitField0_ & 0x00000002) == 0x00000002)) {
9868 output.writeUInt32(2, httpPort_);
9869 }
9870 getUnknownFields().writeTo(output);
9871 }
9872
9873 private int memoizedSerializedSize = -1;
9874 public int getSerializedSize() {
9875 int size = memoizedSerializedSize;
9876 if (size != -1) return size;
9877
9878 size = 0;
9879 if (((bitField0_ & 0x00000001) == 0x00000001)) {
9880 size += com.google.protobuf.CodedOutputStream
9881 .computeUInt64Size(1, lastPromisedEpoch_);
9882 }
9883 if (((bitField0_ & 0x00000002) == 0x00000002)) {
9884 size += com.google.protobuf.CodedOutputStream
9885 .computeUInt32Size(2, httpPort_);
9886 }
9887 size += getUnknownFields().getSerializedSize();
9888 memoizedSerializedSize = size;
9889 return size;
9890 }
9891
9892 private static final long serialVersionUID = 0L;
9893 @java.lang.Override
9894 protected java.lang.Object writeReplace()
9895 throws java.io.ObjectStreamException {
9896 return super.writeReplace();
9897 }
9898
9899 @java.lang.Override
9900 public boolean equals(final java.lang.Object obj) {
9901 if (obj == this) {
9902 return true;
9903 }
9904 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
9905 return super.equals(obj);
9906 }
9907 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
9908
9909 boolean result = true;
9910 result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
9911 if (hasLastPromisedEpoch()) {
9912 result = result && (getLastPromisedEpoch()
9913 == other.getLastPromisedEpoch());
9914 }
9915 result = result && (hasHttpPort() == other.hasHttpPort());
9916 if (hasHttpPort()) {
9917 result = result && (getHttpPort()
9918 == other.getHttpPort());
9919 }
9920 result = result &&
9921 getUnknownFields().equals(other.getUnknownFields());
9922 return result;
9923 }
9924
9925 private int memoizedHashCode = 0;
9926 @java.lang.Override
9927 public int hashCode() {
9928 if (memoizedHashCode != 0) {
9929 return memoizedHashCode;
9930 }
9931 int hash = 41;
9932 hash = (19 * hash) + getDescriptorForType().hashCode();
9933 if (hasLastPromisedEpoch()) {
9934 hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
9935 hash = (53 * hash) + hashLong(getLastPromisedEpoch());
9936 }
9937 if (hasHttpPort()) {
9938 hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
9939 hash = (53 * hash) + getHttpPort();
9940 }
9941 hash = (29 * hash) + getUnknownFields().hashCode();
9942 memoizedHashCode = hash;
9943 return hash;
9944 }
9945
9946 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9947 com.google.protobuf.ByteString data)
9948 throws com.google.protobuf.InvalidProtocolBufferException {
9949 return PARSER.parseFrom(data);
9950 }
9951 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9952 com.google.protobuf.ByteString data,
9953 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9954 throws com.google.protobuf.InvalidProtocolBufferException {
9955 return PARSER.parseFrom(data, extensionRegistry);
9956 }
9957 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
9958 throws com.google.protobuf.InvalidProtocolBufferException {
9959 return PARSER.parseFrom(data);
9960 }
9961 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9962 byte[] data,
9963 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9964 throws com.google.protobuf.InvalidProtocolBufferException {
9965 return PARSER.parseFrom(data, extensionRegistry);
9966 }
9967 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
9968 throws java.io.IOException {
9969 return PARSER.parseFrom(input);
9970 }
9971 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9972 java.io.InputStream input,
9973 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9974 throws java.io.IOException {
9975 return PARSER.parseFrom(input, extensionRegistry);
9976 }
9977 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
9978 throws java.io.IOException {
9979 return PARSER.parseDelimitedFrom(input);
9980 }
9981 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
9982 java.io.InputStream input,
9983 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9984 throws java.io.IOException {
9985 return PARSER.parseDelimitedFrom(input, extensionRegistry);
9986 }
9987 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9988 com.google.protobuf.CodedInputStream input)
9989 throws java.io.IOException {
9990 return PARSER.parseFrom(input);
9991 }
9992 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9993 com.google.protobuf.CodedInputStream input,
9994 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9995 throws java.io.IOException {
9996 return PARSER.parseFrom(input, extensionRegistry);
9997 }
9998
9999 public static Builder newBuilder() { return Builder.create(); }
10000 public Builder newBuilderForType() { return newBuilder(); }
10001 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
10002 return newBuilder().mergeFrom(prototype);
10003 }
10004 public Builder toBuilder() { return newBuilder(this); }
10005
10006 @java.lang.Override
10007 protected Builder newBuilderForType(
10008 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10009 Builder builder = new Builder(parent);
10010 return builder;
10011 }
10012 /**
10013 * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
10014 */
10015 public static final class Builder extends
10016 com.google.protobuf.GeneratedMessage.Builder<Builder>
10017 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
10018 public static final com.google.protobuf.Descriptors.Descriptor
10019 getDescriptor() {
10020 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10021 }
10022
10023 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10024 internalGetFieldAccessorTable() {
10025 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
10026 .ensureFieldAccessorsInitialized(
10027 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
10028 }
10029
10030 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
10031 private Builder() {
10032 maybeForceBuilderInitialization();
10033 }
10034
10035 private Builder(
10036 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10037 super(parent);
10038 maybeForceBuilderInitialization();
10039 }
10040 private void maybeForceBuilderInitialization() {
10041 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10042 }
10043 }
10044 private static Builder create() {
10045 return new Builder();
10046 }
10047
10048 public Builder clear() {
10049 super.clear();
10050 lastPromisedEpoch_ = 0L;
10051 bitField0_ = (bitField0_ & ~0x00000001);
10052 httpPort_ = 0;
10053 bitField0_ = (bitField0_ & ~0x00000002);
10054 return this;
10055 }
10056
10057 public Builder clone() {
10058 return create().mergeFrom(buildPartial());
10059 }
10060
10061 public com.google.protobuf.Descriptors.Descriptor
10062 getDescriptorForType() {
10063 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10064 }
10065
10066 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
10067 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
10068 }
10069
10070 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
10071 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
10072 if (!result.isInitialized()) {
10073 throw newUninitializedMessageException(result);
10074 }
10075 return result;
10076 }
10077
10078 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
10079 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
10080 int from_bitField0_ = bitField0_;
10081 int to_bitField0_ = 0;
10082 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10083 to_bitField0_ |= 0x00000001;
10084 }
10085 result.lastPromisedEpoch_ = lastPromisedEpoch_;
10086 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10087 to_bitField0_ |= 0x00000002;
10088 }
10089 result.httpPort_ = httpPort_;
10090 result.bitField0_ = to_bitField0_;
10091 onBuilt();
10092 return result;
10093 }
10094
10095 public Builder mergeFrom(com.google.protobuf.Message other) {
10096 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
10097 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
10098 } else {
10099 super.mergeFrom(other);
10100 return this;
10101 }
10102 }
10103
10104 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
10105 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
10106 if (other.hasLastPromisedEpoch()) {
10107 setLastPromisedEpoch(other.getLastPromisedEpoch());
10108 }
10109 if (other.hasHttpPort()) {
10110 setHttpPort(other.getHttpPort());
10111 }
10112 this.mergeUnknownFields(other.getUnknownFields());
10113 return this;
10114 }
10115
10116 public final boolean isInitialized() {
10117 if (!hasLastPromisedEpoch()) {
10118
10119 return false;
10120 }
10121 if (!hasHttpPort()) {
10122
10123 return false;
10124 }
10125 return true;
10126 }
10127
10128 public Builder mergeFrom(
10129 com.google.protobuf.CodedInputStream input,
10130 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10131 throws java.io.IOException {
10132 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null;
10133 try {
10134 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10135 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10136 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage();
10137 throw e;
10138 } finally {
10139 if (parsedMessage != null) {
10140 mergeFrom(parsedMessage);
10141 }
10142 }
10143 return this;
10144 }
      // Tracks which optional/required fields have been set; bit 0 =
      // lastPromisedEpoch, bit 1 = httpPort.
      private int bitField0_;

      // required uint64 lastPromisedEpoch = 1;
      private long lastPromisedEpoch_ ;
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public boolean hasLastPromisedEpoch() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public long getLastPromisedEpoch() {
        return lastPromisedEpoch_;
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public Builder setLastPromisedEpoch(long value) {
        bitField0_ |= 0x00000001;
        lastPromisedEpoch_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 lastPromisedEpoch = 1;</code>
       */
      public Builder clearLastPromisedEpoch() {
        // Clear the has-bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000001);
        lastPromisedEpoch_ = 0L;
        onChanged();
        return this;
      }

      // required uint32 httpPort = 2;
      private int httpPort_ ;
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public boolean hasHttpPort() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public int getHttpPort() {
        return httpPort_;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder setHttpPort(int value) {
        bitField0_ |= 0x00000002;
        httpPort_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder clearHttpPort() {
        // Clear the has-bit and restore the proto default (0).
        bitField0_ = (bitField0_ & ~0x00000002);
        httpPort_ = 0;
        onChanged();
        return this;
      }
10212
10213 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateResponseProto)
10214 }
10215
    // Create the singleton default instance after class initialization so
    // getDefaultInstance() always returns a fully initialized message.
    static {
      defaultInstance = new GetJournalStateResponseProto(true);
      defaultInstance.initFields();
    }
10220
10221 // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateResponseProto)
10222 }
10223
  /**
   * Read-only accessor interface implemented by both {@code FormatRequestProto}
   * and its {@code Builder}.
   */
  public interface FormatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    boolean hasNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
   *
   * <pre>
   **
   * format()
   * </pre>
   */
  public static final class FormatRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements FormatRequestProtoOrBuilder {
    // Use FormatRequestProto.newBuilder() to construct.
    private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the singleton default instance; fields are populated
    // afterwards via initFields() in the static initializer below.
    private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final FormatRequestProto defaultInstance;
    public static FormatRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public FormatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not in this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: reads tag/value pairs until end of
    // stream (tag 0), preserving unrecognized fields in unknownFields.
    private FormatRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (jid), wire type 2 (length-delimited message).
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // Field already seen on the wire: merge per proto2 semantics.
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (nsInfo), wire type 2 (length-delimited message).
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = nsInfo_.toBuilder();
              }
              nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(nsInfo_);
                nsInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture whatever was parsed, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support: descriptor and field-accessor table are defined at
    // the bottom of the enclosing QJournalProtocolProtos class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
    }

    // Stateless parser that delegates to the wire-format parsing constructor.
    public static com.google.protobuf.Parser<FormatRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<FormatRequestProto>() {
      public FormatRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FormatRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FormatRequestProto> getParserForType() {
      return PARSER;
    }
10375
    // Has-bits for this immutable message: bit 0 = jid, bit 1 = nsInfo.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }

    // Resets both message fields to their type default instances.
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
    }
    // Cached result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are proto2 'required': they must be present AND themselves
      // fully initialized (recursive check on the sub-messages).
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNsInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getNsInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
10449
    // Serializes set fields (plus unknown fields) in field-number order.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, nsInfo_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached serialized size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(2, nsInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
10480
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Value equality: same fields set, equal field values, equal unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Hash code consistent with equals(); cached since the message is immutable
    // (0 means not yet computed).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
10534
    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length before the message body.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
10587
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a builder pre-populated from an existing message.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
     *
     * <pre>
     **
     * format()
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested-message field builders when the runtime
      // requires field builders (alwaysUseFieldBuilders is a protobuf
      // runtime flag).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets both fields to their defaults and clears all has-bits.
      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
10664
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
      }

      // Builds and verifies all required fields are set; throws otherwise.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without the required-fields check, copying the builder's
      // has-bits and field values into the new immutable message.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (nsInfoBuilder_ == null) {
          result.nsInfo_ = nsInfo_;
        } else {
          result.nsInfo_ = nsInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      // Type-dispatching merge; falls back to reflective merge for other
      // message types.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-wise merge: only fields set on 'other' overwrite/merge into
      // this builder; the default instance is a no-op.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasNsInfo()) {
          mergeNsInfo(other.getNsInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
10727
      // proto2 required-field check: jid and nsInfo must both be set and
      // themselves initialized.
      public final boolean isInitialized() {
        if (!hasJid()) {
          
          return false;
        }
        if (!hasNsInfo()) {
          
          return false;
        }
        if (!getJid().isInitialized()) {
          
          return false;
        }
        if (!getNsInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }

      // Parses from the wire and merges into this builder; partially parsed
      // data is still merged before a parse exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Has-bits for the builder: bit 0 = jid, bit 1 = nsInfo.
      private int bitField0_;

      // required .hadoop.hdfs.JournalIdProto jid = 1;
      // jid_ is used until getJidFieldBuilder() is first called, after which
      // jidBuilder_ owns the field and jid_ is nulled out.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          // If a non-default value is already set, merge into it; otherwise
          // simply adopt the incoming value.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        // Exposing the nested builder counts as a modification.
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          // Ownership transfers to the field builder.
          jid_ = null;
        }
        return jidBuilder_;
      }
10883
      // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
      // nsInfo_ is used until getNsInfoFieldBuilder() is first called, after
      // which nsInfoBuilder_ owns the field and nsInfo_ is nulled out.
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public boolean hasNsInfo() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
        if (nsInfoBuilder_ == null) {
          return nsInfo_;
        } else {
          return nsInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          nsInfo_ = value;
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder setNsInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = builderForValue.build();
          onChanged();
        } else {
          nsInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
        if (nsInfoBuilder_ == null) {
          // If a non-default value is already set, merge into it; otherwise
          // simply adopt the incoming value.
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
            nsInfo_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
          } else {
            nsInfo_ = value;
          }
          onChanged();
        } else {
          nsInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public Builder clearNsInfo() {
        if (nsInfoBuilder_ == null) {
          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
          onChanged();
        } else {
          nsInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
        // Exposing the nested builder counts as a modification.
        bitField0_ |= 0x00000002;
        onChanged();
        return getNsInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
        if (nsInfoBuilder_ != null) {
          return nsInfoBuilder_.getMessageOrBuilder();
        } else {
          return nsInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>
          getNsInfoFieldBuilder() {
        if (nsInfoBuilder_ == null) {
          nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
                  nsInfo_,
                  getParentForChildren(),
                  isClean());
          // Ownership transfers to the field builder.
          nsInfo_ = null;
        }
        return nsInfoBuilder_;
      }
11000
      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatRequestProto)
    }

    // Create the singleton default instance after class initialization so
    // getDefaultInstance() always returns a fully initialized message.
    static {
      defaultInstance = new FormatRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatRequestProto)
  }
11011
  /**
   * Read interface shared by {@code FormatResponseProto} and its Builder.
   * The message declares no fields, so nothing is added beyond
   * {@link com.google.protobuf.MessageOrBuilder}.
   */
  public interface FormatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
11015 /**
11016 * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11017 */
11018 public static final class FormatResponseProto extends
11019 com.google.protobuf.GeneratedMessage
11020 implements FormatResponseProtoOrBuilder {
11021 // Use FormatResponseProto.newBuilder() to construct.
11022 private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11023 super(builder);
11024 this.unknownFields = builder.getUnknownFields();
11025 }
11026 private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11027
11028 private static final FormatResponseProto defaultInstance;
11029 public static FormatResponseProto getDefaultInstance() {
11030 return defaultInstance;
11031 }
11032
11033 public FormatResponseProto getDefaultInstanceForType() {
11034 return defaultInstance;
11035 }
11036
11037 private final com.google.protobuf.UnknownFieldSet unknownFields;
11038 @java.lang.Override
11039 public final com.google.protobuf.UnknownFieldSet
11040 getUnknownFields() {
11041 return this.unknownFields;
11042 }
11043 private FormatResponseProto(
11044 com.google.protobuf.CodedInputStream input,
11045 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11046 throws com.google.protobuf.InvalidProtocolBufferException {
11047 initFields();
11048 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11049 com.google.protobuf.UnknownFieldSet.newBuilder();
11050 try {
11051 boolean done = false;
11052 while (!done) {
11053 int tag = input.readTag();
11054 switch (tag) {
11055 case 0:
11056 done = true;
11057 break;
11058 default: {
11059 if (!parseUnknownField(input, unknownFields,
11060 extensionRegistry, tag)) {
11061 done = true;
11062 }
11063 break;
11064 }
11065 }
11066 }
11067 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11068 throw e.setUnfinishedMessage(this);
11069 } catch (java.io.IOException e) {
11070 throw new com.google.protobuf.InvalidProtocolBufferException(
11071 e.getMessage()).setUnfinishedMessage(this);
11072 } finally {
11073 this.unknownFields = unknownFields.build();
11074 makeExtensionsImmutable();
11075 }
11076 }
11077 public static final com.google.protobuf.Descriptors.Descriptor
11078 getDescriptor() {
11079 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11080 }
11081
11082 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11083 internalGetFieldAccessorTable() {
11084 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11085 .ensureFieldAccessorsInitialized(
11086 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11087 }
11088
11089 public static com.google.protobuf.Parser<FormatResponseProto> PARSER =
11090 new com.google.protobuf.AbstractParser<FormatResponseProto>() {
11091 public FormatResponseProto parsePartialFrom(
11092 com.google.protobuf.CodedInputStream input,
11093 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11094 throws com.google.protobuf.InvalidProtocolBufferException {
11095 return new FormatResponseProto(input, extensionRegistry);
11096 }
11097 };
11098
11099 @java.lang.Override
11100 public com.google.protobuf.Parser<FormatResponseProto> getParserForType() {
11101 return PARSER;
11102 }
11103
11104 private void initFields() {
11105 }
11106 private byte memoizedIsInitialized = -1;
11107 public final boolean isInitialized() {
11108 byte isInitialized = memoizedIsInitialized;
11109 if (isInitialized != -1) return isInitialized == 1;
11110
11111 memoizedIsInitialized = 1;
11112 return true;
11113 }
11114
11115 public void writeTo(com.google.protobuf.CodedOutputStream output)
11116 throws java.io.IOException {
11117 getSerializedSize();
11118 getUnknownFields().writeTo(output);
11119 }
11120
11121 private int memoizedSerializedSize = -1;
11122 public int getSerializedSize() {
11123 int size = memoizedSerializedSize;
11124 if (size != -1) return size;
11125
11126 size = 0;
11127 size += getUnknownFields().getSerializedSize();
11128 memoizedSerializedSize = size;
11129 return size;
11130 }
11131
11132 private static final long serialVersionUID = 0L;
11133 @java.lang.Override
11134 protected java.lang.Object writeReplace()
11135 throws java.io.ObjectStreamException {
11136 return super.writeReplace();
11137 }
11138
11139 @java.lang.Override
11140 public boolean equals(final java.lang.Object obj) {
11141 if (obj == this) {
11142 return true;
11143 }
11144 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
11145 return super.equals(obj);
11146 }
11147 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
11148
11149 boolean result = true;
11150 result = result &&
11151 getUnknownFields().equals(other.getUnknownFields());
11152 return result;
11153 }
11154
11155 private int memoizedHashCode = 0;
11156 @java.lang.Override
11157 public int hashCode() {
11158 if (memoizedHashCode != 0) {
11159 return memoizedHashCode;
11160 }
11161 int hash = 41;
11162 hash = (19 * hash) + getDescriptorForType().hashCode();
11163 hash = (29 * hash) + getUnknownFields().hashCode();
11164 memoizedHashCode = hash;
11165 return hash;
11166 }
11167
11168 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11169 com.google.protobuf.ByteString data)
11170 throws com.google.protobuf.InvalidProtocolBufferException {
11171 return PARSER.parseFrom(data);
11172 }
11173 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11174 com.google.protobuf.ByteString data,
11175 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11176 throws com.google.protobuf.InvalidProtocolBufferException {
11177 return PARSER.parseFrom(data, extensionRegistry);
11178 }
11179 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
11180 throws com.google.protobuf.InvalidProtocolBufferException {
11181 return PARSER.parseFrom(data);
11182 }
11183 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11184 byte[] data,
11185 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11186 throws com.google.protobuf.InvalidProtocolBufferException {
11187 return PARSER.parseFrom(data, extensionRegistry);
11188 }
11189 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
11190 throws java.io.IOException {
11191 return PARSER.parseFrom(input);
11192 }
11193 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11194 java.io.InputStream input,
11195 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11196 throws java.io.IOException {
11197 return PARSER.parseFrom(input, extensionRegistry);
11198 }
11199 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
11200 throws java.io.IOException {
11201 return PARSER.parseDelimitedFrom(input);
11202 }
11203 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
11204 java.io.InputStream input,
11205 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11206 throws java.io.IOException {
11207 return PARSER.parseDelimitedFrom(input, extensionRegistry);
11208 }
11209 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11210 com.google.protobuf.CodedInputStream input)
11211 throws java.io.IOException {
11212 return PARSER.parseFrom(input);
11213 }
11214 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11215 com.google.protobuf.CodedInputStream input,
11216 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11217 throws java.io.IOException {
11218 return PARSER.parseFrom(input, extensionRegistry);
11219 }
11220
11221 public static Builder newBuilder() { return Builder.create(); }
11222 public Builder newBuilderForType() { return newBuilder(); }
11223 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
11224 return newBuilder().mergeFrom(prototype);
11225 }
11226 public Builder toBuilder() { return newBuilder(this); }
11227
11228 @java.lang.Override
11229 protected Builder newBuilderForType(
11230 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11231 Builder builder = new Builder(parent);
11232 return builder;
11233 }
11234 /**
11235 * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11236 */
11237 public static final class Builder extends
11238 com.google.protobuf.GeneratedMessage.Builder<Builder>
11239 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
11240 public static final com.google.protobuf.Descriptors.Descriptor
11241 getDescriptor() {
11242 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11243 }
11244
11245 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11246 internalGetFieldAccessorTable() {
11247 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11248 .ensureFieldAccessorsInitialized(
11249 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11250 }
11251
11252 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
11253 private Builder() {
11254 maybeForceBuilderInitialization();
11255 }
11256
11257 private Builder(
11258 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11259 super(parent);
11260 maybeForceBuilderInitialization();
11261 }
11262 private void maybeForceBuilderInitialization() {
11263 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11264 }
11265 }
11266 private static Builder create() {
11267 return new Builder();
11268 }
11269
11270 public Builder clear() {
11271 super.clear();
11272 return this;
11273 }
11274
11275 public Builder clone() {
11276 return create().mergeFrom(buildPartial());
11277 }
11278
11279 public com.google.protobuf.Descriptors.Descriptor
11280 getDescriptorForType() {
11281 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11282 }
11283
11284 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
11285 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
11286 }
11287
11288 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
11289 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
11290 if (!result.isInitialized()) {
11291 throw newUninitializedMessageException(result);
11292 }
11293 return result;
11294 }
11295
11296 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
11297 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
11298 onBuilt();
11299 return result;
11300 }
11301
11302 public Builder mergeFrom(com.google.protobuf.Message other) {
11303 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
11304 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
11305 } else {
11306 super.mergeFrom(other);
11307 return this;
11308 }
11309 }
11310
11311 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
11312 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
11313 this.mergeUnknownFields(other.getUnknownFields());
11314 return this;
11315 }
11316
11317 public final boolean isInitialized() {
11318 return true;
11319 }
11320
11321 public Builder mergeFrom(
11322 com.google.protobuf.CodedInputStream input,
11323 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11324 throws java.io.IOException {
11325 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null;
11326 try {
11327 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11328 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11329 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage();
11330 throw e;
11331 } finally {
11332 if (parsedMessage != null) {
11333 mergeFrom(parsedMessage);
11334 }
11335 }
11336 return this;
11337 }
11338
11339 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatResponseProto)
11340 }
11341
11342 static {
11343 defaultInstance = new FormatResponseProto(true);
11344 defaultInstance.initFields();
11345 }
11346
11347 // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatResponseProto)
11348 }
11349
  /**
   * Read interface shared by {@code NewEpochRequestProto} and its Builder:
   * presence checks and getters for the three required fields (jid, nsInfo,
   * epoch).
   */
  public interface NewEpochRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    boolean hasNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();

    // required uint64 epoch = 3;
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    long getEpoch();
  }
11391 /**
11392 * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11393 *
11394 * <pre>
11395 **
11396 * newEpoch()
11397 * </pre>
11398 */
11399 public static final class NewEpochRequestProto extends
11400 com.google.protobuf.GeneratedMessage
11401 implements NewEpochRequestProtoOrBuilder {
    // Use NewEpochRequestProto.newBuilder() to construct.
    private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructs the shared default instance with an empty unknown-field set.
    private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, created in the class static initializer.
    private static final NewEpochRequestProto defaultInstance;
    public static NewEpochRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public NewEpochRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor.  Reads tags until EOF (tag 0) or an
    // unparseable unknown field; recognized fields: jid (tag 10), nsInfo
    // (tag 18), epoch (tag 24).  NOTE: the 'default:' arm appearing before
    // the numbered cases is standard protoc output — Java case selection is
    // order-independent and every arm breaks, so behavior is unaffected.
    private NewEpochRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // Declared by the generator; only used for repeated fields, of which
      // this message has none.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // If jid was already seen, merge the new value into the old one
              // (last-one-wins field merge semantics for messages).
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Same merge-on-repeat handling for nsInfo.
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = nsInfo_.toBuilder();
              }
              nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(nsInfo_);
                nsInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              epoch_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze the unknown fields, even on error, so the partial
        // message attached to the thrown exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
    }

    // Stateless parser delegating to the parsing constructor above.
    public static com.google.protobuf.Parser<NewEpochRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<NewEpochRequestProto>() {
      public NewEpochRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new NewEpochRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<NewEpochRequestProto> getParserForType() {
      return PARSER;
    }
11516
    // Presence bits for the three required fields:
    // 0x1 = jid, 0x2 = nsInfo, 0x4 = epoch.
    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
    public static final int NSINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public boolean hasNsInfo() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
      return nsInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
      return nsInfo_;
    }

    // required uint64 epoch = 3;
    public static final int EPOCH_FIELD_NUMBER = 3;
    private long epoch_;
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 epoch = 3;</code>
     */
    public long getEpoch() {
      return epoch_;
    }
11577
    // Resets fields to their proto defaults (called before parsing and when
    // building the default instance).
    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
      epoch_ = 0L;
    }
    // Memoized isInitialized result: -1 = not yet computed, 0/1 = cached
    // false/true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // All three fields are 'required': presence of each, plus recursive
      // initialization of the two message-typed fields.
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNsInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEpoch()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getNsInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
11611
    // Serializes only the fields whose presence bits are set, in field-number
    // order, followed by any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Called for its memoization side effect, per generated-code convention.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, epoch_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 means not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, nsInfo_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, epoch_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
11649
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: presence flags must match, and present values
    // must be equal; unknown fields are compared as well.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasNsInfo() == other.hasNsInfo());
      if (hasNsInfo()) {
        result = result && getNsInfo()
            .equals(other.getNsInfo());
      }
      result = result && (hasEpoch() == other.hasEpoch());
      if (hasEpoch()) {
        result = result && (getEpoch()
            == other.getEpoch());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 is the "not yet computed" sentinel.  Each present
    // field mixes in its field number and value hash.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasNsInfo()) {
        hash = (37 * hash) + NSINFO_FIELD_NUMBER;
        hash = (53 * hash) + getNsInfo().hashCode();
      }
      if (hasEpoch()) {
        hash = (37 * hash) + EPOCH_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEpoch());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
11712
    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
11765
    // Builder factory methods; toBuilder() seeds a builder from this message.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
11779 /**
11780 * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11781 *
11782 * <pre>
11783 **
11784 * newEpoch()
11785 * </pre>
11786 */
11787 public static final class Builder extends
11788 com.google.protobuf.GeneratedMessage.Builder<Builder>
11789 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested field builders for the two message-typed
      // fields when the runtime requires field builders.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getNsInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
11821
11822 public Builder clear() {
11823 super.clear();
11824 if (jidBuilder_ == null) {
11825 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11826 } else {
11827 jidBuilder_.clear();
11828 }
11829 bitField0_ = (bitField0_ & ~0x00000001);
11830 if (nsInfoBuilder_ == null) {
11831 nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11832 } else {
11833 nsInfoBuilder_.clear();
11834 }
11835 bitField0_ = (bitField0_ & ~0x00000002);
11836 epoch_ = 0L;
11837 bitField0_ = (bitField0_ & ~0x00000004);
11838 return this;
11839 }
11840
11841 public Builder clone() {
11842 return create().mergeFrom(buildPartial());
11843 }
11844
11845 public com.google.protobuf.Descriptors.Descriptor
11846 getDescriptorForType() {
11847 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11848 }
11849
11850 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
11851 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
11852 }
11853
11854 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
11855 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
11856 if (!result.isInitialized()) {
11857 throw newUninitializedMessageException(result);
11858 }
11859 return result;
11860 }
11861
11862 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
11863 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
11864 int from_bitField0_ = bitField0_;
11865 int to_bitField0_ = 0;
11866 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11867 to_bitField0_ |= 0x00000001;
11868 }
11869 if (jidBuilder_ == null) {
11870 result.jid_ = jid_;
11871 } else {
11872 result.jid_ = jidBuilder_.build();
11873 }
11874 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
11875 to_bitField0_ |= 0x00000002;
11876 }
11877 if (nsInfoBuilder_ == null) {
11878 result.nsInfo_ = nsInfo_;
11879 } else {
11880 result.nsInfo_ = nsInfoBuilder_.build();
11881 }
11882 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
11883 to_bitField0_ |= 0x00000004;
11884 }
11885 result.epoch_ = epoch_;
11886 result.bitField0_ = to_bitField0_;
11887 onBuilt();
11888 return result;
11889 }
11890
11891 public Builder mergeFrom(com.google.protobuf.Message other) {
11892 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
11893 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
11894 } else {
11895 super.mergeFrom(other);
11896 return this;
11897 }
11898 }
11899
11900 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
11901 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
11902 if (other.hasJid()) {
11903 mergeJid(other.getJid());
11904 }
11905 if (other.hasNsInfo()) {
11906 mergeNsInfo(other.getNsInfo());
11907 }
11908 if (other.hasEpoch()) {
11909 setEpoch(other.getEpoch());
11910 }
11911 this.mergeUnknownFields(other.getUnknownFields());
11912 return this;
11913 }
11914
11915 public final boolean isInitialized() {
11916 if (!hasJid()) {
11917
11918 return false;
11919 }
11920 if (!hasNsInfo()) {
11921
11922 return false;
11923 }
11924 if (!hasEpoch()) {
11925
11926 return false;
11927 }
11928 if (!getJid().isInitialized()) {
11929
11930 return false;
11931 }
11932 if (!getNsInfo().isInitialized()) {
11933
11934 return false;
11935 }
11936 return true;
11937 }
11938
11939 public Builder mergeFrom(
11940 com.google.protobuf.CodedInputStream input,
11941 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11942 throws java.io.IOException {
11943 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null;
11944 try {
11945 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11946 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11947 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage();
11948 throw e;
11949 } finally {
11950 if (parsedMessage != null) {
11951 mergeFrom(parsedMessage);
11952 }
11953 }
11954 return this;
11955 }
11956 private int bitField0_;
11957
11958 // required .hadoop.hdfs.JournalIdProto jid = 1;
11959 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11960 private com.google.protobuf.SingleFieldBuilder<
11961 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
11962 /**
11963 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11964 */
11965 public boolean hasJid() {
11966 return ((bitField0_ & 0x00000001) == 0x00000001);
11967 }
11968 /**
11969 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11970 */
11971 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11972 if (jidBuilder_ == null) {
11973 return jid_;
11974 } else {
11975 return jidBuilder_.getMessage();
11976 }
11977 }
11978 /**
11979 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11980 */
11981 public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
11982 if (jidBuilder_ == null) {
11983 if (value == null) {
11984 throw new NullPointerException();
11985 }
11986 jid_ = value;
11987 onChanged();
11988 } else {
11989 jidBuilder_.setMessage(value);
11990 }
11991 bitField0_ |= 0x00000001;
11992 return this;
11993 }
11994 /**
11995 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11996 */
11997 public Builder setJid(
11998 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
11999 if (jidBuilder_ == null) {
12000 jid_ = builderForValue.build();
12001 onChanged();
12002 } else {
12003 jidBuilder_.setMessage(builderForValue.build());
12004 }
12005 bitField0_ |= 0x00000001;
12006 return this;
12007 }
12008 /**
12009 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12010 */
12011 public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12012 if (jidBuilder_ == null) {
12013 if (((bitField0_ & 0x00000001) == 0x00000001) &&
12014 jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
12015 jid_ =
12016 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
12017 } else {
12018 jid_ = value;
12019 }
12020 onChanged();
12021 } else {
12022 jidBuilder_.mergeFrom(value);
12023 }
12024 bitField0_ |= 0x00000001;
12025 return this;
12026 }
12027 /**
12028 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12029 */
12030 public Builder clearJid() {
12031 if (jidBuilder_ == null) {
12032 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12033 onChanged();
12034 } else {
12035 jidBuilder_.clear();
12036 }
12037 bitField0_ = (bitField0_ & ~0x00000001);
12038 return this;
12039 }
12040 /**
12041 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12042 */
12043 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
12044 bitField0_ |= 0x00000001;
12045 onChanged();
12046 return getJidFieldBuilder().getBuilder();
12047 }
12048 /**
12049 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12050 */
12051 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12052 if (jidBuilder_ != null) {
12053 return jidBuilder_.getMessageOrBuilder();
12054 } else {
12055 return jid_;
12056 }
12057 }
12058 /**
12059 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12060 */
12061 private com.google.protobuf.SingleFieldBuilder<
12062 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
12063 getJidFieldBuilder() {
12064 if (jidBuilder_ == null) {
12065 jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12066 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
12067 jid_,
12068 getParentForChildren(),
12069 isClean());
12070 jid_ = null;
12071 }
12072 return jidBuilder_;
12073 }
12074
12075 // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
12076 private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12077 private com.google.protobuf.SingleFieldBuilder<
12078 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
12079 /**
12080 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12081 */
12082 public boolean hasNsInfo() {
12083 return ((bitField0_ & 0x00000002) == 0x00000002);
12084 }
12085 /**
12086 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12087 */
12088 public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
12089 if (nsInfoBuilder_ == null) {
12090 return nsInfo_;
12091 } else {
12092 return nsInfoBuilder_.getMessage();
12093 }
12094 }
12095 /**
12096 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12097 */
12098 public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12099 if (nsInfoBuilder_ == null) {
12100 if (value == null) {
12101 throw new NullPointerException();
12102 }
12103 nsInfo_ = value;
12104 onChanged();
12105 } else {
12106 nsInfoBuilder_.setMessage(value);
12107 }
12108 bitField0_ |= 0x00000002;
12109 return this;
12110 }
12111 /**
12112 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12113 */
12114 public Builder setNsInfo(
12115 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
12116 if (nsInfoBuilder_ == null) {
12117 nsInfo_ = builderForValue.build();
12118 onChanged();
12119 } else {
12120 nsInfoBuilder_.setMessage(builderForValue.build());
12121 }
12122 bitField0_ |= 0x00000002;
12123 return this;
12124 }
12125 /**
12126 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12127 */
12128 public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12129 if (nsInfoBuilder_ == null) {
12130 if (((bitField0_ & 0x00000002) == 0x00000002) &&
12131 nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
12132 nsInfo_ =
12133 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
12134 } else {
12135 nsInfo_ = value;
12136 }
12137 onChanged();
12138 } else {
12139 nsInfoBuilder_.mergeFrom(value);
12140 }
12141 bitField0_ |= 0x00000002;
12142 return this;
12143 }
12144 /**
12145 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12146 */
12147 public Builder clearNsInfo() {
12148 if (nsInfoBuilder_ == null) {
12149 nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12150 onChanged();
12151 } else {
12152 nsInfoBuilder_.clear();
12153 }
12154 bitField0_ = (bitField0_ & ~0x00000002);
12155 return this;
12156 }
12157 /**
12158 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12159 */
12160 public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
12161 bitField0_ |= 0x00000002;
12162 onChanged();
12163 return getNsInfoFieldBuilder().getBuilder();
12164 }
12165 /**
12166 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12167 */
12168 public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
12169 if (nsInfoBuilder_ != null) {
12170 return nsInfoBuilder_.getMessageOrBuilder();
12171 } else {
12172 return nsInfo_;
12173 }
12174 }
12175 /**
12176 * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12177 */
12178 private com.google.protobuf.SingleFieldBuilder<
12179 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>
12180 getNsInfoFieldBuilder() {
12181 if (nsInfoBuilder_ == null) {
12182 nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12183 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
12184 nsInfo_,
12185 getParentForChildren(),
12186 isClean());
12187 nsInfo_ = null;
12188 }
12189 return nsInfoBuilder_;
12190 }
12191
12192 // required uint64 epoch = 3;
12193 private long epoch_ ;
12194 /**
12195 * <code>required uint64 epoch = 3;</code>
12196 */
12197 public boolean hasEpoch() {
12198 return ((bitField0_ & 0x00000004) == 0x00000004);
12199 }
12200 /**
12201 * <code>required uint64 epoch = 3;</code>
12202 */
12203 public long getEpoch() {
12204 return epoch_;
12205 }
12206 /**
12207 * <code>required uint64 epoch = 3;</code>
12208 */
12209 public Builder setEpoch(long value) {
12210 bitField0_ |= 0x00000004;
12211 epoch_ = value;
12212 onChanged();
12213 return this;
12214 }
12215 /**
12216 * <code>required uint64 epoch = 3;</code>
12217 */
12218 public Builder clearEpoch() {
12219 bitField0_ = (bitField0_ & ~0x00000004);
12220 epoch_ = 0L;
12221 onChanged();
12222 return this;
12223 }
12224
12225 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochRequestProto)
12226 }
12227
    static {
      // Eagerly build the shared singleton returned by getDefaultInstance();
      // the (true) ctor skips normal construction, so fields are set via initFields().
      defaultInstance = new NewEpochRequestProto(true);
      defaultInstance.initFields();
    }
12232
12233 // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochRequestProto)
12234 }
12235
  /**
   * Read-only view shared by {@code NewEpochResponseProto} and its Builder:
   * presence check and getter for the single optional {@code lastSegmentTxId}
   * field.
   */
  public interface NewEpochResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional uint64 lastSegmentTxId = 1;
    /**
     * <code>optional uint64 lastSegmentTxId = 1;</code>
     */
    boolean hasLastSegmentTxId();
    /**
     * <code>optional uint64 lastSegmentTxId = 1;</code>
     */
    long getLastSegmentTxId();
  }
12249 /**
12250 * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12251 */
12252 public static final class NewEpochResponseProto extends
12253 com.google.protobuf.GeneratedMessage
12254 implements NewEpochResponseProtoOrBuilder {
12255 // Use NewEpochResponseProto.newBuilder() to construct.
12256 private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12257 super(builder);
12258 this.unknownFields = builder.getUnknownFields();
12259 }
12260 private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12261
12262 private static final NewEpochResponseProto defaultInstance;
12263 public static NewEpochResponseProto getDefaultInstance() {
12264 return defaultInstance;
12265 }
12266
12267 public NewEpochResponseProto getDefaultInstanceForType() {
12268 return defaultInstance;
12269 }
12270
12271 private final com.google.protobuf.UnknownFieldSet unknownFields;
12272 @java.lang.Override
12273 public final com.google.protobuf.UnknownFieldSet
12274 getUnknownFields() {
12275 return this.unknownFields;
12276 }
12277 private NewEpochResponseProto(
12278 com.google.protobuf.CodedInputStream input,
12279 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12280 throws com.google.protobuf.InvalidProtocolBufferException {
12281 initFields();
12282 int mutable_bitField0_ = 0;
12283 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12284 com.google.protobuf.UnknownFieldSet.newBuilder();
12285 try {
12286 boolean done = false;
12287 while (!done) {
12288 int tag = input.readTag();
12289 switch (tag) {
12290 case 0:
12291 done = true;
12292 break;
12293 default: {
12294 if (!parseUnknownField(input, unknownFields,
12295 extensionRegistry, tag)) {
12296 done = true;
12297 }
12298 break;
12299 }
12300 case 8: {
12301 bitField0_ |= 0x00000001;
12302 lastSegmentTxId_ = input.readUInt64();
12303 break;
12304 }
12305 }
12306 }
12307 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12308 throw e.setUnfinishedMessage(this);
12309 } catch (java.io.IOException e) {
12310 throw new com.google.protobuf.InvalidProtocolBufferException(
12311 e.getMessage()).setUnfinishedMessage(this);
12312 } finally {
12313 this.unknownFields = unknownFields.build();
12314 makeExtensionsImmutable();
12315 }
12316 }
12317 public static final com.google.protobuf.Descriptors.Descriptor
12318 getDescriptor() {
12319 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12320 }
12321
12322 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12323 internalGetFieldAccessorTable() {
12324 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12325 .ensureFieldAccessorsInitialized(
12326 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12327 }
12328
12329 public static com.google.protobuf.Parser<NewEpochResponseProto> PARSER =
12330 new com.google.protobuf.AbstractParser<NewEpochResponseProto>() {
12331 public NewEpochResponseProto parsePartialFrom(
12332 com.google.protobuf.CodedInputStream input,
12333 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12334 throws com.google.protobuf.InvalidProtocolBufferException {
12335 return new NewEpochResponseProto(input, extensionRegistry);
12336 }
12337 };
12338
12339 @java.lang.Override
12340 public com.google.protobuf.Parser<NewEpochResponseProto> getParserForType() {
12341 return PARSER;
12342 }
12343
12344 private int bitField0_;
12345 // optional uint64 lastSegmentTxId = 1;
12346 public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
12347 private long lastSegmentTxId_;
12348 /**
12349 * <code>optional uint64 lastSegmentTxId = 1;</code>
12350 */
12351 public boolean hasLastSegmentTxId() {
12352 return ((bitField0_ & 0x00000001) == 0x00000001);
12353 }
12354 /**
12355 * <code>optional uint64 lastSegmentTxId = 1;</code>
12356 */
12357 public long getLastSegmentTxId() {
12358 return lastSegmentTxId_;
12359 }
12360
12361 private void initFields() {
12362 lastSegmentTxId_ = 0L;
12363 }
12364 private byte memoizedIsInitialized = -1;
12365 public final boolean isInitialized() {
12366 byte isInitialized = memoizedIsInitialized;
12367 if (isInitialized != -1) return isInitialized == 1;
12368
12369 memoizedIsInitialized = 1;
12370 return true;
12371 }
12372
12373 public void writeTo(com.google.protobuf.CodedOutputStream output)
12374 throws java.io.IOException {
12375 getSerializedSize();
12376 if (((bitField0_ & 0x00000001) == 0x00000001)) {
12377 output.writeUInt64(1, lastSegmentTxId_);
12378 }
12379 getUnknownFields().writeTo(output);
12380 }
12381
12382 private int memoizedSerializedSize = -1;
12383 public int getSerializedSize() {
12384 int size = memoizedSerializedSize;
12385 if (size != -1) return size;
12386
12387 size = 0;
12388 if (((bitField0_ & 0x00000001) == 0x00000001)) {
12389 size += com.google.protobuf.CodedOutputStream
12390 .computeUInt64Size(1, lastSegmentTxId_);
12391 }
12392 size += getUnknownFields().getSerializedSize();
12393 memoizedSerializedSize = size;
12394 return size;
12395 }
12396
12397 private static final long serialVersionUID = 0L;
12398 @java.lang.Override
12399 protected java.lang.Object writeReplace()
12400 throws java.io.ObjectStreamException {
12401 return super.writeReplace();
12402 }
12403
12404 @java.lang.Override
12405 public boolean equals(final java.lang.Object obj) {
12406 if (obj == this) {
12407 return true;
12408 }
12409 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
12410 return super.equals(obj);
12411 }
12412 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
12413
12414 boolean result = true;
12415 result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
12416 if (hasLastSegmentTxId()) {
12417 result = result && (getLastSegmentTxId()
12418 == other.getLastSegmentTxId());
12419 }
12420 result = result &&
12421 getUnknownFields().equals(other.getUnknownFields());
12422 return result;
12423 }
12424
12425 private int memoizedHashCode = 0;
12426 @java.lang.Override
12427 public int hashCode() {
12428 if (memoizedHashCode != 0) {
12429 return memoizedHashCode;
12430 }
12431 int hash = 41;
12432 hash = (19 * hash) + getDescriptorForType().hashCode();
12433 if (hasLastSegmentTxId()) {
12434 hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
12435 hash = (53 * hash) + hashLong(getLastSegmentTxId());
12436 }
12437 hash = (29 * hash) + getUnknownFields().hashCode();
12438 memoizedHashCode = hash;
12439 return hash;
12440 }
12441
12442 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12443 com.google.protobuf.ByteString data)
12444 throws com.google.protobuf.InvalidProtocolBufferException {
12445 return PARSER.parseFrom(data);
12446 }
12447 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12448 com.google.protobuf.ByteString data,
12449 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12450 throws com.google.protobuf.InvalidProtocolBufferException {
12451 return PARSER.parseFrom(data, extensionRegistry);
12452 }
12453 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
12454 throws com.google.protobuf.InvalidProtocolBufferException {
12455 return PARSER.parseFrom(data);
12456 }
12457 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12458 byte[] data,
12459 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12460 throws com.google.protobuf.InvalidProtocolBufferException {
12461 return PARSER.parseFrom(data, extensionRegistry);
12462 }
12463 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
12464 throws java.io.IOException {
12465 return PARSER.parseFrom(input);
12466 }
12467 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12468 java.io.InputStream input,
12469 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12470 throws java.io.IOException {
12471 return PARSER.parseFrom(input, extensionRegistry);
12472 }
12473 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
12474 throws java.io.IOException {
12475 return PARSER.parseDelimitedFrom(input);
12476 }
12477 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
12478 java.io.InputStream input,
12479 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12480 throws java.io.IOException {
12481 return PARSER.parseDelimitedFrom(input, extensionRegistry);
12482 }
12483 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12484 com.google.protobuf.CodedInputStream input)
12485 throws java.io.IOException {
12486 return PARSER.parseFrom(input);
12487 }
12488 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12489 com.google.protobuf.CodedInputStream input,
12490 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12491 throws java.io.IOException {
12492 return PARSER.parseFrom(input, extensionRegistry);
12493 }
12494
12495 public static Builder newBuilder() { return Builder.create(); }
12496 public Builder newBuilderForType() { return newBuilder(); }
12497 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
12498 return newBuilder().mergeFrom(prototype);
12499 }
12500 public Builder toBuilder() { return newBuilder(this); }
12501
12502 @java.lang.Override
12503 protected Builder newBuilderForType(
12504 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12505 Builder builder = new Builder(parent);
12506 return builder;
12507 }
12508 /**
12509 * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12510 */
12511 public static final class Builder extends
12512 com.google.protobuf.GeneratedMessage.Builder<Builder>
12513 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
12514 public static final com.google.protobuf.Descriptors.Descriptor
12515 getDescriptor() {
12516 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12517 }
12518
12519 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12520 internalGetFieldAccessorTable() {
12521 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12522 .ensureFieldAccessorsInitialized(
12523 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12524 }
12525
12526 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
12527 private Builder() {
12528 maybeForceBuilderInitialization();
12529 }
12530
12531 private Builder(
12532 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12533 super(parent);
12534 maybeForceBuilderInitialization();
12535 }
12536 private void maybeForceBuilderInitialization() {
12537 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12538 }
12539 }
12540 private static Builder create() {
12541 return new Builder();
12542 }
12543
12544 public Builder clear() {
12545 super.clear();
12546 lastSegmentTxId_ = 0L;
12547 bitField0_ = (bitField0_ & ~0x00000001);
12548 return this;
12549 }
12550
12551 public Builder clone() {
12552 return create().mergeFrom(buildPartial());
12553 }
12554
12555 public com.google.protobuf.Descriptors.Descriptor
12556 getDescriptorForType() {
12557 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12558 }
12559
12560 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
12561 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
12562 }
12563
12564 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
12565 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
12566 if (!result.isInitialized()) {
12567 throw newUninitializedMessageException(result);
12568 }
12569 return result;
12570 }
12571
12572 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
12573 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
12574 int from_bitField0_ = bitField0_;
12575 int to_bitField0_ = 0;
12576 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12577 to_bitField0_ |= 0x00000001;
12578 }
12579 result.lastSegmentTxId_ = lastSegmentTxId_;
12580 result.bitField0_ = to_bitField0_;
12581 onBuilt();
12582 return result;
12583 }
12584
12585 public Builder mergeFrom(com.google.protobuf.Message other) {
12586 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
12587 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
12588 } else {
12589 super.mergeFrom(other);
12590 return this;
12591 }
12592 }
12593
12594 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
12595 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
12596 if (other.hasLastSegmentTxId()) {
12597 setLastSegmentTxId(other.getLastSegmentTxId());
12598 }
12599 this.mergeUnknownFields(other.getUnknownFields());
12600 return this;
12601 }
12602
12603 public final boolean isInitialized() {
12604 return true;
12605 }
12606
12607 public Builder mergeFrom(
12608 com.google.protobuf.CodedInputStream input,
12609 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12610 throws java.io.IOException {
12611 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null;
12612 try {
12613 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12614 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12615 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage();
12616 throw e;
12617 } finally {
12618 if (parsedMessage != null) {
12619 mergeFrom(parsedMessage);
12620 }
12621 }
12622 return this;
12623 }
      // Presence bitmap: bit 0x00000001 tracks whether lastSegmentTxId is set.
      private int bitField0_;

      // optional uint64 lastSegmentTxId = 1;
      private long lastSegmentTxId_ ;
      /**
       * <code>optional uint64 lastSegmentTxId = 1;</code>
       *
       * @return whether the field has been explicitly set on this builder.
       */
      public boolean hasLastSegmentTxId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional uint64 lastSegmentTxId = 1;</code>
       *
       * @return the field value, or 0 (the proto default) if unset.
       */
      public long getLastSegmentTxId() {
        return lastSegmentTxId_;
      }
      /**
       * <code>optional uint64 lastSegmentTxId = 1;</code>
       *
       * Sets the field and marks it present in the bitmap.
       */
      public Builder setLastSegmentTxId(long value) {
        bitField0_ |= 0x00000001;
        lastSegmentTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastSegmentTxId = 1;</code>
       *
       * Clears the presence bit and restores the proto default (0).
       */
      public Builder clearLastSegmentTxId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lastSegmentTxId_ = 0L;
        onChanged();
        return this;
      }
12658
12659 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochResponseProto)
12660 }
12661
    // Eagerly create the shared default (empty) instance returned by
    // getDefaultInstance(); initFields() applies proto-declared defaults.
    static {
      defaultInstance = new NewEpochResponseProto(true);
      defaultInstance.initFields();
    }
12666
12667 // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochResponseProto)
12668 }
12669
  /**
   * Read-only accessor interface for {@code hadoop.hdfs.GetEditLogManifestRequestProto},
   * implemented by both the immutable message and its Builder.
   * Each field exposes a {@code hasX()} presence check alongside its getter.
   */
  public interface GetEditLogManifestRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required uint64 sinceTxId = 2;
    /**
     * <code>required uint64 sinceTxId = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    boolean hasSinceTxId();
    /**
     * <code>required uint64 sinceTxId = 2;</code>
     *
     * <pre>
     * Transaction ID
     * </pre>
     */
    long getSinceTxId();

    // optional bool forReading = 3 [default = true];
    /**
     * <code>optional bool forReading = 3 [default = true];</code>
     *
     * <pre>
     * Whether or not the client will be reading from the returned streams.
     * </pre>
     */
    boolean hasForReading();
    /**
     * <code>optional bool forReading = 3 [default = true];</code>
     *
     * <pre>
     * Whether or not the client will be reading from the returned streams.
     * </pre>
     */
    boolean getForReading();

    // optional bool inProgressOk = 4 [default = false];
    /**
     * <code>optional bool inProgressOk = 4 [default = false];</code>
     */
    boolean hasInProgressOk();
    /**
     * <code>optional bool inProgressOk = 4 [default = false];</code>
     */
    boolean getInProgressOk();
  }
12733 /**
12734 * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
12735 *
12736 * <pre>
12737 **
12738 * getEditLogManifest()
12739 * </pre>
12740 */
12741 public static final class GetEditLogManifestRequestProto extends
12742 com.google.protobuf.GeneratedMessage
12743 implements GetEditLogManifestRequestProtoOrBuilder {
12744 // Use GetEditLogManifestRequestProto.newBuilder() to construct.
12745 private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12746 super(builder);
12747 this.unknownFields = builder.getUnknownFields();
12748 }
12749 private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12750
12751 private static final GetEditLogManifestRequestProto defaultInstance;
12752 public static GetEditLogManifestRequestProto getDefaultInstance() {
12753 return defaultInstance;
12754 }
12755
12756 public GetEditLogManifestRequestProto getDefaultInstanceForType() {
12757 return defaultInstance;
12758 }
12759
12760 private final com.google.protobuf.UnknownFieldSet unknownFields;
12761 @java.lang.Override
12762 public final com.google.protobuf.UnknownFieldSet
12763 getUnknownFields() {
12764 return this.unknownFields;
12765 }
12766 private GetEditLogManifestRequestProto(
12767 com.google.protobuf.CodedInputStream input,
12768 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12769 throws com.google.protobuf.InvalidProtocolBufferException {
12770 initFields();
12771 int mutable_bitField0_ = 0;
12772 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12773 com.google.protobuf.UnknownFieldSet.newBuilder();
12774 try {
12775 boolean done = false;
12776 while (!done) {
12777 int tag = input.readTag();
12778 switch (tag) {
12779 case 0:
12780 done = true;
12781 break;
12782 default: {
12783 if (!parseUnknownField(input, unknownFields,
12784 extensionRegistry, tag)) {
12785 done = true;
12786 }
12787 break;
12788 }
12789 case 10: {
12790 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
12791 if (((bitField0_ & 0x00000001) == 0x00000001)) {
12792 subBuilder = jid_.toBuilder();
12793 }
12794 jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
12795 if (subBuilder != null) {
12796 subBuilder.mergeFrom(jid_);
12797 jid_ = subBuilder.buildPartial();
12798 }
12799 bitField0_ |= 0x00000001;
12800 break;
12801 }
12802 case 16: {
12803 bitField0_ |= 0x00000002;
12804 sinceTxId_ = input.readUInt64();
12805 break;
12806 }
12807 case 24: {
12808 bitField0_ |= 0x00000004;
12809 forReading_ = input.readBool();
12810 break;
12811 }
12812 case 32: {
12813 bitField0_ |= 0x00000008;
12814 inProgressOk_ = input.readBool();
12815 break;
12816 }
12817 }
12818 }
12819 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12820 throw e.setUnfinishedMessage(this);
12821 } catch (java.io.IOException e) {
12822 throw new com.google.protobuf.InvalidProtocolBufferException(
12823 e.getMessage()).setUnfinishedMessage(this);
12824 } finally {
12825 this.unknownFields = unknownFields.build();
12826 makeExtensionsImmutable();
12827 }
12828 }
12829 public static final com.google.protobuf.Descriptors.Descriptor
12830 getDescriptor() {
12831 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
12832 }
12833
12834 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12835 internalGetFieldAccessorTable() {
12836 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
12837 .ensureFieldAccessorsInitialized(
12838 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
12839 }
12840
12841 public static com.google.protobuf.Parser<GetEditLogManifestRequestProto> PARSER =
12842 new com.google.protobuf.AbstractParser<GetEditLogManifestRequestProto>() {
12843 public GetEditLogManifestRequestProto parsePartialFrom(
12844 com.google.protobuf.CodedInputStream input,
12845 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12846 throws com.google.protobuf.InvalidProtocolBufferException {
12847 return new GetEditLogManifestRequestProto(input, extensionRegistry);
12848 }
12849 };
12850
12851 @java.lang.Override
12852 public com.google.protobuf.Parser<GetEditLogManifestRequestProto> getParserForType() {
12853 return PARSER;
12854 }
12855
12856 private int bitField0_;
12857 // required .hadoop.hdfs.JournalIdProto jid = 1;
12858 public static final int JID_FIELD_NUMBER = 1;
12859 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
12860 /**
12861 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12862 */
12863 public boolean hasJid() {
12864 return ((bitField0_ & 0x00000001) == 0x00000001);
12865 }
12866 /**
12867 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12868 */
12869 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12870 return jid_;
12871 }
12872 /**
12873 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12874 */
12875 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12876 return jid_;
12877 }
12878
12879 // required uint64 sinceTxId = 2;
12880 public static final int SINCETXID_FIELD_NUMBER = 2;
12881 private long sinceTxId_;
12882 /**
12883 * <code>required uint64 sinceTxId = 2;</code>
12884 *
12885 * <pre>
12886 * Transaction ID
12887 * </pre>
12888 */
12889 public boolean hasSinceTxId() {
12890 return ((bitField0_ & 0x00000002) == 0x00000002);
12891 }
12892 /**
12893 * <code>required uint64 sinceTxId = 2;</code>
12894 *
12895 * <pre>
12896 * Transaction ID
12897 * </pre>
12898 */
12899 public long getSinceTxId() {
12900 return sinceTxId_;
12901 }
12902
12903 // optional bool forReading = 3 [default = true];
12904 public static final int FORREADING_FIELD_NUMBER = 3;
12905 private boolean forReading_;
12906 /**
12907 * <code>optional bool forReading = 3 [default = true];</code>
12908 *
12909 * <pre>
12910 * Whether or not the client will be reading from the returned streams.
12911 * </pre>
12912 */
12913 public boolean hasForReading() {
12914 return ((bitField0_ & 0x00000004) == 0x00000004);
12915 }
12916 /**
12917 * <code>optional bool forReading = 3 [default = true];</code>
12918 *
12919 * <pre>
12920 * Whether or not the client will be reading from the returned streams.
12921 * </pre>
12922 */
12923 public boolean getForReading() {
12924 return forReading_;
12925 }
12926
12927 // optional bool inProgressOk = 4 [default = false];
12928 public static final int INPROGRESSOK_FIELD_NUMBER = 4;
12929 private boolean inProgressOk_;
12930 /**
12931 * <code>optional bool inProgressOk = 4 [default = false];</code>
12932 */
12933 public boolean hasInProgressOk() {
12934 return ((bitField0_ & 0x00000008) == 0x00000008);
12935 }
12936 /**
12937 * <code>optional bool inProgressOk = 4 [default = false];</code>
12938 */
12939 public boolean getInProgressOk() {
12940 return inProgressOk_;
12941 }
12942
12943 private void initFields() {
12944 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12945 sinceTxId_ = 0L;
12946 forReading_ = true;
12947 inProgressOk_ = false;
12948 }
12949 private byte memoizedIsInitialized = -1;
12950 public final boolean isInitialized() {
12951 byte isInitialized = memoizedIsInitialized;
12952 if (isInitialized != -1) return isInitialized == 1;
12953
12954 if (!hasJid()) {
12955 memoizedIsInitialized = 0;
12956 return false;
12957 }
12958 if (!hasSinceTxId()) {
12959 memoizedIsInitialized = 0;
12960 return false;
12961 }
12962 if (!getJid().isInitialized()) {
12963 memoizedIsInitialized = 0;
12964 return false;
12965 }
12966 memoizedIsInitialized = 1;
12967 return true;
12968 }
12969
12970 public void writeTo(com.google.protobuf.CodedOutputStream output)
12971 throws java.io.IOException {
12972 getSerializedSize();
12973 if (((bitField0_ & 0x00000001) == 0x00000001)) {
12974 output.writeMessage(1, jid_);
12975 }
12976 if (((bitField0_ & 0x00000002) == 0x00000002)) {
12977 output.writeUInt64(2, sinceTxId_);
12978 }
12979 if (((bitField0_ & 0x00000004) == 0x00000004)) {
12980 output.writeBool(3, forReading_);
12981 }
12982 if (((bitField0_ & 0x00000008) == 0x00000008)) {
12983 output.writeBool(4, inProgressOk_);
12984 }
12985 getUnknownFields().writeTo(output);
12986 }
12987
12988 private int memoizedSerializedSize = -1;
12989 public int getSerializedSize() {
12990 int size = memoizedSerializedSize;
12991 if (size != -1) return size;
12992
12993 size = 0;
12994 if (((bitField0_ & 0x00000001) == 0x00000001)) {
12995 size += com.google.protobuf.CodedOutputStream
12996 .computeMessageSize(1, jid_);
12997 }
12998 if (((bitField0_ & 0x00000002) == 0x00000002)) {
12999 size += com.google.protobuf.CodedOutputStream
13000 .computeUInt64Size(2, sinceTxId_);
13001 }
13002 if (((bitField0_ & 0x00000004) == 0x00000004)) {
13003 size += com.google.protobuf.CodedOutputStream
13004 .computeBoolSize(3, forReading_);
13005 }
13006 if (((bitField0_ & 0x00000008) == 0x00000008)) {
13007 size += com.google.protobuf.CodedOutputStream
13008 .computeBoolSize(4, inProgressOk_);
13009 }
13010 size += getUnknownFields().getSerializedSize();
13011 memoizedSerializedSize = size;
13012 return size;
13013 }
13014
13015 private static final long serialVersionUID = 0L;
13016 @java.lang.Override
13017 protected java.lang.Object writeReplace()
13018 throws java.io.ObjectStreamException {
13019 return super.writeReplace();
13020 }
13021
13022 @java.lang.Override
13023 public boolean equals(final java.lang.Object obj) {
13024 if (obj == this) {
13025 return true;
13026 }
13027 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
13028 return super.equals(obj);
13029 }
13030 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
13031
13032 boolean result = true;
13033 result = result && (hasJid() == other.hasJid());
13034 if (hasJid()) {
13035 result = result && getJid()
13036 .equals(other.getJid());
13037 }
13038 result = result && (hasSinceTxId() == other.hasSinceTxId());
13039 if (hasSinceTxId()) {
13040 result = result && (getSinceTxId()
13041 == other.getSinceTxId());
13042 }
13043 result = result && (hasForReading() == other.hasForReading());
13044 if (hasForReading()) {
13045 result = result && (getForReading()
13046 == other.getForReading());
13047 }
13048 result = result && (hasInProgressOk() == other.hasInProgressOk());
13049 if (hasInProgressOk()) {
13050 result = result && (getInProgressOk()
13051 == other.getInProgressOk());
13052 }
13053 result = result &&
13054 getUnknownFields().equals(other.getUnknownFields());
13055 return result;
13056 }
13057
13058 private int memoizedHashCode = 0;
13059 @java.lang.Override
13060 public int hashCode() {
13061 if (memoizedHashCode != 0) {
13062 return memoizedHashCode;
13063 }
13064 int hash = 41;
13065 hash = (19 * hash) + getDescriptorForType().hashCode();
13066 if (hasJid()) {
13067 hash = (37 * hash) + JID_FIELD_NUMBER;
13068 hash = (53 * hash) + getJid().hashCode();
13069 }
13070 if (hasSinceTxId()) {
13071 hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
13072 hash = (53 * hash) + hashLong(getSinceTxId());
13073 }
13074 if (hasForReading()) {
13075 hash = (37 * hash) + FORREADING_FIELD_NUMBER;
13076 hash = (53 * hash) + hashBoolean(getForReading());
13077 }
13078 if (hasInProgressOk()) {
13079 hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER;
13080 hash = (53 * hash) + hashBoolean(getInProgressOk());
13081 }
13082 hash = (29 * hash) + getUnknownFields().hashCode();
13083 memoizedHashCode = hash;
13084 return hash;
13085 }
13086
13087 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13088 com.google.protobuf.ByteString data)
13089 throws com.google.protobuf.InvalidProtocolBufferException {
13090 return PARSER.parseFrom(data);
13091 }
13092 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13093 com.google.protobuf.ByteString data,
13094 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13095 throws com.google.protobuf.InvalidProtocolBufferException {
13096 return PARSER.parseFrom(data, extensionRegistry);
13097 }
13098 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
13099 throws com.google.protobuf.InvalidProtocolBufferException {
13100 return PARSER.parseFrom(data);
13101 }
13102 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13103 byte[] data,
13104 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13105 throws com.google.protobuf.InvalidProtocolBufferException {
13106 return PARSER.parseFrom(data, extensionRegistry);
13107 }
13108 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
13109 throws java.io.IOException {
13110 return PARSER.parseFrom(input);
13111 }
13112 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13113 java.io.InputStream input,
13114 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13115 throws java.io.IOException {
13116 return PARSER.parseFrom(input, extensionRegistry);
13117 }
13118 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
13119 throws java.io.IOException {
13120 return PARSER.parseDelimitedFrom(input);
13121 }
13122 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
13123 java.io.InputStream input,
13124 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13125 throws java.io.IOException {
13126 return PARSER.parseDelimitedFrom(input, extensionRegistry);
13127 }
13128 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13129 com.google.protobuf.CodedInputStream input)
13130 throws java.io.IOException {
13131 return PARSER.parseFrom(input);
13132 }
13133 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13134 com.google.protobuf.CodedInputStream input,
13135 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13136 throws java.io.IOException {
13137 return PARSER.parseFrom(input, extensionRegistry);
13138 }
13139
13140 public static Builder newBuilder() { return Builder.create(); }
13141 public Builder newBuilderForType() { return newBuilder(); }
13142 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
13143 return newBuilder().mergeFrom(prototype);
13144 }
13145 public Builder toBuilder() { return newBuilder(this); }
13146
13147 @java.lang.Override
13148 protected Builder newBuilderForType(
13149 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13150 Builder builder = new Builder(parent);
13151 return builder;
13152 }
13153 /**
13154 * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
13155 *
13156 * <pre>
13157 **
13158 * getEditLogManifest()
13159 * </pre>
13160 */
13161 public static final class Builder extends
13162 com.google.protobuf.GeneratedMessage.Builder<Builder>
13163 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
13164 public static final com.google.protobuf.Descriptors.Descriptor
13165 getDescriptor() {
13166 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13167 }
13168
13169 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13170 internalGetFieldAccessorTable() {
13171 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
13172 .ensureFieldAccessorsInitialized(
13173 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
13174 }
13175
13176 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
13177 private Builder() {
13178 maybeForceBuilderInitialization();
13179 }
13180
13181 private Builder(
13182 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13183 super(parent);
13184 maybeForceBuilderInitialization();
13185 }
13186 private void maybeForceBuilderInitialization() {
13187 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13188 getJidFieldBuilder();
13189 }
13190 }
13191 private static Builder create() {
13192 return new Builder();
13193 }
13194
13195 public Builder clear() {
13196 super.clear();
13197 if (jidBuilder_ == null) {
13198 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13199 } else {
13200 jidBuilder_.clear();
13201 }
13202 bitField0_ = (bitField0_ & ~0x00000001);
13203 sinceTxId_ = 0L;
13204 bitField0_ = (bitField0_ & ~0x00000002);
13205 forReading_ = true;
13206 bitField0_ = (bitField0_ & ~0x00000004);
13207 inProgressOk_ = false;
13208 bitField0_ = (bitField0_ & ~0x00000008);
13209 return this;
13210 }
13211
13212 public Builder clone() {
13213 return create().mergeFrom(buildPartial());
13214 }
13215
13216 public com.google.protobuf.Descriptors.Descriptor
13217 getDescriptorForType() {
13218 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13219 }
13220
13221 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
13222 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
13223 }
13224
13225 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
13226 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
13227 if (!result.isInitialized()) {
13228 throw newUninitializedMessageException(result);
13229 }
13230 return result;
13231 }
13232
13233 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
13234 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
13235 int from_bitField0_ = bitField0_;
13236 int to_bitField0_ = 0;
13237 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13238 to_bitField0_ |= 0x00000001;
13239 }
13240 if (jidBuilder_ == null) {
13241 result.jid_ = jid_;
13242 } else {
13243 result.jid_ = jidBuilder_.build();
13244 }
13245 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13246 to_bitField0_ |= 0x00000002;
13247 }
13248 result.sinceTxId_ = sinceTxId_;
13249 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
13250 to_bitField0_ |= 0x00000004;
13251 }
13252 result.forReading_ = forReading_;
13253 if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
13254 to_bitField0_ |= 0x00000008;
13255 }
13256 result.inProgressOk_ = inProgressOk_;
13257 result.bitField0_ = to_bitField0_;
13258 onBuilt();
13259 return result;
13260 }
13261
13262 public Builder mergeFrom(com.google.protobuf.Message other) {
13263 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
13264 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
13265 } else {
13266 super.mergeFrom(other);
13267 return this;
13268 }
13269 }
13270
13271 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
13272 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
13273 if (other.hasJid()) {
13274 mergeJid(other.getJid());
13275 }
13276 if (other.hasSinceTxId()) {
13277 setSinceTxId(other.getSinceTxId());
13278 }
13279 if (other.hasForReading()) {
13280 setForReading(other.getForReading());
13281 }
13282 if (other.hasInProgressOk()) {
13283 setInProgressOk(other.getInProgressOk());
13284 }
13285 this.mergeUnknownFields(other.getUnknownFields());
13286 return this;
13287 }
13288
13289 public final boolean isInitialized() {
13290 if (!hasJid()) {
13291
13292 return false;
13293 }
13294 if (!hasSinceTxId()) {
13295
13296 return false;
13297 }
13298 if (!getJid().isInitialized()) {
13299
13300 return false;
13301 }
13302 return true;
13303 }
13304
13305 public Builder mergeFrom(
13306 com.google.protobuf.CodedInputStream input,
13307 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13308 throws java.io.IOException {
13309 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null;
13310 try {
13311 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13312 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13313 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage();
13314 throw e;
13315 } finally {
13316 if (parsedMessage != null) {
13317 mergeFrom(parsedMessage);
13318 }
13319 }
13320 return this;
13321 }
13322 private int bitField0_;
13323
13324 // required .hadoop.hdfs.JournalIdProto jid = 1;
13325 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13326 private com.google.protobuf.SingleFieldBuilder<
13327 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
13328 /**
13329 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13330 */
13331 public boolean hasJid() {
13332 return ((bitField0_ & 0x00000001) == 0x00000001);
13333 }
13334 /**
13335 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13336 */
13337 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13338 if (jidBuilder_ == null) {
13339 return jid_;
13340 } else {
13341 return jidBuilder_.getMessage();
13342 }
13343 }
13344 /**
13345 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13346 */
13347 public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13348 if (jidBuilder_ == null) {
13349 if (value == null) {
13350 throw new NullPointerException();
13351 }
13352 jid_ = value;
13353 onChanged();
13354 } else {
13355 jidBuilder_.setMessage(value);
13356 }
13357 bitField0_ |= 0x00000001;
13358 return this;
13359 }
13360 /**
13361 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13362 */
13363 public Builder setJid(
13364 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
13365 if (jidBuilder_ == null) {
13366 jid_ = builderForValue.build();
13367 onChanged();
13368 } else {
13369 jidBuilder_.setMessage(builderForValue.build());
13370 }
13371 bitField0_ |= 0x00000001;
13372 return this;
13373 }
13374 /**
13375 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13376 */
13377 public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13378 if (jidBuilder_ == null) {
13379 if (((bitField0_ & 0x00000001) == 0x00000001) &&
13380 jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
13381 jid_ =
13382 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
13383 } else {
13384 jid_ = value;
13385 }
13386 onChanged();
13387 } else {
13388 jidBuilder_.mergeFrom(value);
13389 }
13390 bitField0_ |= 0x00000001;
13391 return this;
13392 }
13393 /**
13394 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13395 */
13396 public Builder clearJid() {
13397 if (jidBuilder_ == null) {
13398 jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13399 onChanged();
13400 } else {
13401 jidBuilder_.clear();
13402 }
13403 bitField0_ = (bitField0_ & ~0x00000001);
13404 return this;
13405 }
13406 /**
13407 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13408 */
13409 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
13410 bitField0_ |= 0x00000001;
13411 onChanged();
13412 return getJidFieldBuilder().getBuilder();
13413 }
13414 /**
13415 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13416 */
13417 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13418 if (jidBuilder_ != null) {
13419 return jidBuilder_.getMessageOrBuilder();
13420 } else {
13421 return jid_;
13422 }
13423 }
13424 /**
13425 * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13426 */
13427 private com.google.protobuf.SingleFieldBuilder<
13428 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
13429 getJidFieldBuilder() {
13430 if (jidBuilder_ == null) {
13431 jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13432 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
13433 jid_,
13434 getParentForChildren(),
13435 isClean());
13436 jid_ = null;
13437 }
13438 return jidBuilder_;
13439 }
13440
13441 // required uint64 sinceTxId = 2;
13442 private long sinceTxId_ ;
13443 /**
13444 * <code>required uint64 sinceTxId = 2;</code>
13445 *
13446 * <pre>
13447 * Transaction ID
13448 * </pre>
13449 */
13450 public boolean hasSinceTxId() {
13451 return ((bitField0_ & 0x00000002) == 0x00000002);
13452 }
13453 /**
13454 * <code>required uint64 sinceTxId = 2;</code>
13455 *
13456 * <pre>
13457 * Transaction ID
13458 * </pre>
13459 */
13460 public long getSinceTxId() {
13461 return sinceTxId_;
13462 }
13463 /**
13464 * <code>required uint64 sinceTxId = 2;</code>
13465 *
13466 * <pre>
13467 * Transaction ID
13468 * </pre>
13469 */
13470 public Builder setSinceTxId(long value) {
13471 bitField0_ |= 0x00000002;
13472 sinceTxId_ = value;
13473 onChanged();
13474 return this;
13475 }
13476 /**
13477 * <code>required uint64 sinceTxId = 2;</code>
13478 *
13479 * <pre>
13480 * Transaction ID
13481 * </pre>
13482 */
13483 public Builder clearSinceTxId() {
13484 bitField0_ = (bitField0_ & ~0x00000002);
13485 sinceTxId_ = 0L;
13486 onChanged();
13487 return this;
13488 }
13489
13490 // optional bool forReading = 3 [default = true];
13491 private boolean forReading_ = true;
13492 /**
13493 * <code>optional bool forReading = 3 [default = true];</code>
13494 *
13495 * <pre>
13496 * Whether or not the client will be reading from the returned streams.
13497 * </pre>
13498 */
13499 public boolean hasForReading() {
13500 return ((bitField0_ & 0x00000004) == 0x00000004);
13501 }
13502 /**
13503 * <code>optional bool forReading = 3 [default = true];</code>
13504 *
13505 * <pre>
13506 * Whether or not the client will be reading from the returned streams.
13507 * </pre>
13508 */
13509 public boolean getForReading() {
13510 return forReading_;
13511 }
13512 /**
13513 * <code>optional bool forReading = 3 [default = true];</code>
13514 *
13515 * <pre>
13516 * Whether or not the client will be reading from the returned streams.
13517 * </pre>
13518 */
13519 public Builder setForReading(boolean value) {
13520 bitField0_ |= 0x00000004;
13521 forReading_ = value;
13522 onChanged();
13523 return this;
13524 }
      /**
       * <code>optional bool forReading = 3 [default = true];</code>
       *
       * <pre>
       * Whether or not the client will be reading from the returned streams.
       * </pre>
       */
      public Builder clearForReading() {
        bitField0_ = (bitField0_ & ~0x00000004);  // drop presence bit
        forReading_ = true;                       // restore declared default (true, not false)
        onChanged();
        return this;
      }
13538
      // optional bool inProgressOk = 4 [default = false];
      private boolean inProgressOk_ ;  // defaults to false per the proto declaration
      /**
       * <code>optional bool inProgressOk = 4 [default = false];</code>
       */
      public boolean hasInProgressOk() {
        // Presence check: bit 0x8 of bitField0_ tracks explicit assignment of inProgressOk.
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional bool inProgressOk = 4 [default = false];</code>
       */
      public boolean getInProgressOk() {
        // Returns the current value; false until explicitly set.
        return inProgressOk_;
      }
      /**
       * <code>optional bool inProgressOk = 4 [default = false];</code>
       */
      public Builder setInProgressOk(boolean value) {
        bitField0_ |= 0x00000008;  // mark field as present
        inProgressOk_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool inProgressOk = 4 [default = false];</code>
       */
      public Builder clearInProgressOk() {
        bitField0_ = (bitField0_ & ~0x00000008);  // drop presence bit
        inProgressOk_ = false;                    // restore declared default
        onChanged();
        return this;
      }
13571
13572 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13573 }
13574
    // Eagerly build the shared default (empty) instance. The boolean-arg
    // constructor skips normal field setup, so initFields() is invoked
    // explicitly to assign the proto-declared defaults.
    static {
      defaultInstance = new GetEditLogManifestRequestProto(true);
      defaultInstance.initFields();
    }
13579
13580 // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13581 }
13582
  /**
   * Common read-side contract implemented by both
   * {@code GetEditLogManifestResponseProto} and its {@code Builder}:
   * presence checks and getters for the two required fields
   * (the edit-log manifest and the journal node's HTTP port).
   */
  public interface GetEditLogManifestResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    boolean hasManifest();
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();

    // required uint32 httpPort = 2;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    boolean hasHttpPort();
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    int getHttpPort();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
   */
  public static final class GetEditLogManifestResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements GetEditLogManifestResponseProtoOrBuilder {
    // Use GetEditLogManifestResponseProto.newBuilder() to construct.
    private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only to create the shared defaultInstance; fields are filled in
    // afterwards by initFields() in the static initializer at the bottom.
    private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final GetEditLogManifestResponseProto defaultInstance;
    public static GetEditLogManifestResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public GetEditLogManifestResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not known to this schema version.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: consumes tag/value pairs from the input
    // stream until end-of-message (tag 0), routing unrecognized tags into
    // unknownFields so they round-trip on reserialization.
    private GetEditLogManifestResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (manifest), wire type 2 (length-delimited message).
              // If the field occurs more than once on the wire, later
              // occurrences are merged into the earlier value, per proto2
              // last-merge semantics for message fields.
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = manifest_.toBuilder();
              }
              manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(manifest_);
                manifest_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (httpPort), wire type 0 (varint).
              bitField0_ |= 0x00000002;
              httpPort_ = input.readUInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture whatever unknown fields were read, even on failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
    }

    // Stream parser delegating to the wire-format constructor above.
    public static com.google.protobuf.Parser<GetEditLogManifestResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<GetEditLogManifestResponseProto>() {
      public GetEditLogManifestResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new GetEditLogManifestResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<GetEditLogManifestResponseProto> getParserForType() {
      return PARSER;
    }

    // Presence bitmap: bit 0x1 = manifest, bit 0x2 = httpPort.
    private int bitField0_;
    // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
    public static final int MANIFEST_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    public boolean hasManifest() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
      return manifest_;
    }
    /**
     * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
      return manifest_;
    }

    // required uint32 httpPort = 2;
    public static final int HTTPPORT_FIELD_NUMBER = 2;
    private int httpPort_;
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    public boolean hasHttpPort() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint32 httpPort = 2;</code>
     */
    public int getHttpPort() {
      return httpPort_;
    }

    // Assigns each field its proto-declared default value.
    private void initFields() {
      manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
      httpPort_ = 0;
    }
    // Memoized initialization check: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are `required`, and manifest must itself be initialized.
      if (!hasManifest()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasHttpPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getManifest().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // ensure size is memoized before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, manifest_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, httpPort_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size: -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, manifest_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, httpPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;

      // Field-by-field comparison: presence bits must match, then values.
      boolean result = true;
      result = result && (hasManifest() == other.hasManifest());
      if (hasManifest()) {
        result = result && getManifest()
            .equals(other.getManifest());
      }
      result = result && (hasHttpPort() == other.hasHttpPort());
      if (hasHttpPort()) {
        result = result && (getHttpPort()
            == other.getHttpPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash code; 0 doubles as the "not yet computed" sentinel.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasManifest()) {
        hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
        hash = (53 * hash) + getManifest().hashCode();
      }
      if (hasHttpPort()) {
        hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
        hash = (53 * hash) + getHttpPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // ---- Static parse entry points, all delegating to PARSER. ----
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Eagerly create nested-field builders when reflection requires it.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getManifestFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (manifestBuilder_ == null) {
          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
        } else {
          manifestBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        httpPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
      }

      // build() enforces required-field initialization; buildPartial() does not.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
        // Copy presence bits and field values from builder state into the message.
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (manifestBuilder_ == null) {
          result.manifest_ = manifest_;
        } else {
          result.manifest_ = manifestBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.httpPort_ = httpPort_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
        // Merging the default instance is a no-op.
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
        if (other.hasManifest()) {
          mergeManifest(other.getManifest());
        }
        if (other.hasHttpPort()) {
          setHttpPort(other.getHttpPort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        // Both fields are `required`; manifest must also be recursively initialized.
        if (!hasManifest()) {
          
          return false;
        }
        if (!hasHttpPort()) {
          
          return false;
        }
        if (!getManifest().isInitialized()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Preserve whatever was parsed before the failure, then rethrow.
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bitmap mirroring the message's: bit 0x1 = manifest, 0x2 = httpPort.
      private int bitField0_;

      // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
      // Lazily-created nested builder; when non-null it owns the field state
      // and manifest_ is ignored (see getManifestFieldBuilder()).
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public boolean hasManifest() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
        if (manifestBuilder_ == null) {
          return manifest_;
        } else {
          return manifestBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
        if (manifestBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          manifest_ = value;
          onChanged();
        } else {
          manifestBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public Builder setManifest(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
        if (manifestBuilder_ == null) {
          manifest_ = builderForValue.build();
          onChanged();
        } else {
          manifestBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
        if (manifestBuilder_ == null) {
          // Merge into the existing value only if one was already set and it
          // is not the shared default instance; otherwise just adopt `value`.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
            manifest_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
          } else {
            manifest_ = value;
          }
          onChanged();
        } else {
          manifestBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public Builder clearManifest() {
        if (manifestBuilder_ == null) {
          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
          onChanged();
        } else {
          manifestBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
        bitField0_ |= 0x00000001;  // handing out a builder implies the field is set
        onChanged();
        return getManifestFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
        if (manifestBuilder_ != null) {
          return manifestBuilder_.getMessageOrBuilder();
        } else {
          return manifest_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
          getManifestFieldBuilder() {
        if (manifestBuilder_ == null) {
          // Transfer field ownership from manifest_ to the nested builder.
          manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
                  manifest_,
                  getParentForChildren(),
                  isClean());
          manifest_ = null;
        }
        return manifestBuilder_;
      }

      // required uint32 httpPort = 2;
      private int httpPort_ ;
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public boolean hasHttpPort() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public int getHttpPort() {
        return httpPort_;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder setHttpPort(int value) {
        bitField0_ |= 0x00000002;
        httpPort_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 httpPort = 2;</code>
       */
      public Builder clearHttpPort() {
        bitField0_ = (bitField0_ & ~0x00000002);
        httpPort_ = 0;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
    }

    // Eagerly build the shared default (empty) instance; the boolean-arg
    // constructor skips normal setup, so initFields() is invoked explicitly.
    static {
      defaultInstance = new GetEditLogManifestResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
  }
14241
  /**
   * Common read-side contract implemented by both
   * {@code PrepareRecoveryRequestProto} and its {@code Builder}:
   * presence checks and getters for the request metadata and the
   * transaction ID of the log segment being recovered.
   */
  public interface PrepareRecoveryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 segmentTxId = 2;
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    boolean hasSegmentTxId();
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    long getSegmentTxId();
  }
14269 /**
14270 * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14271 *
14272 * <pre>
14273 **
14274 * prepareRecovery()
14275 * </pre>
14276 */
14277 public static final class PrepareRecoveryRequestProto extends
14278 com.google.protobuf.GeneratedMessage
14279 implements PrepareRecoveryRequestProtoOrBuilder {
14280 // Use PrepareRecoveryRequestProto.newBuilder() to construct.
14281 private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
14282 super(builder);
14283 this.unknownFields = builder.getUnknownFields();
14284 }
14285 private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14286
14287 private static final PrepareRecoveryRequestProto defaultInstance;
14288 public static PrepareRecoveryRequestProto getDefaultInstance() {
14289 return defaultInstance;
14290 }
14291
14292 public PrepareRecoveryRequestProto getDefaultInstanceForType() {
14293 return defaultInstance;
14294 }
14295
14296 private final com.google.protobuf.UnknownFieldSet unknownFields;
14297 @java.lang.Override
14298 public final com.google.protobuf.UnknownFieldSet
14299 getUnknownFields() {
14300 return this.unknownFields;
14301 }
14302 private PrepareRecoveryRequestProto(
14303 com.google.protobuf.CodedInputStream input,
14304 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14305 throws com.google.protobuf.InvalidProtocolBufferException {
14306 initFields();
14307 int mutable_bitField0_ = 0;
14308 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
14309 com.google.protobuf.UnknownFieldSet.newBuilder();
14310 try {
14311 boolean done = false;
14312 while (!done) {
14313 int tag = input.readTag();
14314 switch (tag) {
14315 case 0:
14316 done = true;
14317 break;
14318 default: {
14319 if (!parseUnknownField(input, unknownFields,
14320 extensionRegistry, tag)) {
14321 done = true;
14322 }
14323 break;
14324 }
14325 case 10: {
14326 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
14327 if (((bitField0_ & 0x00000001) == 0x00000001)) {
14328 subBuilder = reqInfo_.toBuilder();
14329 }
14330 reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
14331 if (subBuilder != null) {
14332 subBuilder.mergeFrom(reqInfo_);
14333 reqInfo_ = subBuilder.buildPartial();
14334 }
14335 bitField0_ |= 0x00000001;
14336 break;
14337 }
14338 case 16: {
14339 bitField0_ |= 0x00000002;
14340 segmentTxId_ = input.readUInt64();
14341 break;
14342 }
14343 }
14344 }
14345 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14346 throw e.setUnfinishedMessage(this);
14347 } catch (java.io.IOException e) {
14348 throw new com.google.protobuf.InvalidProtocolBufferException(
14349 e.getMessage()).setUnfinishedMessage(this);
14350 } finally {
14351 this.unknownFields = unknownFields.build();
14352 makeExtensionsImmutable();
14353 }
14354 }
    // Message descriptor for hadoop.hdfs.PrepareRecoveryRequestProto,
    // resolved from the file-level descriptor table of the outer class.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
    }

    // NOTE(review): protoc of this era emits PARSER as a non-final public
    // static field; never reassign it.  Fix belongs in the generator, not here.
    public static com.google.protobuf.Parser<PrepareRecoveryRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<PrepareRecoveryRequestProto>() {
      public PrepareRecoveryRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PrepareRecoveryRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PrepareRecoveryRequestProto> getParserForType() {
      return PARSER;
    }
14381
    // Presence bitmap: bit 0x1 = reqInfo set, bit 0x2 = segmentTxId set.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     *
     * @return true iff reqInfo was explicitly set (presence bit 0x1).
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     *
     * <p>Returns the default instance when unset (see {@code initFields()}).
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 segmentTxId = 2;
    public static final int SEGMENTTXID_FIELD_NUMBER = 2;
    private long segmentTxId_;
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     *
     * @return true iff segmentTxId was explicitly set (presence bit 0x2).
     */
    public boolean hasSegmentTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 segmentTxId = 2;</code>
     */
    public long getSegmentTxId() {
      return segmentTxId_;
    }
14420
    // Resets fields to their proto defaults before parsing begins.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      segmentTxId_ = 0L;
    }
    // Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A message is initialized when both required fields (reqInfo,
    // segmentTxId) are present and reqInfo is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSegmentTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
14445
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      // Forces memoizedSerializedSize to be computed before writing, as the
      // generated runtime expects.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, segmentTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 means not yet computed.  Safe to cache because
    // the message is immutable once constructed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, segmentTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
14476
    private static final long serialVersionUID = 0L;
    // Java serialization hook inherited from GeneratedMessage (serializes via
    // the protobuf wire format rather than default Java serialization).
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: presence flags, field values, and unknown
    // fields must all match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasSegmentTxId() == other.hasSegmentTxId());
      if (hasSegmentTxId()) {
        result = result && (getSegmentTxId()
            == other.getSegmentTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Cached hash; 0 means not yet computed (a legitimately-zero hash is
    // simply recomputed each call).
    private int memoizedHashCode = 0;
    // Hash mixes the descriptor, each set field (tagged with its field
    // number), and unknown fields — consistent with equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasSegmentTxId()) {
        hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSegmentTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
14530
    // Standard generated parseFrom/parseDelimitedFrom overloads; all
    // delegate to PARSER.  The InvalidProtocolBufferException variants parse
    // from fully-buffered data; the IOException variants read from streams.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited form: message is preceded by its varint-encoded length,
    // allowing several messages on one stream.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
14583
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a fresh builder pre-populated from `prototype`.
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
     *
     * <pre>
     **
     * prepareRecovery()
     * </pre>
     *
     * <p>Mutable builder for {@code PrepareRecoveryRequestProto}.  Field
     * presence is tracked in this builder's own {@code bitField0_} (bit 0x1 =
     * reqInfo, bit 0x2 = segmentTxId); {@code buildPartial()} copies those
     * bits into the immutable message.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested-message field builders when the runtime runs
      // in always-use-field-builders mode (used for nested-builder support).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets both fields to defaults and clears their presence bits.
      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        segmentTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
      }

      // Like buildPartial(), but rejects messages missing required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Copies builder state (values + presence bits) into a new message
      // without checking required fields.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.segmentTxId_ = segmentTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Copies only fields that are set on `other`; unknown fields merge too.
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasSegmentTxId()) {
          setSegmentTxId(other.getSegmentTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Builder-side required-field check (no memoization, unlike the
      // message's isInitialized()).
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          
          return false;
        }
        if (!hasSegmentTxId()) {
          
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Keep whatever parsed successfully before the failure, then
          // rethrow (merge happens in the finally block below).
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      // Either reqInfo_ (plain value) or reqInfoBuilder_ (nested builder) is
      // authoritative — never both; reqInfoBuilder_ takes over once created.
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>If reqInfo is already set (and not the default instance), the two
       * messages are merged field-by-field; otherwise `value` replaces it.
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>Marks the field present and returns a mutable nested builder.
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       *
       * <p>Lazily creates the nested-message builder; once created, reqInfo_
       * is nulled and the builder becomes the single source of truth.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }

      // required uint64 segmentTxId = 2;
      private long segmentTxId_ ;
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public boolean hasSegmentTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public long getSegmentTxId() {
        return segmentTxId_;
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public Builder setSegmentTxId(long value) {
        bitField0_ |= 0x00000002;
        segmentTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 segmentTxId = 2;</code>
       */
      public Builder clearSegmentTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        segmentTxId_ = 0L;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
    }
14902
    // Class initializer: builds the shared immutable default instance used by
    // getDefaultInstance() and the parsing path.
    static {
      defaultInstance = new PrepareRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
14907
14908 // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
14909 }
14910
  // Read-only accessor contract shared by PrepareRecoveryResponseProto and
  // its Builder; generated from message PrepareRecoveryResponseProto in
  // QJournalProtocol.proto.
  public interface PrepareRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    boolean hasSegmentState();
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();

    // optional uint64 acceptedInEpoch = 2;
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    boolean hasAcceptedInEpoch();
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    long getAcceptedInEpoch();

    // required uint64 lastWriterEpoch = 3;
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    boolean hasLastWriterEpoch();
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    long getLastWriterEpoch();

    // optional uint64 lastCommittedTxId = 4;
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    boolean hasLastCommittedTxId();
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    long getLastCommittedTxId();
  }
14970 /**
14971 * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
14972 */
14973 public static final class PrepareRecoveryResponseProto extends
14974 com.google.protobuf.GeneratedMessage
14975 implements PrepareRecoveryResponseProtoOrBuilder {
    // Use PrepareRecoveryResponseProto.newBuilder() to construct.
    private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // `noInit` constructor: used only by the static initializer to create the
    // shared default instance; the flag itself is unused.
    private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared immutable default instance (assigned in the class initializer).
    private static final PrepareRecoveryResponseProto defaultInstance;
    public static PrepareRecoveryResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public PrepareRecoveryResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor.  Tag 10 = field 1 (segmentState,
    // length-delimited message); tags 16/24/32 = fields 2/3/4 (varints:
    // acceptedInEpoch, lastWriterEpoch, lastCommittedTxId).  Unknown fields
    // are preserved; presence is tracked in bitField0_.
    private PrepareRecoveryResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Repeated occurrences of segmentState are merged together.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = segmentState_.toBuilder();
              }
              segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(segmentState_);
                segmentState_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              acceptedInEpoch_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              lastWriterEpoch_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              lastCommittedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on throw: freeze collected unknown fields.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Message descriptor for hadoop.hdfs.PrepareRecoveryResponseProto.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
    }

    // NOTE(review): protoc of this era emits PARSER as a non-final public
    // static field; never reassign it.  Fix belongs in the generator, not here.
    public static com.google.protobuf.Parser<PrepareRecoveryResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<PrepareRecoveryResponseProto>() {
      public PrepareRecoveryResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new PrepareRecoveryResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<PrepareRecoveryResponseProto> getParserForType() {
      return PARSER;
    }
15087
    // Presence bitmap: 0x1 segmentState, 0x2 acceptedInEpoch,
    // 0x4 lastWriterEpoch, 0x8 lastCommittedTxId.
    private int bitField0_;
    // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
    public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public boolean hasSegmentState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     *
     * <p>Returns the default instance when unset (see {@code initFields()}).
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
      return segmentState_;
    }
    /**
     * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
      return segmentState_;
    }

    // optional uint64 acceptedInEpoch = 2;
    public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
    private long acceptedInEpoch_;
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    public boolean hasAcceptedInEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional uint64 acceptedInEpoch = 2;</code>
     */
    public long getAcceptedInEpoch() {
      return acceptedInEpoch_;
    }

    // required uint64 lastWriterEpoch = 3;
    public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
    private long lastWriterEpoch_;
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    public boolean hasLastWriterEpoch() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 lastWriterEpoch = 3;</code>
     */
    public long getLastWriterEpoch() {
      return lastWriterEpoch_;
    }

    // optional uint64 lastCommittedTxId = 4;
    public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
    private long lastCommittedTxId_;
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    public boolean hasLastCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional uint64 lastCommittedTxId = 4;</code>
     *
     * <pre>
     * The highest committed txid that this logger has ever seen.
     * This may be higher than the data it actually has, in the case
     * that it was lagging before the old writer crashed.
     * </pre>
     */
    public long getLastCommittedTxId() {
      return lastCommittedTxId_;
    }
15170
// Resets every field to its proto default; invoked on the singleton
// default instance so unset fields read back consistent values.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
  lastWriterEpoch_ = 0L;
  lastCommittedTxId_ = 0L;
}
// Cached isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// True when all required fields are set and any present nested message is
// itself initialized. Result is memoized; the message is immutable.
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;

  // lastWriterEpoch is the only required field of this message.
  if (!hasLastWriterEpoch()) {
    memoizedIsInitialized = 0;
    return false;
  }
  // segmentState is optional, but if present it must itself be initialized.
  if (hasSegmentState()) {
    if (!getSegmentState().isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}
15195
// Serializes only the fields whose presence bit is set, in field-number
// order, followed by any unknown fields preserved from parsing.
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();  // ensure the size is memoized before writing
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeMessage(1, segmentState_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt64(2, acceptedInEpoch_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeUInt64(3, lastWriterEpoch_);
  }
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    output.writeUInt64(4, lastCommittedTxId_);
  }
  getUnknownFields().writeTo(output);
}
15213
// Cached wire size in bytes; -1 means not yet computed.
private int memoizedSerializedSize = -1;
// Computes (and memoizes) the serialized byte size: the sum of the sizes
// of every set field plus any preserved unknown fields.
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(1, segmentState_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt64Size(2, acceptedInEpoch_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt64Size(3, lastWriterEpoch_);
  }
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    size += com.google.protobuf.CodedOutputStream
      .computeUInt64Size(4, lastCommittedTxId_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
15240
private static final long serialVersionUID = 0L;
// Delegates Java serialization to GeneratedMessage's serialized proxy.
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
15247
// Value equality: two messages are equal when each field has the same
// presence and, if present, the same value, and unknown fields match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;

  boolean result = true;
  result = result && (hasSegmentState() == other.hasSegmentState());
  if (hasSegmentState()) {
    result = result && getSegmentState()
        .equals(other.getSegmentState());
  }
  result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
  if (hasAcceptedInEpoch()) {
    result = result && (getAcceptedInEpoch()
        == other.getAcceptedInEpoch());
  }
  result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
  if (hasLastWriterEpoch()) {
    result = result && (getLastWriterEpoch()
        == other.getLastWriterEpoch());
  }
  result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
  if (hasLastCommittedTxId()) {
    result = result && (getLastCommittedTxId()
        == other.getLastCommittedTxId());
  }
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
15283
// Cached hash; 0 means not yet computed (a genuinely-zero hash is recomputed,
// which is harmless since the message is immutable).
private int memoizedHashCode = 0;
// Hash mixes each present field's number and value, consistent with equals().
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasSegmentState()) {
    hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
    hash = (53 * hash) + getSegmentState().hashCode();
  }
  if (hasAcceptedInEpoch()) {
    hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getAcceptedInEpoch());
  }
  if (hasLastWriterEpoch()) {
    hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastWriterEpoch());
  }
  if (hasLastCommittedTxId()) {
    hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastCommittedTxId());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
15312
// Static parse entry points. All variants delegate to PARSER; the
// ExtensionRegistryLite overloads allow resolving extensions while parsing,
// and parseDelimitedFrom reads a varint length prefix before the message.
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
15365
// Builder factories: newBuilder() starts empty, newBuilder(prototype) starts
// pre-populated from an existing message, toBuilder() copies this instance.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

// Framework hook: creates a builder attached to a parent for change
// notification (used by nested-builder plumbing).
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
15379 /**
15380 * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
15381 */
15382 public static final class Builder extends
15383 com.google.protobuf.GeneratedMessage.Builder<Builder>
15384 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
15385 public static final com.google.protobuf.Descriptors.Descriptor
15386 getDescriptor() {
15387 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15388 }
15389
15390 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15391 internalGetFieldAccessorTable() {
15392 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
15393 .ensureFieldAccessorsInitialized(
15394 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
15395 }
15396
15397 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
15398 private Builder() {
15399 maybeForceBuilderInitialization();
15400 }
15401
15402 private Builder(
15403 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15404 super(parent);
15405 maybeForceBuilderInitialization();
15406 }
15407 private void maybeForceBuilderInitialization() {
15408 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
15409 getSegmentStateFieldBuilder();
15410 }
15411 }
15412 private static Builder create() {
15413 return new Builder();
15414 }
15415
15416 public Builder clear() {
15417 super.clear();
15418 if (segmentStateBuilder_ == null) {
15419 segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15420 } else {
15421 segmentStateBuilder_.clear();
15422 }
15423 bitField0_ = (bitField0_ & ~0x00000001);
15424 acceptedInEpoch_ = 0L;
15425 bitField0_ = (bitField0_ & ~0x00000002);
15426 lastWriterEpoch_ = 0L;
15427 bitField0_ = (bitField0_ & ~0x00000004);
15428 lastCommittedTxId_ = 0L;
15429 bitField0_ = (bitField0_ & ~0x00000008);
15430 return this;
15431 }
15432
15433 public Builder clone() {
15434 return create().mergeFrom(buildPartial());
15435 }
15436
15437 public com.google.protobuf.Descriptors.Descriptor
15438 getDescriptorForType() {
15439 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15440 }
15441
15442 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
15443 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
15444 }
15445
15446 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
15447 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
15448 if (!result.isInitialized()) {
15449 throw newUninitializedMessageException(result);
15450 }
15451 return result;
15452 }
15453
15454 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
15455 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
15456 int from_bitField0_ = bitField0_;
15457 int to_bitField0_ = 0;
15458 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
15459 to_bitField0_ |= 0x00000001;
15460 }
15461 if (segmentStateBuilder_ == null) {
15462 result.segmentState_ = segmentState_;
15463 } else {
15464 result.segmentState_ = segmentStateBuilder_.build();
15465 }
15466 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
15467 to_bitField0_ |= 0x00000002;
15468 }
15469 result.acceptedInEpoch_ = acceptedInEpoch_;
15470 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
15471 to_bitField0_ |= 0x00000004;
15472 }
15473 result.lastWriterEpoch_ = lastWriterEpoch_;
15474 if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
15475 to_bitField0_ |= 0x00000008;
15476 }
15477 result.lastCommittedTxId_ = lastCommittedTxId_;
15478 result.bitField0_ = to_bitField0_;
15479 onBuilt();
15480 return result;
15481 }
15482
15483 public Builder mergeFrom(com.google.protobuf.Message other) {
15484 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
15485 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
15486 } else {
15487 super.mergeFrom(other);
15488 return this;
15489 }
15490 }
15491
15492 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
15493 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
15494 if (other.hasSegmentState()) {
15495 mergeSegmentState(other.getSegmentState());
15496 }
15497 if (other.hasAcceptedInEpoch()) {
15498 setAcceptedInEpoch(other.getAcceptedInEpoch());
15499 }
15500 if (other.hasLastWriterEpoch()) {
15501 setLastWriterEpoch(other.getLastWriterEpoch());
15502 }
15503 if (other.hasLastCommittedTxId()) {
15504 setLastCommittedTxId(other.getLastCommittedTxId());
15505 }
15506 this.mergeUnknownFields(other.getUnknownFields());
15507 return this;
15508 }
15509
15510 public final boolean isInitialized() {
15511 if (!hasLastWriterEpoch()) {
15512
15513 return false;
15514 }
15515 if (hasSegmentState()) {
15516 if (!getSegmentState().isInitialized()) {
15517
15518 return false;
15519 }
15520 }
15521 return true;
15522 }
15523
15524 public Builder mergeFrom(
15525 com.google.protobuf.CodedInputStream input,
15526 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15527 throws java.io.IOException {
15528 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null;
15529 try {
15530 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15531 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15532 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage();
15533 throw e;
15534 } finally {
15535 if (parsedMessage != null) {
15536 mergeFrom(parsedMessage);
15537 }
15538 }
15539 return this;
15540 }
15541 private int bitField0_;
15542
15543 // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15544 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15545 private com.google.protobuf.SingleFieldBuilder<
15546 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
15547 /**
15548 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15549 */
15550 public boolean hasSegmentState() {
15551 return ((bitField0_ & 0x00000001) == 0x00000001);
15552 }
15553 /**
15554 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15555 */
15556 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
15557 if (segmentStateBuilder_ == null) {
15558 return segmentState_;
15559 } else {
15560 return segmentStateBuilder_.getMessage();
15561 }
15562 }
15563 /**
15564 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15565 */
15566 public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15567 if (segmentStateBuilder_ == null) {
15568 if (value == null) {
15569 throw new NullPointerException();
15570 }
15571 segmentState_ = value;
15572 onChanged();
15573 } else {
15574 segmentStateBuilder_.setMessage(value);
15575 }
15576 bitField0_ |= 0x00000001;
15577 return this;
15578 }
15579 /**
15580 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15581 */
15582 public Builder setSegmentState(
15583 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
15584 if (segmentStateBuilder_ == null) {
15585 segmentState_ = builderForValue.build();
15586 onChanged();
15587 } else {
15588 segmentStateBuilder_.setMessage(builderForValue.build());
15589 }
15590 bitField0_ |= 0x00000001;
15591 return this;
15592 }
15593 /**
15594 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15595 */
15596 public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15597 if (segmentStateBuilder_ == null) {
15598 if (((bitField0_ & 0x00000001) == 0x00000001) &&
15599 segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
15600 segmentState_ =
15601 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
15602 } else {
15603 segmentState_ = value;
15604 }
15605 onChanged();
15606 } else {
15607 segmentStateBuilder_.mergeFrom(value);
15608 }
15609 bitField0_ |= 0x00000001;
15610 return this;
15611 }
15612 /**
15613 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15614 */
15615 public Builder clearSegmentState() {
15616 if (segmentStateBuilder_ == null) {
15617 segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15618 onChanged();
15619 } else {
15620 segmentStateBuilder_.clear();
15621 }
15622 bitField0_ = (bitField0_ & ~0x00000001);
15623 return this;
15624 }
15625 /**
15626 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15627 */
15628 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
15629 bitField0_ |= 0x00000001;
15630 onChanged();
15631 return getSegmentStateFieldBuilder().getBuilder();
15632 }
15633 /**
15634 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15635 */
15636 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
15637 if (segmentStateBuilder_ != null) {
15638 return segmentStateBuilder_.getMessageOrBuilder();
15639 } else {
15640 return segmentState_;
15641 }
15642 }
15643 /**
15644 * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15645 */
15646 private com.google.protobuf.SingleFieldBuilder<
15647 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
15648 getSegmentStateFieldBuilder() {
15649 if (segmentStateBuilder_ == null) {
15650 segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
15651 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
15652 segmentState_,
15653 getParentForChildren(),
15654 isClean());
15655 segmentState_ = null;
15656 }
15657 return segmentStateBuilder_;
15658 }
15659
15660 // optional uint64 acceptedInEpoch = 2;
15661 private long acceptedInEpoch_ ;
15662 /**
15663 * <code>optional uint64 acceptedInEpoch = 2;</code>
15664 */
15665 public boolean hasAcceptedInEpoch() {
15666 return ((bitField0_ & 0x00000002) == 0x00000002);
15667 }
15668 /**
15669 * <code>optional uint64 acceptedInEpoch = 2;</code>
15670 */
15671 public long getAcceptedInEpoch() {
15672 return acceptedInEpoch_;
15673 }
15674 /**
15675 * <code>optional uint64 acceptedInEpoch = 2;</code>
15676 */
15677 public Builder setAcceptedInEpoch(long value) {
15678 bitField0_ |= 0x00000002;
15679 acceptedInEpoch_ = value;
15680 onChanged();
15681 return this;
15682 }
15683 /**
15684 * <code>optional uint64 acceptedInEpoch = 2;</code>
15685 */
15686 public Builder clearAcceptedInEpoch() {
15687 bitField0_ = (bitField0_ & ~0x00000002);
15688 acceptedInEpoch_ = 0L;
15689 onChanged();
15690 return this;
15691 }
15692
15693 // required uint64 lastWriterEpoch = 3;
15694 private long lastWriterEpoch_ ;
15695 /**
15696 * <code>required uint64 lastWriterEpoch = 3;</code>
15697 */
15698 public boolean hasLastWriterEpoch() {
15699 return ((bitField0_ & 0x00000004) == 0x00000004);
15700 }
15701 /**
15702 * <code>required uint64 lastWriterEpoch = 3;</code>
15703 */
15704 public long getLastWriterEpoch() {
15705 return lastWriterEpoch_;
15706 }
15707 /**
15708 * <code>required uint64 lastWriterEpoch = 3;</code>
15709 */
15710 public Builder setLastWriterEpoch(long value) {
15711 bitField0_ |= 0x00000004;
15712 lastWriterEpoch_ = value;
15713 onChanged();
15714 return this;
15715 }
15716 /**
15717 * <code>required uint64 lastWriterEpoch = 3;</code>
15718 */
15719 public Builder clearLastWriterEpoch() {
15720 bitField0_ = (bitField0_ & ~0x00000004);
15721 lastWriterEpoch_ = 0L;
15722 onChanged();
15723 return this;
15724 }
15725
15726 // optional uint64 lastCommittedTxId = 4;
15727 private long lastCommittedTxId_ ;
15728 /**
15729 * <code>optional uint64 lastCommittedTxId = 4;</code>
15730 *
15731 * <pre>
15732 * The highest committed txid that this logger has ever seen.
15733 * This may be higher than the data it actually has, in the case
15734 * that it was lagging before the old writer crashed.
15735 * </pre>
15736 */
15737 public boolean hasLastCommittedTxId() {
15738 return ((bitField0_ & 0x00000008) == 0x00000008);
15739 }
15740 /**
15741 * <code>optional uint64 lastCommittedTxId = 4;</code>
15742 *
15743 * <pre>
15744 * The highest committed txid that this logger has ever seen.
15745 * This may be higher than the data it actually has, in the case
15746 * that it was lagging before the old writer crashed.
15747 * </pre>
15748 */
15749 public long getLastCommittedTxId() {
15750 return lastCommittedTxId_;
15751 }
15752 /**
15753 * <code>optional uint64 lastCommittedTxId = 4;</code>
15754 *
15755 * <pre>
15756 * The highest committed txid that this logger has ever seen.
15757 * This may be higher than the data it actually has, in the case
15758 * that it was lagging before the old writer crashed.
15759 * </pre>
15760 */
15761 public Builder setLastCommittedTxId(long value) {
15762 bitField0_ |= 0x00000008;
15763 lastCommittedTxId_ = value;
15764 onChanged();
15765 return this;
15766 }
15767 /**
15768 * <code>optional uint64 lastCommittedTxId = 4;</code>
15769 *
15770 * <pre>
15771 * The highest committed txid that this logger has ever seen.
15772 * This may be higher than the data it actually has, in the case
15773 * that it was lagging before the old writer crashed.
15774 * </pre>
15775 */
15776 public Builder clearLastCommittedTxId() {
15777 bitField0_ = (bitField0_ & ~0x00000008);
15778 lastCommittedTxId_ = 0L;
15779 onChanged();
15780 return this;
15781 }
15782
15783 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15784 }
15785
// Eagerly creates the singleton default instance with all fields at their
// proto defaults.
static {
  defaultInstance = new PrepareRecoveryResponseProto(true);
  defaultInstance.initFields();
}
15790
15791 // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15792 }
15793
// Read-only view shared by AcceptRecoveryRequestProto and its Builder:
// presence checks plus getters for the three fields of the message.
public interface AcceptRecoveryRequestProtoOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
  /**
   * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
   */
  boolean hasReqInfo();
  /**
   * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
   */
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
  /**
   * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
   */
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

  // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
  /**
   * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
   *
   * <pre>
   ** Details on the segment to recover
   * </pre>
   */
  boolean hasStateToAccept();
  /**
   * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
   *
   * <pre>
   ** Details on the segment to recover
   * </pre>
   */
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
  /**
   * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
   *
   * <pre>
   ** Details on the segment to recover
   * </pre>
   */
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();

  // required string fromURL = 3;
  /**
   * <code>required string fromURL = 3;</code>
   *
   * <pre>
   ** The URL from which the log may be copied
   * </pre>
   */
  boolean hasFromURL();
  /**
   * <code>required string fromURL = 3;</code>
   *
   * <pre>
   ** The URL from which the log may be copied
   * </pre>
   */
  java.lang.String getFromURL();
  /**
   * <code>required string fromURL = 3;</code>
   *
   * <pre>
   ** The URL from which the log may be copied
   * </pre>
   */
  com.google.protobuf.ByteString
      getFromURLBytes();
}
15864 /**
15865 * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
15866 *
15867 * <pre>
15868 **
15869 * acceptRecovery()
15870 * </pre>
15871 */
15872 public static final class AcceptRecoveryRequestProto extends
15873 com.google.protobuf.GeneratedMessage
15874 implements AcceptRecoveryRequestProtoOrBuilder {
// Use AcceptRecoveryRequestProto.newBuilder() to construct.
private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// noInit constructor used only for the singleton default instance.
private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

// Singleton with all fields at proto defaults (assigned in the static
// initializer further down the class).
private static final AcceptRecoveryRequestProto defaultInstance;
public static AcceptRecoveryRequestProto getDefaultInstance() {
  return defaultInstance;
}

public AcceptRecoveryRequestProto getDefaultInstanceForType() {
  return defaultInstance;
}

// Fields present on the wire but unknown to this schema version, preserved
// for round-tripping.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0),
// dispatching on the tag (field number << 3 | wire type). Unrecognized tags
// are preserved in unknownFields.
private AcceptRecoveryRequestProto(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // Field 1 (reqInfo), length-delimited. If the field appears more
          // than once, the occurrences are merged per proto semantics.
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            subBuilder = reqInfo_.toBuilder();
          }
          reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(reqInfo_);
            reqInfo_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000001;
          break;
        }
        case 18: {
          // Field 2 (stateToAccept), length-delimited; same merge handling.
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
          if (((bitField0_ & 0x00000002) == 0x00000002)) {
            subBuilder = stateToAccept_.toBuilder();
          }
          stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(stateToAccept_);
            stateToAccept_ = subBuilder.buildPartial();
          }
          bitField0_ |= 0x00000002;
          break;
        }
        case 26: {
          // Field 3 (fromURL), stored as bytes; lazily decoded to String.
          bitField0_ |= 0x00000004;
          fromURL_ = input.readBytes();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
}

protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
}

// Shared parser delegating to the wire-format parsing constructor above.
public static com.google.protobuf.Parser<AcceptRecoveryRequestProto> PARSER =
    new com.google.protobuf.AbstractParser<AcceptRecoveryRequestProto>() {
  public AcceptRecoveryRequestProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return new AcceptRecoveryRequestProto(input, extensionRegistry);
  }
};

@java.lang.Override
public com.google.protobuf.Parser<AcceptRecoveryRequestProto> getParserForType() {
  return PARSER;
}
15989
    // Has-bits: each required/optional field gets one bit recording whether it
    // was explicitly set.
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      // Bit 0 of bitField0_ tracks presence of reqInfo.
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }
16012
    // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
    public static final int STATETOACCEPT_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover
     * </pre>
     */
    public boolean hasStateToAccept() {
      // Bit 1 of bitField0_ tracks presence of stateToAccept.
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover
     * </pre>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
      return stateToAccept_;
    }
    /**
     * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
     *
     * <pre>
     ** Details on the segment to recover
     * </pre>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
      return stateToAccept_;
    }
16046
    // required string fromURL = 3;
    public static final int FROMURL_FIELD_NUMBER = 3;
    // Holds either a java.lang.String or a ByteString; decoded lazily below.
    private java.lang.Object fromURL_;
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
     */
    public boolean hasFromURL() {
      // Bit 2 of bitField0_ tracks presence of fromURL.
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
     */
    public java.lang.String getFromURL() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String only when the bytes are valid UTF-8;
        // otherwise later calls re-decode from the original bytes.
        if (bs.isValidUtf8()) {
          fromURL_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string fromURL = 3;</code>
     *
     * <pre>
     ** The URL from which the log may be copied
     * </pre>
     */
    public com.google.protobuf.ByteString
        getFromURLBytes() {
      java.lang.Object ref = fromURL_;
      if (ref instanceof java.lang.String) {
        // Encode and cache the ByteString form for subsequent serialization.
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        fromURL_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
16101
    // Resets every field to its protobuf default value.
    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
      fromURL_ = "";
    }
    // Memoized result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A message is initialized when all three required fields are present and
    // the nested required messages are themselves initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStateToAccept()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFromURL()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStateToAccept().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
16135
    // Serializes the set fields (in field-number order) followed by any
    // unknown fields preserved from parsing.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces the size cache to be populated before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBytes(3, getFromURLBytes());
      }
      getUnknownFields().writeTo(output);
    }
16150
    // Cached wire size; -1 means "not yet computed".
    private int memoizedSerializedSize = -1;
    // Computes (and caches) the serialized byte size of the set fields plus
    // any unknown fields.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, stateToAccept_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(3, getFromURLBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
16173
    private static final long serialVersionUID = 0L;
    // Java-serialization hook; delegates to GeneratedMessage's replacement
    // object so the message serializes via its protobuf form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
16180
    // Value equality: two messages are equal when each field has the same
    // presence and value, and their unknown-field sets match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStateToAccept() == other.hasStateToAccept());
      if (hasStateToAccept()) {
        result = result && getStateToAccept()
            .equals(other.getStateToAccept());
      }
      result = result && (hasFromURL() == other.hasFromURL());
      if (hasFromURL()) {
        result = result && getFromURL()
            .equals(other.getFromURL());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
16211
    // Cached hash; 0 is the "not yet computed" sentinel, so a message whose
    // hash happens to be 0 is recomputed on every call.
    private int memoizedHashCode = 0;
    // Hash mixes the descriptor, each present field (tagged by its field
    // number), and the unknown fields — consistent with equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStateToAccept()) {
        hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
        hash = (53 * hash) + getStateToAccept().hashCode();
      }
      if (hasFromURL()) {
        hash = (37 * hash) + FROMURL_FIELD_NUMBER;
        hash = (53 * hash) + getFromURL().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
16236
    // Static parse entry points; all delegate to PARSER. The ByteString/byte[]
    // overloads throw InvalidProtocolBufferException, the stream overloads
    // throw IOException. parseDelimitedFrom reads a length-prefixed message.
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
16289
    // Builder factory methods: a fresh builder, a builder pre-populated from
    // a prototype message, and a builder initialized from this instance.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Internal factory used by the runtime to create a builder attached to a
    // parent (for nested-builder change propagation).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
16303 /**
16304 * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
16305 *
16306 * <pre>
16307 **
16308 * acceptRecovery()
16309 * </pre>
16310 */
16311 public static final class Builder extends
16312 com.google.protobuf.GeneratedMessage.Builder<Builder>
16313 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
16314 public static final com.google.protobuf.Descriptors.Descriptor
16315 getDescriptor() {
16316 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16317 }
16318
16319 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16320 internalGetFieldAccessorTable() {
16321 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
16322 .ensureFieldAccessorsInitialized(
16323 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
16324 }
16325
16326 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
16327 private Builder() {
16328 maybeForceBuilderInitialization();
16329 }
16330
16331 private Builder(
16332 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16333 super(parent);
16334 maybeForceBuilderInitialization();
16335 }
16336 private void maybeForceBuilderInitialization() {
16337 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16338 getReqInfoFieldBuilder();
16339 getStateToAcceptFieldBuilder();
16340 }
16341 }
16342 private static Builder create() {
16343 return new Builder();
16344 }
16345
16346 public Builder clear() {
16347 super.clear();
16348 if (reqInfoBuilder_ == null) {
16349 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16350 } else {
16351 reqInfoBuilder_.clear();
16352 }
16353 bitField0_ = (bitField0_ & ~0x00000001);
16354 if (stateToAcceptBuilder_ == null) {
16355 stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16356 } else {
16357 stateToAcceptBuilder_.clear();
16358 }
16359 bitField0_ = (bitField0_ & ~0x00000002);
16360 fromURL_ = "";
16361 bitField0_ = (bitField0_ & ~0x00000004);
16362 return this;
16363 }
16364
16365 public Builder clone() {
16366 return create().mergeFrom(buildPartial());
16367 }
16368
16369 public com.google.protobuf.Descriptors.Descriptor
16370 getDescriptorForType() {
16371 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16372 }
16373
16374 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
16375 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
16376 }
16377
16378 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
16379 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
16380 if (!result.isInitialized()) {
16381 throw newUninitializedMessageException(result);
16382 }
16383 return result;
16384 }
16385
16386 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
16387 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
16388 int from_bitField0_ = bitField0_;
16389 int to_bitField0_ = 0;
16390 if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16391 to_bitField0_ |= 0x00000001;
16392 }
16393 if (reqInfoBuilder_ == null) {
16394 result.reqInfo_ = reqInfo_;
16395 } else {
16396 result.reqInfo_ = reqInfoBuilder_.build();
16397 }
16398 if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
16399 to_bitField0_ |= 0x00000002;
16400 }
16401 if (stateToAcceptBuilder_ == null) {
16402 result.stateToAccept_ = stateToAccept_;
16403 } else {
16404 result.stateToAccept_ = stateToAcceptBuilder_.build();
16405 }
16406 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
16407 to_bitField0_ |= 0x00000004;
16408 }
16409 result.fromURL_ = fromURL_;
16410 result.bitField0_ = to_bitField0_;
16411 onBuilt();
16412 return result;
16413 }
16414
16415 public Builder mergeFrom(com.google.protobuf.Message other) {
16416 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
16417 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
16418 } else {
16419 super.mergeFrom(other);
16420 return this;
16421 }
16422 }
16423
16424 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
16425 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
16426 if (other.hasReqInfo()) {
16427 mergeReqInfo(other.getReqInfo());
16428 }
16429 if (other.hasStateToAccept()) {
16430 mergeStateToAccept(other.getStateToAccept());
16431 }
16432 if (other.hasFromURL()) {
16433 bitField0_ |= 0x00000004;
16434 fromURL_ = other.fromURL_;
16435 onChanged();
16436 }
16437 this.mergeUnknownFields(other.getUnknownFields());
16438 return this;
16439 }
16440
16441 public final boolean isInitialized() {
16442 if (!hasReqInfo()) {
16443
16444 return false;
16445 }
16446 if (!hasStateToAccept()) {
16447
16448 return false;
16449 }
16450 if (!hasFromURL()) {
16451
16452 return false;
16453 }
16454 if (!getReqInfo().isInitialized()) {
16455
16456 return false;
16457 }
16458 if (!getStateToAccept().isInitialized()) {
16459
16460 return false;
16461 }
16462 return true;
16463 }
16464
16465 public Builder mergeFrom(
16466 com.google.protobuf.CodedInputStream input,
16467 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16468 throws java.io.IOException {
16469 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null;
16470 try {
16471 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16472 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16473 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage();
16474 throw e;
16475 } finally {
16476 if (parsedMessage != null) {
16477 mergeFrom(parsedMessage);
16478 }
16479 }
16480 return this;
16481 }
16482 private int bitField0_;
16483
16484 // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
16485 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16486 private com.google.protobuf.SingleFieldBuilder<
16487 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
16488 /**
16489 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16490 */
16491 public boolean hasReqInfo() {
16492 return ((bitField0_ & 0x00000001) == 0x00000001);
16493 }
16494 /**
16495 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16496 */
16497 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
16498 if (reqInfoBuilder_ == null) {
16499 return reqInfo_;
16500 } else {
16501 return reqInfoBuilder_.getMessage();
16502 }
16503 }
16504 /**
16505 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16506 */
16507 public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16508 if (reqInfoBuilder_ == null) {
16509 if (value == null) {
16510 throw new NullPointerException();
16511 }
16512 reqInfo_ = value;
16513 onChanged();
16514 } else {
16515 reqInfoBuilder_.setMessage(value);
16516 }
16517 bitField0_ |= 0x00000001;
16518 return this;
16519 }
16520 /**
16521 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16522 */
16523 public Builder setReqInfo(
16524 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
16525 if (reqInfoBuilder_ == null) {
16526 reqInfo_ = builderForValue.build();
16527 onChanged();
16528 } else {
16529 reqInfoBuilder_.setMessage(builderForValue.build());
16530 }
16531 bitField0_ |= 0x00000001;
16532 return this;
16533 }
16534 /**
16535 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16536 */
16537 public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16538 if (reqInfoBuilder_ == null) {
16539 if (((bitField0_ & 0x00000001) == 0x00000001) &&
16540 reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
16541 reqInfo_ =
16542 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
16543 } else {
16544 reqInfo_ = value;
16545 }
16546 onChanged();
16547 } else {
16548 reqInfoBuilder_.mergeFrom(value);
16549 }
16550 bitField0_ |= 0x00000001;
16551 return this;
16552 }
16553 /**
16554 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16555 */
16556 public Builder clearReqInfo() {
16557 if (reqInfoBuilder_ == null) {
16558 reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16559 onChanged();
16560 } else {
16561 reqInfoBuilder_.clear();
16562 }
16563 bitField0_ = (bitField0_ & ~0x00000001);
16564 return this;
16565 }
16566 /**
16567 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16568 */
16569 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
16570 bitField0_ |= 0x00000001;
16571 onChanged();
16572 return getReqInfoFieldBuilder().getBuilder();
16573 }
16574 /**
16575 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16576 */
16577 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
16578 if (reqInfoBuilder_ != null) {
16579 return reqInfoBuilder_.getMessageOrBuilder();
16580 } else {
16581 return reqInfo_;
16582 }
16583 }
16584 /**
16585 * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16586 */
16587 private com.google.protobuf.SingleFieldBuilder<
16588 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
16589 getReqInfoFieldBuilder() {
16590 if (reqInfoBuilder_ == null) {
16591 reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16592 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
16593 reqInfo_,
16594 getParentForChildren(),
16595 isClean());
16596 reqInfo_ = null;
16597 }
16598 return reqInfoBuilder_;
16599 }
16600
16601 // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
16602 private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16603 private com.google.protobuf.SingleFieldBuilder<
16604 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
16605 /**
16606 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16607 *
16608 * <pre>
16609 ** Details on the segment to recover
16610 * </pre>
16611 */
16612 public boolean hasStateToAccept() {
16613 return ((bitField0_ & 0x00000002) == 0x00000002);
16614 }
16615 /**
16616 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16617 *
16618 * <pre>
16619 ** Details on the segment to recover
16620 * </pre>
16621 */
16622 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
16623 if (stateToAcceptBuilder_ == null) {
16624 return stateToAccept_;
16625 } else {
16626 return stateToAcceptBuilder_.getMessage();
16627 }
16628 }
16629 /**
16630 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16631 *
16632 * <pre>
16633 ** Details on the segment to recover
16634 * </pre>
16635 */
16636 public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16637 if (stateToAcceptBuilder_ == null) {
16638 if (value == null) {
16639 throw new NullPointerException();
16640 }
16641 stateToAccept_ = value;
16642 onChanged();
16643 } else {
16644 stateToAcceptBuilder_.setMessage(value);
16645 }
16646 bitField0_ |= 0x00000002;
16647 return this;
16648 }
16649 /**
16650 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16651 *
16652 * <pre>
16653 ** Details on the segment to recover
16654 * </pre>
16655 */
16656 public Builder setStateToAccept(
16657 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
16658 if (stateToAcceptBuilder_ == null) {
16659 stateToAccept_ = builderForValue.build();
16660 onChanged();
16661 } else {
16662 stateToAcceptBuilder_.setMessage(builderForValue.build());
16663 }
16664 bitField0_ |= 0x00000002;
16665 return this;
16666 }
16667 /**
16668 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16669 *
16670 * <pre>
16671 ** Details on the segment to recover
16672 * </pre>
16673 */
16674 public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16675 if (stateToAcceptBuilder_ == null) {
16676 if (((bitField0_ & 0x00000002) == 0x00000002) &&
16677 stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
16678 stateToAccept_ =
16679 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
16680 } else {
16681 stateToAccept_ = value;
16682 }
16683 onChanged();
16684 } else {
16685 stateToAcceptBuilder_.mergeFrom(value);
16686 }
16687 bitField0_ |= 0x00000002;
16688 return this;
16689 }
16690 /**
16691 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16692 *
16693 * <pre>
16694 ** Details on the segment to recover
16695 * </pre>
16696 */
16697 public Builder clearStateToAccept() {
16698 if (stateToAcceptBuilder_ == null) {
16699 stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16700 onChanged();
16701 } else {
16702 stateToAcceptBuilder_.clear();
16703 }
16704 bitField0_ = (bitField0_ & ~0x00000002);
16705 return this;
16706 }
16707 /**
16708 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16709 *
16710 * <pre>
16711 ** Details on the segment to recover
16712 * </pre>
16713 */
16714 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
16715 bitField0_ |= 0x00000002;
16716 onChanged();
16717 return getStateToAcceptFieldBuilder().getBuilder();
16718 }
16719 /**
16720 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16721 *
16722 * <pre>
16723 ** Details on the segment to recover
16724 * </pre>
16725 */
16726 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
16727 if (stateToAcceptBuilder_ != null) {
16728 return stateToAcceptBuilder_.getMessageOrBuilder();
16729 } else {
16730 return stateToAccept_;
16731 }
16732 }
16733 /**
16734 * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16735 *
16736 * <pre>
16737 ** Details on the segment to recover
16738 * </pre>
16739 */
16740 private com.google.protobuf.SingleFieldBuilder<
16741 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
16742 getStateToAcceptFieldBuilder() {
16743 if (stateToAcceptBuilder_ == null) {
16744 stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16745 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
16746 stateToAccept_,
16747 getParentForChildren(),
16748 isClean());
16749 stateToAccept_ = null;
16750 }
16751 return stateToAcceptBuilder_;
16752 }
16753
16754 // required string fromURL = 3;
16755 private java.lang.Object fromURL_ = "";
16756 /**
16757 * <code>required string fromURL = 3;</code>
16758 *
16759 * <pre>
16760 ** The URL from which the log may be copied
16761 * </pre>
16762 */
16763 public boolean hasFromURL() {
16764 return ((bitField0_ & 0x00000004) == 0x00000004);
16765 }
16766 /**
16767 * <code>required string fromURL = 3;</code>
16768 *
16769 * <pre>
16770 ** The URL from which the log may be copied
16771 * </pre>
16772 */
16773 public java.lang.String getFromURL() {
16774 java.lang.Object ref = fromURL_;
16775 if (!(ref instanceof java.lang.String)) {
16776 java.lang.String s = ((com.google.protobuf.ByteString) ref)
16777 .toStringUtf8();
16778 fromURL_ = s;
16779 return s;
16780 } else {
16781 return (java.lang.String) ref;
16782 }
16783 }
16784 /**
16785 * <code>required string fromURL = 3;</code>
16786 *
16787 * <pre>
16788 ** The URL from which the log may be copied
16789 * </pre>
16790 */
16791 public com.google.protobuf.ByteString
16792 getFromURLBytes() {
16793 java.lang.Object ref = fromURL_;
16794 if (ref instanceof String) {
16795 com.google.protobuf.ByteString b =
16796 com.google.protobuf.ByteString.copyFromUtf8(
16797 (java.lang.String) ref);
16798 fromURL_ = b;
16799 return b;
16800 } else {
16801 return (com.google.protobuf.ByteString) ref;
16802 }
16803 }
16804 /**
16805 * <code>required string fromURL = 3;</code>
16806 *
16807 * <pre>
16808 ** The URL from which the log may be copied
16809 * </pre>
16810 */
16811 public Builder setFromURL(
16812 java.lang.String value) {
16813 if (value == null) {
16814 throw new NullPointerException();
16815 }
16816 bitField0_ |= 0x00000004;
16817 fromURL_ = value;
16818 onChanged();
16819 return this;
16820 }
16821 /**
16822 * <code>required string fromURL = 3;</code>
16823 *
16824 * <pre>
16825 ** The URL from which the log may be copied
16826 * </pre>
16827 */
16828 public Builder clearFromURL() {
16829 bitField0_ = (bitField0_ & ~0x00000004);
16830 fromURL_ = getDefaultInstance().getFromURL();
16831 onChanged();
16832 return this;
16833 }
16834 /**
16835 * <code>required string fromURL = 3;</code>
16836 *
16837 * <pre>
16838 ** The URL from which the log may be copied
16839 * </pre>
16840 */
16841 public Builder setFromURLBytes(
16842 com.google.protobuf.ByteString value) {
16843 if (value == null) {
16844 throw new NullPointerException();
16845 }
16846 bitField0_ |= 0x00000004;
16847 fromURL_ = value;
16848 onChanged();
16849 return this;
16850 }
16851
16852 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
16853 }
16854
    // Eagerly build the shared singleton default instance at class-load time;
    // getDefaultInstance() hands out this one object.
    static {
      defaultInstance = new AcceptRecoveryRequestProto(true);
      defaultInstance.initFields();
    }
16859
16860 // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
16861 }
16862
  // The acceptRecovery RPC returns an empty message, so this OrBuilder
  // interface declares no field accessors.
  public interface AcceptRecoveryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
16866 /**
16867 * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
16868 */
16869 public static final class AcceptRecoveryResponseProto extends
16870 com.google.protobuf.GeneratedMessage
16871 implements AcceptRecoveryResponseProtoOrBuilder {
16872 // Use AcceptRecoveryResponseProto.newBuilder() to construct.
16873 private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16874 super(builder);
16875 this.unknownFields = builder.getUnknownFields();
16876 }
16877 private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16878
16879 private static final AcceptRecoveryResponseProto defaultInstance;
16880 public static AcceptRecoveryResponseProto getDefaultInstance() {
16881 return defaultInstance;
16882 }
16883
16884 public AcceptRecoveryResponseProto getDefaultInstanceForType() {
16885 return defaultInstance;
16886 }
16887
16888 private final com.google.protobuf.UnknownFieldSet unknownFields;
16889 @java.lang.Override
16890 public final com.google.protobuf.UnknownFieldSet
16891 getUnknownFields() {
16892 return this.unknownFields;
16893 }
16894 private AcceptRecoveryResponseProto(
16895 com.google.protobuf.CodedInputStream input,
16896 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16897 throws com.google.protobuf.InvalidProtocolBufferException {
16898 initFields();
16899 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16900 com.google.protobuf.UnknownFieldSet.newBuilder();
16901 try {
16902 boolean done = false;
16903 while (!done) {
16904 int tag = input.readTag();
16905 switch (tag) {
16906 case 0:
16907 done = true;
16908 break;
16909 default: {
16910 if (!parseUnknownField(input, unknownFields,
16911 extensionRegistry, tag)) {
16912 done = true;
16913 }
16914 break;
16915 }
16916 }
16917 }
16918 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16919 throw e.setUnfinishedMessage(this);
16920 } catch (java.io.IOException e) {
16921 throw new com.google.protobuf.InvalidProtocolBufferException(
16922 e.getMessage()).setUnfinishedMessage(this);
16923 } finally {
16924 this.unknownFields = unknownFields.build();
16925 makeExtensionsImmutable();
16926 }
16927 }
16928 public static final com.google.protobuf.Descriptors.Descriptor
16929 getDescriptor() {
16930 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
16931 }
16932
16933 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16934 internalGetFieldAccessorTable() {
16935 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
16936 .ensureFieldAccessorsInitialized(
16937 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
16938 }
16939
16940 public static com.google.protobuf.Parser<AcceptRecoveryResponseProto> PARSER =
16941 new com.google.protobuf.AbstractParser<AcceptRecoveryResponseProto>() {
16942 public AcceptRecoveryResponseProto parsePartialFrom(
16943 com.google.protobuf.CodedInputStream input,
16944 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16945 throws com.google.protobuf.InvalidProtocolBufferException {
16946 return new AcceptRecoveryResponseProto(input, extensionRegistry);
16947 }
16948 };
16949
16950 @java.lang.Override
16951 public com.google.protobuf.Parser<AcceptRecoveryResponseProto> getParserForType() {
16952 return PARSER;
16953 }
16954
16955 private void initFields() {
16956 }
16957 private byte memoizedIsInitialized = -1;
16958 public final boolean isInitialized() {
16959 byte isInitialized = memoizedIsInitialized;
16960 if (isInitialized != -1) return isInitialized == 1;
16961
16962 memoizedIsInitialized = 1;
16963 return true;
16964 }
16965
16966 public void writeTo(com.google.protobuf.CodedOutputStream output)
16967 throws java.io.IOException {
16968 getSerializedSize();
16969 getUnknownFields().writeTo(output);
16970 }
16971
16972 private int memoizedSerializedSize = -1;
16973 public int getSerializedSize() {
16974 int size = memoizedSerializedSize;
16975 if (size != -1) return size;
16976
16977 size = 0;
16978 size += getUnknownFields().getSerializedSize();
16979 memoizedSerializedSize = size;
16980 return size;
16981 }
16982
16983 private static final long serialVersionUID = 0L;
16984 @java.lang.Override
16985 protected java.lang.Object writeReplace()
16986 throws java.io.ObjectStreamException {
16987 return super.writeReplace();
16988 }
16989
16990 @java.lang.Override
16991 public boolean equals(final java.lang.Object obj) {
16992 if (obj == this) {
16993 return true;
16994 }
16995 if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
16996 return super.equals(obj);
16997 }
16998 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
16999
17000 boolean result = true;
17001 result = result &&
17002 getUnknownFields().equals(other.getUnknownFields());
17003 return result;
17004 }
17005
17006 private int memoizedHashCode = 0;
17007 @java.lang.Override
17008 public int hashCode() {
17009 if (memoizedHashCode != 0) {
17010 return memoizedHashCode;
17011 }
17012 int hash = 41;
17013 hash = (19 * hash) + getDescriptorForType().hashCode();
17014 hash = (29 * hash) + getUnknownFields().hashCode();
17015 memoizedHashCode = hash;
17016 return hash;
17017 }
17018
17019 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17020 com.google.protobuf.ByteString data)
17021 throws com.google.protobuf.InvalidProtocolBufferException {
17022 return PARSER.parseFrom(data);
17023 }
17024 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17025 com.google.protobuf.ByteString data,
17026 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17027 throws com.google.protobuf.InvalidProtocolBufferException {
17028 return PARSER.parseFrom(data, extensionRegistry);
17029 }
17030 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
17031 throws com.google.protobuf.InvalidProtocolBufferException {
17032 return PARSER.parseFrom(data);
17033 }
17034 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17035 byte[] data,
17036 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17037 throws com.google.protobuf.InvalidProtocolBufferException {
17038 return PARSER.parseFrom(data, extensionRegistry);
17039 }
17040 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
17041 throws java.io.IOException {
17042 return PARSER.parseFrom(input);
17043 }
17044 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17045 java.io.InputStream input,
17046 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17047 throws java.io.IOException {
17048 return PARSER.parseFrom(input, extensionRegistry);
17049 }
17050 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
17051 throws java.io.IOException {
17052 return PARSER.parseDelimitedFrom(input);
17053 }
17054 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
17055 java.io.InputStream input,
17056 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17057 throws java.io.IOException {
17058 return PARSER.parseDelimitedFrom(input, extensionRegistry);
17059 }
17060 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17061 com.google.protobuf.CodedInputStream input)
17062 throws java.io.IOException {
17063 return PARSER.parseFrom(input);
17064 }
17065 public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17066 com.google.protobuf.CodedInputStream input,
17067 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17068 throws java.io.IOException {
17069 return PARSER.parseFrom(input, extensionRegistry);
17070 }
17071
17072 public static Builder newBuilder() { return Builder.create(); }
17073 public Builder newBuilderForType() { return newBuilder(); }
17074 public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
17075 return newBuilder().mergeFrom(prototype);
17076 }
17077 public Builder toBuilder() { return newBuilder(this); }
17078
17079 @java.lang.Override
17080 protected Builder newBuilderForType(
17081 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17082 Builder builder = new Builder(parent);
17083 return builder;
17084 }
17085 /**
17086 * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
17087 */
17088 public static final class Builder extends
17089 com.google.protobuf.GeneratedMessage.Builder<Builder>
17090 implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
17091 public static final com.google.protobuf.Descriptors.Descriptor
17092 getDescriptor() {
17093 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17094 }
17095
17096 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17097 internalGetFieldAccessorTable() {
17098 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
17099 .ensureFieldAccessorsInitialized(
17100 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
17101 }
17102
17103 // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
17104 private Builder() {
17105 maybeForceBuilderInitialization();
17106 }
17107
17108 private Builder(
17109 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17110 super(parent);
17111 maybeForceBuilderInitialization();
17112 }
17113 private void maybeForceBuilderInitialization() {
17114 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17115 }
17116 }
17117 private static Builder create() {
17118 return new Builder();
17119 }
17120
17121 public Builder clear() {
17122 super.clear();
17123 return this;
17124 }
17125
17126 public Builder clone() {
17127 return create().mergeFrom(buildPartial());
17128 }
17129
17130 public com.google.protobuf.Descriptors.Descriptor
17131 getDescriptorForType() {
17132 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17133 }
17134
17135 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
17136 return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17137 }
17138
17139 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
17140 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
17141 if (!result.isInitialized()) {
17142 throw newUninitializedMessageException(result);
17143 }
17144 return result;
17145 }
17146
17147 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
17148 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
17149 onBuilt();
17150 return result;
17151 }
17152
17153 public Builder mergeFrom(com.google.protobuf.Message other) {
17154 if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
17155 return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
17156 } else {
17157 super.mergeFrom(other);
17158 return this;
17159 }
17160 }
17161
17162 public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
17163 if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
17164 this.mergeUnknownFields(other.getUnknownFields());
17165 return this;
17166 }
17167
17168 public final boolean isInitialized() {
17169 return true;
17170 }
17171
17172 public Builder mergeFrom(
17173 com.google.protobuf.CodedInputStream input,
17174 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17175 throws java.io.IOException {
17176 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null;
17177 try {
17178 parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17179 } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17180 parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage();
17181 throw e;
17182 } finally {
17183 if (parsedMessage != null) {
17184 mergeFrom(parsedMessage);
17185 }
17186 }
17187 return this;
17188 }
17189
17190 // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17191 }
17192
17193 static {
17194 defaultInstance = new AcceptRecoveryResponseProto(true);
17195 defaultInstance.initFields();
17196 }
17197
17198 // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17199 }
17200
17201 /**
17202 * Protobuf service {@code hadoop.hdfs.QJournalProtocolService}
17203 *
17204 * <pre>
17205 **
17206 * Protocol used to journal edits to a JournalNode.
17207 * See the request and response for details of rpc call.
17208 * </pre>
17209 */
17210 public static abstract class QJournalProtocolService
17211 implements com.google.protobuf.Service {
17212 protected QJournalProtocolService() {}
17213
    /**
     * Asynchronous service interface: one callback-style method per RPC
     * declared in the {@code hadoop.hdfs.QJournalProtocolService} protobuf
     * service. Implementations are adapted to a
     * {@code com.google.protobuf.Service} via {@code newReflectiveService}.
     */
    public interface Interface {
      /**
       * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
       */
      public abstract void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);

      /**
       * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
       */
      public abstract void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);

      /**
       * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
       */
      public abstract void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);

      /**
       * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
       */
      public abstract void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);

      /**
       * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
       */
      public abstract void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);

      /**
       * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
       */
      public abstract void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);

      /**
       * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
       */
      public abstract void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);

      /**
       * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
       */
      public abstract void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);

      /**
       * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
       */
      public abstract void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);

      /**
       * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
       */
      public abstract void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);

      /**
       * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
       */
      public abstract void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);

      /**
       * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
       */
      public abstract void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);

    }
17312
    /**
     * Adapts an {@link Interface} implementation into a
     * {@code com.google.protobuf.Service} by delegating every RPC method
     * straight through to {@code impl}.
     */
    public static com.google.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new QJournalProtocolService() {
        @java.lang.Override
        public void isFormatted(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
          impl.isFormatted(controller, request, done);
        }

        @java.lang.Override
        public void getJournalState(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
          impl.getJournalState(controller, request, done);
        }

        @java.lang.Override
        public void newEpoch(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
          impl.newEpoch(controller, request, done);
        }

        @java.lang.Override
        public void format(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
          impl.format(controller, request, done);
        }

        @java.lang.Override
        public void journal(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
          impl.journal(controller, request, done);
        }

        @java.lang.Override
        public void heartbeat(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
          impl.heartbeat(controller, request, done);
        }

        @java.lang.Override
        public void startLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
          impl.startLogSegment(controller, request, done);
        }

        @java.lang.Override
        public void finalizeLogSegment(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
          impl.finalizeLogSegment(controller, request, done);
        }

        @java.lang.Override
        public void purgeLogs(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
          impl.purgeLogs(controller, request, done);
        }

        @java.lang.Override
        public void getEditLogManifest(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
          impl.getEditLogManifest(controller, request, done);
        }

        @java.lang.Override
        public void prepareRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
          impl.prepareRecovery(controller, request, done);
        }

        @java.lang.Override
        public void acceptRecovery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
          impl.acceptRecovery(controller, request, done);
        }

      };
    }
17414
    /**
     * Adapts a {@code BlockingInterface} implementation into a
     * {@code com.google.protobuf.BlockingService}. Dispatch is by the
     * method's index in the service descriptor, so the case order below
     * must match the rpc declaration order in QJournalProtocol.proto.
     */
    public static com.google.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new com.google.protobuf.BlockingService() {
        public final com.google.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }

        public final com.google.protobuf.Message callBlockingMethod(
            com.google.protobuf.Descriptors.MethodDescriptor method,
            com.google.protobuf.RpcController controller,
            com.google.protobuf.Message request)
            throws com.google.protobuf.ServiceException {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
            case 1:
              return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
            case 2:
              return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
            case 3:
              return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
            case 4:
              return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
            case 5:
              return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
            case 6:
              return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
            case 7:
              return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
            case 8:
              return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
            case 9:
              return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
            case 10:
              return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
            case 11:
              return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
            default:
              // Unreachable: every descriptor index is handled above.
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        // Returns the default instance of the request type for the given
        // method, used by the RPC layer to parse incoming requests.
        public final com.google.protobuf.Message
            getRequestPrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        // Returns the default instance of the response type for the given
        // method, used by the RPC layer to parse/validate responses.
        public final com.google.protobuf.Message
            getResponsePrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
            case 9:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
            case 10:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
            case 11:
              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }
17541
17542 /**
17543 * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17544 */
17545 public abstract void isFormatted(
17546 com.google.protobuf.RpcController controller,
17547 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17548 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17549
17550 /**
17551 * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17552 */
17553 public abstract void getJournalState(
17554 com.google.protobuf.RpcController controller,
17555 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17556 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17557
17558 /**
17559 * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17560 */
17561 public abstract void newEpoch(
17562 com.google.protobuf.RpcController controller,
17563 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17564 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17565
17566 /**
17567 * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17568 */
17569 public abstract void format(
17570 com.google.protobuf.RpcController controller,
17571 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17572 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17573
17574 /**
17575 * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17576 */
17577 public abstract void journal(
17578 com.google.protobuf.RpcController controller,
17579 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17580 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17581
17582 /**
17583 * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17584 */
17585 public abstract void heartbeat(
17586 com.google.protobuf.RpcController controller,
17587 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17588 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17589
17590 /**
17591 * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17592 */
17593 public abstract void startLogSegment(
17594 com.google.protobuf.RpcController controller,
17595 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17596 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17597
17598 /**
17599 * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17600 */
17601 public abstract void finalizeLogSegment(
17602 com.google.protobuf.RpcController controller,
17603 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17604 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17605
17606 /**
17607 * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17608 */
17609 public abstract void purgeLogs(
17610 com.google.protobuf.RpcController controller,
17611 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17612 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17613
17614 /**
17615 * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17616 */
17617 public abstract void getEditLogManifest(
17618 com.google.protobuf.RpcController controller,
17619 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17620 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17621
17622 /**
17623 * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17624 */
17625 public abstract void prepareRecovery(
17626 com.google.protobuf.RpcController controller,
17627 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17628 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17629
17630 /**
17631 * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17632 */
17633 public abstract void acceptRecovery(
17634 com.google.protobuf.RpcController controller,
17635 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17636 com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17637
    // Descriptor of this service: the first (and, per this generated file,
    // only) service declared in QJournalProtocol.proto.
    public static final
        com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
    }
    // Instance-level accessor required by com.google.protobuf.Service;
    // delegates to the static descriptor.
    public final com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
17647
    /**
     * Reflective dispatcher: routes {@code method} (by its index in the
     * service descriptor) to the matching abstract rpc method, downcasting
     * {@code request} and specializing {@code done} to the per-method
     * message types.  Throws {@link IllegalArgumentException} if the method
     * descriptor belongs to a different service.
     */
    public final void callMethod(
        com.google.protobuf.Descriptors.MethodDescriptor method,
        com.google.protobuf.RpcController controller,
        com.google.protobuf.Message request,
        com.google.protobuf.RpcCallback<
          com.google.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
              done));
          return;
        case 1:
          this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
              done));
          return;
        case 2:
          this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
              done));
          return;
        case 3:
          this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
              done));
          return;
        case 4:
          this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
              done));
          return;
        case 5:
          this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
              done));
          return;
        case 6:
          this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 7:
          this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
              done));
          return;
        case 8:
          this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
              done));
          return;
        case 9:
          this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
              done));
          return;
        case 10:
          this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
              done));
          return;
        case 11:
          this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
              done));
          return;
        default:
          // Unreachable: every method index of this service is handled above.
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
17724
    /**
     * Returns the prototype (default instance) of the request message for
     * {@code method}; the RPC framework uses it to parse wire requests.
     * Case indices follow the rpc declaration order in QJournalProtocol.proto.
     */
    public final com.google.protobuf.Message
        getRequestPrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
        default:
          // Unreachable: every method index of this service is handled above.
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
17762
    /**
     * Returns the prototype (default instance) of the response message for
     * {@code method}; the RPC framework uses it to parse wire responses.
     * Case indices follow the rpc declaration order in QJournalProtocol.proto.
     */
    public final com.google.protobuf.Message
        getResponsePrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
        case 9:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
        case 10:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
        case 11:
          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
        default:
          // Unreachable: every method index of this service is handled above.
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
17800
    // Factory for the asynchronous client stub bound to the given channel.
    public static Stub newStub(
        com.google.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }
17805
    /**
     * Asynchronous client stub.  Each method forwards its request over the
     * wrapped {@link com.google.protobuf.RpcChannel} using the matching
     * method descriptor (index = rpc declaration order) and delivers the
     * typed response through the caller's callback.
     */
    public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
      private Stub(com.google.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final com.google.protobuf.RpcChannel channel;

      public com.google.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public void isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
      }

      public void getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
      }

      public void newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
      }

      public void format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
      }

      public void journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
      }

      public void heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
      }

      public void startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
      }

      public void finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
      }

      public void purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
      }

      public void getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(9),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
      }

      public void prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(10),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
      }

      public void acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(11),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
      }
    }
17997
    // Factory for the synchronous (blocking) client stub bound to the given channel.
    public static BlockingInterface newBlockingStub(
        com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }
18002
    /**
     * Synchronous counterpart of the service: one blocking method per rpc in
     * QJournalProtocol.proto, each returning the response directly and
     * surfacing RPC failures as {@link com.google.protobuf.ServiceException}.
     */
    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
          throws com.google.protobuf.ServiceException;
    }
18064
18065 private static final class BlockingStub implements BlockingInterface {
      // Wraps the blocking channel all calls are issued on; instances are
      // created only via newBlockingStub().
      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }
18069
18070 private final com.google.protobuf.BlockingRpcChannel channel;
18071
      // Blocking rpc isFormatted (method index 0): issues the call on the
      // wrapped channel and casts the generic response to its concrete type.
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
      }
18082
18083
18084 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
18085 com.google.protobuf.RpcController controller,
18086 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
18087 throws com.google.protobuf.ServiceException {
18088 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
18089 getDescriptor().getMethods().get(1),
18090 controller,
18091 request,
18092 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
18093 }
18094
18095
18096 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
18097 com.google.protobuf.RpcController controller,
18098 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
18099 throws com.google.protobuf.ServiceException {
18100 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
18101 getDescriptor().getMethods().get(2),
18102 controller,
18103 request,
18104 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
18105 }
18106
18107
18108 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
18109 com.google.protobuf.RpcController controller,
18110 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
18111 throws com.google.protobuf.ServiceException {
18112 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
18113 getDescriptor().getMethods().get(3),
18114 controller,
18115 request,
18116 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
18117 }
18118
18119
18120 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
18121 com.google.protobuf.RpcController controller,
18122 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
18123 throws com.google.protobuf.ServiceException {
18124 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
18125 getDescriptor().getMethods().get(4),
18126 controller,
18127 request,
18128 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
18129 }
18130
18131
18132 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
18133 com.google.protobuf.RpcController controller,
18134 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
18135 throws com.google.protobuf.ServiceException {
18136 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
18137 getDescriptor().getMethods().get(5),
18138 controller,
18139 request,
18140 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
18141 }
18142
18143
18144 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
18145 com.google.protobuf.RpcController controller,
18146 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
18147 throws com.google.protobuf.ServiceException {
18148 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
18149 getDescriptor().getMethods().get(6),
18150 controller,
18151 request,
18152 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
18153 }
18154
18155
18156 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
18157 com.google.protobuf.RpcController controller,
18158 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
18159 throws com.google.protobuf.ServiceException {
18160 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
18161 getDescriptor().getMethods().get(7),
18162 controller,
18163 request,
18164 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
18165 }
18166
18167
18168 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
18169 com.google.protobuf.RpcController controller,
18170 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
18171 throws com.google.protobuf.ServiceException {
18172 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
18173 getDescriptor().getMethods().get(8),
18174 controller,
18175 request,
18176 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
18177 }
18178
18179
18180 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
18181 com.google.protobuf.RpcController controller,
18182 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
18183 throws com.google.protobuf.ServiceException {
18184 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
18185 getDescriptor().getMethods().get(9),
18186 controller,
18187 request,
18188 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
18189 }
18190
18191
18192 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
18193 com.google.protobuf.RpcController controller,
18194 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
18195 throws com.google.protobuf.ServiceException {
18196 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
18197 getDescriptor().getMethods().get(10),
18198 controller,
18199 request,
18200 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
18201 }
18202
18203
18204 public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
18205 com.google.protobuf.RpcController controller,
18206 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
18207 throws com.google.protobuf.ServiceException {
18208 return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
18209 getDescriptor().getMethods().get(11),
18210 controller,
18211 request,
18212 org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
18213 }
18214
18215 }
18216
18217 // @@protoc_insertion_point(class_scope:hadoop.hdfs.QJournalProtocolService)
18218 }
18219
  // Cached message descriptors and field-accessor tables, one pair per message
  // type declared in QJournalProtocol.proto (in declaration order). They are
  // assigned exactly once, by the InternalDescriptorAssigner callback inside the
  // static initializer below; the generated message classes use them for
  // reflection-based field access. Do not touch by hand — regenerate instead.
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalIdProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
18360
  /**
   * Returns the {@code FileDescriptor} for {@code QJournalProtocol.proto}.
   * The descriptor is built once in the static initializer below, so it is
   * non-null by the time any caller can reach this method.
   */
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  // Assigned exactly once by the InternalDescriptorAssigner callback in the
  // static initializer below.
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
18367 static {
18368 java.lang.String[] descriptorData = {
18369 "\n\026QJournalProtocol.proto\022\013hadoop.hdfs\032\nh" +
18370 "dfs.proto\"$\n\016JournalIdProto\022\022\n\nidentifie" +
18371 "r\030\001 \002(\t\"\201\001\n\020RequestInfoProto\022.\n\tjournalI" +
18372 "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\r\n\005" +
18373 "epoch\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\r" +
18374 "committedTxId\030\004 \001(\004\"M\n\021SegmentStateProto" +
18375 "\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014" +
18376 "isInProgress\030\003 \002(\010\"k\n\032PersistedRecoveryP" +
18377 "axosData\0224\n\014segmentState\030\001 \002(\0132\036.hadoop." +
18378 "hdfs.SegmentStateProto\022\027\n\017acceptedInEpoc",
18379 "h\030\002 \002(\004\"\221\001\n\023JournalRequestProto\022.\n\007reqIn" +
18380 "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
18381 "\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007" +
18382 "records\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024J" +
18383 "ournalResponseProto\"G\n\025HeartbeatRequestP" +
18384 "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ" +
18385 "estInfoProto\"\030\n\026HeartbeatResponseProto\"[" +
18386 "\n\033StartLogSegmentRequestProto\022.\n\007reqInfo" +
18387 "\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022\014\n" +
18388 "\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResponsePr",
18389 "oto\"t\n\036FinalizeLogSegmentRequestProto\022.\n" +
18390 "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo" +
18391 "Proto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(" +
18392 "\004\"!\n\037FinalizeLogSegmentResponseProto\"^\n\025" +
18393 "PurgeLogsRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
18394 ".hadoop.hdfs.RequestInfoProto\022\025\n\rminTxId" +
18395 "ToKeep\030\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"C" +
18396 "\n\027IsFormattedRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
18397 ".hadoop.hdfs.JournalIdProto\"/\n\030IsFormatt" +
18398 "edResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"G\n\033",
18399 "GetJournalStateRequestProto\022(\n\003jid\030\001 \002(\013" +
18400 "2\033.hadoop.hdfs.JournalIdProto\"K\n\034GetJour" +
18401 "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
18402 "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\"o\n\022FormatRequ" +
18403 "estProto\022(\n\003jid\030\001 \002(\0132\033.hadoop.hdfs.Jour" +
18404 "nalIdProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs" +
18405 ".NamespaceInfoProto\"\025\n\023FormatResponsePro" +
18406 "to\"\200\001\n\024NewEpochRequestProto\022(\n\003jid\030\001 \002(\013" +
18407 "2\033.hadoop.hdfs.JournalIdProto\022/\n\006nsInfo\030" +
18408 "\002 \002(\0132\037.hadoop.hdfs.NamespaceInfoProto\022\r",
18409 "\n\005epoch\030\003 \002(\004\"0\n\025NewEpochResponseProto\022\027" +
18410 "\n\017lastSegmentTxId\030\001 \001(\004\"\224\001\n\036GetEditLogMa" +
18411 "nifestRequestProto\022(\n\003jid\030\001 \002(\0132\033.hadoop" +
18412 ".hdfs.JournalIdProto\022\021\n\tsinceTxId\030\002 \002(\004\022" +
18413 "\030\n\nforReading\030\003 \001(\010:\004true\022\033\n\014inProgressO" +
18414 "k\030\004 \001(\010:\005false\"n\n\037GetEditLogManifestResp" +
18415 "onseProto\0229\n\010manifest\030\001 \002(\0132\'.hadoop.hdf" +
18416 "s.RemoteEditLogManifestProto\022\020\n\010httpPort" +
18417 "\030\002 \002(\r\"b\n\033PrepareRecoveryRequestProto\022.\n" +
18418 "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo",
18419 "Proto\022\023\n\013segmentTxId\030\002 \002(\004\"\241\001\n\034PrepareRe" +
18420 "coveryResponseProto\0224\n\014segmentState\030\001 \001(" +
18421 "\0132\036.hadoop.hdfs.SegmentStateProto\022\027\n\017acc" +
18422 "eptedInEpoch\030\002 \001(\004\022\027\n\017lastWriterEpoch\030\003 " +
18423 "\002(\004\022\031\n\021lastCommittedTxId\030\004 \001(\004\"\224\001\n\032Accep" +
18424 "tRecoveryRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
18425 ".hadoop.hdfs.RequestInfoProto\0225\n\rstateTo" +
18426 "Accept\030\002 \002(\0132\036.hadoop.hdfs.SegmentStateP" +
18427 "roto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033AcceptRecoveryR" +
18428 "esponseProto2\220\t\n\027QJournalProtocolService",
18429 "\022Z\n\013isFormatted\022$.hadoop.hdfs.IsFormatte" +
18430 "dRequestProto\032%.hadoop.hdfs.IsFormattedR" +
18431 "esponseProto\022f\n\017getJournalState\022(.hadoop" +
18432 ".hdfs.GetJournalStateRequestProto\032).hado" +
18433 "op.hdfs.GetJournalStateResponseProto\022Q\n\010" +
18434 "newEpoch\022!.hadoop.hdfs.NewEpochRequestPr" +
18435 "oto\032\".hadoop.hdfs.NewEpochResponseProto\022" +
18436 "K\n\006format\022\037.hadoop.hdfs.FormatRequestPro" +
18437 "to\032 .hadoop.hdfs.FormatResponseProto\022N\n\007" +
18438 "journal\022 .hadoop.hdfs.JournalRequestProt",
18439 "o\032!.hadoop.hdfs.JournalResponseProto\022T\n\t" +
18440 "heartbeat\022\".hadoop.hdfs.HeartbeatRequest" +
18441 "Proto\032#.hadoop.hdfs.HeartbeatResponsePro" +
18442 "to\022f\n\017startLogSegment\022(.hadoop.hdfs.Star" +
18443 "tLogSegmentRequestProto\032).hadoop.hdfs.St" +
18444 "artLogSegmentResponseProto\022o\n\022finalizeLo" +
18445 "gSegment\022+.hadoop.hdfs.FinalizeLogSegmen" +
18446 "tRequestProto\032,.hadoop.hdfs.FinalizeLogS" +
18447 "egmentResponseProto\022T\n\tpurgeLogs\022\".hadoo" +
18448 "p.hdfs.PurgeLogsRequestProto\032#.hadoop.hd",
18449 "fs.PurgeLogsResponseProto\022o\n\022getEditLogM" +
18450 "anifest\022+.hadoop.hdfs.GetEditLogManifest" +
18451 "RequestProto\032,.hadoop.hdfs.GetEditLogMan" +
18452 "ifestResponseProto\022f\n\017prepareRecovery\022(." +
18453 "hadoop.hdfs.PrepareRecoveryRequestProto\032" +
18454 ").hadoop.hdfs.PrepareRecoveryResponsePro" +
18455 "to\022c\n\016acceptRecovery\022\'.hadoop.hdfs.Accep" +
18456 "tRecoveryRequestProto\032(.hadoop.hdfs.Acce" +
18457 "ptRecoveryResponseProtoBH\n(org.apache.ha" +
18458 "doop.hdfs.qjournal.protocolB\026QJournalPro",
18459 "tocolProtos\210\001\001\240\001\001"
18460 };
18461 com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
18462 new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
18463 public com.google.protobuf.ExtensionRegistry assignDescriptors(
18464 com.google.protobuf.Descriptors.FileDescriptor root) {
18465 descriptor = root;
18466 internal_static_hadoop_hdfs_JournalIdProto_descriptor =
18467 getDescriptor().getMessageTypes().get(0);
18468 internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable = new
18469 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18470 internal_static_hadoop_hdfs_JournalIdProto_descriptor,
18471 new java.lang.String[] { "Identifier", });
18472 internal_static_hadoop_hdfs_RequestInfoProto_descriptor =
18473 getDescriptor().getMessageTypes().get(1);
18474 internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable = new
18475 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18476 internal_static_hadoop_hdfs_RequestInfoProto_descriptor,
18477 new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
18478 internal_static_hadoop_hdfs_SegmentStateProto_descriptor =
18479 getDescriptor().getMessageTypes().get(2);
18480 internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable = new
18481 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18482 internal_static_hadoop_hdfs_SegmentStateProto_descriptor,
18483 new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
18484 internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor =
18485 getDescriptor().getMessageTypes().get(3);
18486 internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable = new
18487 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18488 internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor,
18489 new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
18490 internal_static_hadoop_hdfs_JournalRequestProto_descriptor =
18491 getDescriptor().getMessageTypes().get(4);
18492 internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable = new
18493 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18494 internal_static_hadoop_hdfs_JournalRequestProto_descriptor,
18495 new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
18496 internal_static_hadoop_hdfs_JournalResponseProto_descriptor =
18497 getDescriptor().getMessageTypes().get(5);
18498 internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable = new
18499 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18500 internal_static_hadoop_hdfs_JournalResponseProto_descriptor,
18501 new java.lang.String[] { });
18502 internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor =
18503 getDescriptor().getMessageTypes().get(6);
18504 internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable = new
18505 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18506 internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor,
18507 new java.lang.String[] { "ReqInfo", });
18508 internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor =
18509 getDescriptor().getMessageTypes().get(7);
18510 internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable = new
18511 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18512 internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor,
18513 new java.lang.String[] { });
18514 internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor =
18515 getDescriptor().getMessageTypes().get(8);
18516 internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable = new
18517 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18518 internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor,
18519 new java.lang.String[] { "ReqInfo", "Txid", });
18520 internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor =
18521 getDescriptor().getMessageTypes().get(9);
18522 internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable = new
18523 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18524 internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor,
18525 new java.lang.String[] { });
18526 internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor =
18527 getDescriptor().getMessageTypes().get(10);
18528 internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
18529 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18530 internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor,
18531 new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
18532 internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor =
18533 getDescriptor().getMessageTypes().get(11);
18534 internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
18535 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18536 internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor,
18537 new java.lang.String[] { });
18538 internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor =
18539 getDescriptor().getMessageTypes().get(12);
18540 internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable = new
18541 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18542 internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor,
18543 new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
18544 internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor =
18545 getDescriptor().getMessageTypes().get(13);
18546 internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable = new
18547 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18548 internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor,
18549 new java.lang.String[] { });
18550 internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor =
18551 getDescriptor().getMessageTypes().get(14);
18552 internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable = new
18553 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18554 internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor,
18555 new java.lang.String[] { "Jid", });
18556 internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor =
18557 getDescriptor().getMessageTypes().get(15);
18558 internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable = new
18559 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18560 internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor,
18561 new java.lang.String[] { "IsFormatted", });
18562 internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor =
18563 getDescriptor().getMessageTypes().get(16);
18564 internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable = new
18565 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18566 internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor,
18567 new java.lang.String[] { "Jid", });
18568 internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor =
18569 getDescriptor().getMessageTypes().get(17);
18570 internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable = new
18571 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18572 internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor,
18573 new java.lang.String[] { "LastPromisedEpoch", "HttpPort", });
18574 internal_static_hadoop_hdfs_FormatRequestProto_descriptor =
18575 getDescriptor().getMessageTypes().get(18);
18576 internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable = new
18577 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18578 internal_static_hadoop_hdfs_FormatRequestProto_descriptor,
18579 new java.lang.String[] { "Jid", "NsInfo", });
18580 internal_static_hadoop_hdfs_FormatResponseProto_descriptor =
18581 getDescriptor().getMessageTypes().get(19);
18582 internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable = new
18583 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18584 internal_static_hadoop_hdfs_FormatResponseProto_descriptor,
18585 new java.lang.String[] { });
18586 internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor =
18587 getDescriptor().getMessageTypes().get(20);
18588 internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable = new
18589 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18590 internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor,
18591 new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
18592 internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor =
18593 getDescriptor().getMessageTypes().get(21);
18594 internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable = new
18595 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18596 internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor,
18597 new java.lang.String[] { "LastSegmentTxId", });
18598 internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor =
18599 getDescriptor().getMessageTypes().get(22);
18600 internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable = new
18601 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18602 internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor,
18603 new java.lang.String[] { "Jid", "SinceTxId", "ForReading", "InProgressOk", });
18604 internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor =
18605 getDescriptor().getMessageTypes().get(23);
18606 internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable = new
18607 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18608 internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor,
18609 new java.lang.String[] { "Manifest", "HttpPort", });
18610 internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor =
18611 getDescriptor().getMessageTypes().get(24);
18612 internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable = new
18613 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18614 internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor,
18615 new java.lang.String[] { "ReqInfo", "SegmentTxId", });
18616 internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor =
18617 getDescriptor().getMessageTypes().get(25);
18618 internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable = new
18619 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18620 internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor,
18621 new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
18622 internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor =
18623 getDescriptor().getMessageTypes().get(26);
18624 internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable = new
18625 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18626 internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor,
18627 new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
18628 internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor =
18629 getDescriptor().getMessageTypes().get(27);
18630 internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable = new
18631 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18632 internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor,
18633 new java.lang.String[] { });
18634 return null;
18635 }
18636 };
18637 com.google.protobuf.Descriptors.FileDescriptor
18638 .internalBuildGeneratedFileFrom(descriptorData,
18639 new com.google.protobuf.Descriptors.FileDescriptor[] {
18640 org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
18641 }, assigner);
18642 }
18643
18644 // @@protoc_insertion_point(outer_class_scope)
18645 }