View Javadoc

1   // Generated by the protocol buffer compiler.  DO NOT EDIT!
2   // source: ZooKeeper.proto
3   
4   package org.apache.hadoop.hbase.protobuf.generated;
5   
6   public final class ZooKeeperProtos {
  // Not instantiable: this outer class only namespaces the generated messages.
  private ZooKeeperProtos() {}
  // ZooKeeper.proto declares no extensions, so there is nothing to register;
  // the method exists to satisfy the standard generated-code surface.
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  /**
   * Read-only accessor view shared by {@code MetaRegionServer} and its
   * {@code Builder}: has/get methods for the three fields declared for
   * {@code hbase.pb.MetaRegionServer} in ZooKeeper.proto.
   */
  public interface MetaRegionServerOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.ServerName server = 1;
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     *
     * <pre>
     * The ServerName hosting the meta region currently, or destination server,
     * if meta region is in transition.
     * </pre>
     */
    boolean hasServer();
    /** Returns the {@code server} field (default instance when unset). */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
    /** Returns the {@code server} field as a message-or-builder view. */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();

    // optional uint32 rpc_version = 2;
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
     * to send to a RegionServer.  AsyncHBase will use this to detect versions.
     * </pre>
     */
    boolean hasRpcVersion();
    /** Returns the {@code rpc_version} field (0 when unset). */
    int getRpcVersion();

    // optional .hbase.pb.RegionState.State state = 3;
    /**
     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
     *
     * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
     * </pre>
     */
    boolean hasState();
    /** Returns the {@code state} field (OFFLINE when unset). */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
  }
83    /**
84     * Protobuf type {@code hbase.pb.MetaRegionServer}
85     *
86     * <pre>
87     **
88     * Content of the meta-region-server znode.
89     * </pre>
90     */
91    public static final class MetaRegionServer extends
92        com.google.protobuf.GeneratedMessage
93        implements MetaRegionServerOrBuilder {
    // Use MetaRegionServer.newBuilder() to construct; this ctor copies the
    // builder's accumulated unknown fields into the immutable message.
    private MetaRegionServer(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the shared default instance; it carries no unknown fields.
    private MetaRegionServer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance; assigned in the outer class's static
    // initializer (not visible in this chunk).
    private static final MetaRegionServer defaultInstance;
    public static MetaRegionServer getDefaultInstance() {
      return defaultInstance;
    }

    public MetaRegionServer getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not part of this schema version;
    // preserved so re-serialization does not drop them.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor, invoked via PARSER.parsePartialFrom().
    // Reads tag/value pairs until tag 0 (end of stream/message); statement
    // order and bit-mask values here define the wire behavior — do not reorder.
    private MetaRegionServer(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;  // unused here; emitted by protoc for all messages
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks the end of the message.
              done = true;
              break;
            default: {
              // Keep unrecognized fields; a false return means an end-group
              // tag was seen, which also terminates parsing.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (server), length-delimited message. If the field was
              // already set, merge per proto last-message-merges semantics.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = server_.toBuilder();
              }
              server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(server_);
                server_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (rpc_version), varint.
              bitField0_ |= 0x00000002;
              rpcVersion_ = input.readUInt32();
              break;
            }
            case 24: {
              // Field 3 (state), enum varint. Unrecognized enum numbers are
              // stored as unknown fields rather than dropped.
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
              if (value == null) {
                unknownFields.mergeVarintField(3, rawValue);
              } else {
                bitField0_ |= 0x00000004;
                state_ = value;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always capture collected unknown fields, even when parsing failed,
        // so the partially parsed message attached to the exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Schema descriptor for hbase.pb.MetaRegionServer; the static descriptor
    // field is initialized elsewhere in this generated file.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
    }

    // Maps descriptor fields to the generated accessor methods for reflection.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
    }

    // Stateless parser delegating to the wire-format parsing constructor.
    public static com.google.protobuf.Parser<MetaRegionServer> PARSER =
        new com.google.protobuf.AbstractParser<MetaRegionServer>() {
      public MetaRegionServer parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new MetaRegionServer(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<MetaRegionServer> getParserForType() {
      return PARSER;
    }
206 
    // Presence bits: 0x1 = server, 0x2 = rpc_version, 0x4 = state.
    private int bitField0_;
    // required .hbase.pb.ServerName server = 1;
    public static final int SERVER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     *
     * <pre>
     * The ServerName hosting the meta region currently, or destination server,
     * if meta region is in transition.
     * </pre>
     */
    public boolean hasServer() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /** Returns the {@code server} field (default instance when unset). */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
      return server_;
    }
    /** Returns the {@code server} field as a message-or-builder view. */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
      return server_;
    }

    // optional uint32 rpc_version = 2;
    public static final int RPC_VERSION_FIELD_NUMBER = 2;
    private int rpcVersion_;
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * The major version of the rpc the server speaks.  This is used so that
     * clients connecting to the cluster can have prior knowledge of what version
     * to send to a RegionServer.  AsyncHBase will use this to detect versions.
     * </pre>
     */
    public boolean hasRpcVersion() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /** Returns the {@code rpc_version} field (0 when unset). */
    public int getRpcVersion() {
      return rpcVersion_;
    }

    // optional .hbase.pb.RegionState.State state = 3;
    public static final int STATE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
    /**
     * <code>optional .hbase.pb.RegionState.State state = 3;</code>
     *
     * <pre>
     * State of the region transition. OPEN means fully operational 'hbase:meta'
     * </pre>
     */
    public boolean hasState() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /** Returns the {@code state} field (OFFLINE when unset). */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
      return state_;
    }

    // Resets all fields to their proto defaults; called before parsing.
    private void initFields() {
      server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      rpcVersion_ = 0;
      state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
    }
    // Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A message is initialized when the required 'server' field is set and
    // itself initialized; the optional fields impose no requirement.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasServer()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getServer().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // populate the size memo before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, server_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeEnum(3, state_.getNumber());
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; safe because the message is immutable.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, server_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(3, state_.getNumber());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
356 
    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to the superclass's replacement object.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: presence bits, set values, and unknown fields
    // must all match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;

      boolean result = true;
      result = result && (hasServer() == other.hasServer());
      if (hasServer()) {
        result = result && getServer()
            .equals(other.getServer());
      }
      result = result && (hasRpcVersion() == other.hasRpcVersion());
      if (hasRpcVersion()) {
        result = result && (getRpcVersion()
            == other.getRpcVersion());
      }
      result = result && (hasState() == other.hasState());
      if (hasState()) {
        result = result &&
            (getState() == other.getState());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash (0 = not yet computed); consistent with equals() above.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasServer()) {
        hash = (37 * hash) + SERVER_FIELD_NUMBER;
        hash = (53 * hash) + getServer().hashCode();
      }
      if (hasRpcVersion()) {
        hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getRpcVersion();
      }
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        // hashEnum is a generated helper defined elsewhere in this file
        // (not visible in this chunk); presumably hashes the enum number.
        hash = (53 * hash) + hashEnum(getState());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
419 
    // Standard generated parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factory surface.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a fresh builder pre-populated with the prototype's fields.
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
486     /**
487      * Protobuf type {@code hbase.pb.MetaRegionServer}
488      *
489      * <pre>
490      **
491      * Content of the meta-region-server znode.
492      * </pre>
493      */
494     public static final class Builder extends
495         com.google.protobuf.GeneratedMessage.Builder<Builder>
496        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
      // Same descriptor/accessor table as the message class.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      // Parented builder used when this message is a sub-builder of another.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested-message field builders when the runtime is
      // configured to always use field builders (parented-builder mode).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getServerFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
527 
      // Resets every field to its proto default and clears all presence bits.
      public Builder clear() {
        super.clear();
        if (serverBuilder_ == null) {
          server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
        } else {
          serverBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        rpcVersion_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      // Deep copy via an intermediate partial message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
      }

      // Builds and verifies required fields; throws if 'server' is missing.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without the required-field check, translating the builder's
      // presence bits into the message's bitField0_.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (serverBuilder_ == null) {
          result.server_ = server_;
        } else {
          result.server_ = serverBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.rpcVersion_ = rpcVersion_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.state_ = state_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
588 
      // Dynamic dispatch: use the typed merge when possible, otherwise fall
      // back to the reflective merge in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-wise merge: only fields set on 'other' overwrite/merge here.
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
        if (other.hasServer()) {
          mergeServer(other.getServer());
        }
        if (other.hasRpcVersion()) {
          setRpcVersion(other.getRpcVersion());
        }
        if (other.hasState()) {
          setState(other.getState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Builder-side required-field check (no memoization, unlike the message).
      public final boolean isInitialized() {
        if (!hasServer()) {
          
          return false;
        }
        if (!getServer().isInitialized()) {
          
          return false;
        }
        return true;
      }

      // Parses from a stream and merges the result in; on failure, any
      // partially parsed message attached to the exception is still merged
      // (via finally) before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Builder-side presence bits (same layout as the message: 0x1 = server,
      // 0x2 = rpc_version, 0x4 = state).
      private int bitField0_;

      // required .hbase.pb.ServerName server = 1;
      // The field value lives in server_ until getServerFieldBuilder() is
      // called, after which serverBuilder_ owns it and server_ is nulled.
      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       *
       * <pre>
       * The ServerName hosting the meta region currently, or destination server,
       * if meta region is in transition.
       * </pre>
       */
      public boolean hasServer() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /** Returns the current {@code server} value from whichever store owns it. */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
        if (serverBuilder_ == null) {
          return server_;
        } else {
          return serverBuilder_.getMessage();
        }
      }
      /** Sets {@code server}; rejects null. */
      public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
        if (serverBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          server_ = value;
          onChanged();
        } else {
          serverBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /** Sets {@code server} from a builder (built immediately). */
      public Builder setServer(
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
        if (serverBuilder_ == null) {
          server_ = builderForValue.build();
          onChanged();
        } else {
          serverBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * Merges {@code value} into {@code server}: if a non-default value is
       * already set, the two are field-merged; otherwise {@code value} replaces it.
       */
      public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
        if (serverBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
            server_ =
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
          } else {
            server_ = value;
          }
          onChanged();
        } else {
          serverBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /** Clears {@code server} back to the default instance and unsets its bit. */
      public Builder clearServer() {
        if (serverBuilder_ == null) {
          server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
          onChanged();
        } else {
          serverBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * Returns a mutable sub-builder for {@code server}; marks the field set,
       * since callers typically mutate the returned builder.
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getServerFieldBuilder().getBuilder();
      }
      /** Read-only view of {@code server} without forcing builder creation. */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
        if (serverBuilder_ != null) {
          return serverBuilder_.getMessageOrBuilder();
        } else {
          return server_;
        }
      }
      /**
       * Lazily creates the single-field builder for {@code server}, seeding it
       * with the current value; ownership of the value transfers to the builder
       * (server_ is nulled afterwards).
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
          getServerFieldBuilder() {
        if (serverBuilder_ == null) {
          serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
                  server_,
                  getParentForChildren(),
                  isClean());
          server_ = null;
        }
        return serverBuilder_;
      }
805 
      // optional uint32 rpc_version = 2;
      // Builder-side storage; presence is tracked by bit 0x00000002 of bitField0_.
      private int rpcVersion_ ;
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
       * </pre>
       */
      public boolean hasRpcVersion() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
       * </pre>
       */
      public int getRpcVersion() {
        return rpcVersion_;
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
       * </pre>
       */
      public Builder setRpcVersion(int value) {
        bitField0_ |= 0x00000002;  // mark field 2 as present
        rpcVersion_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * The major version of the rpc the server speaks.  This is used so that
       * clients connecting to the cluster can have prior knowledge of what version
       * to send to a RegionServer.  AsyncHBase will use this to detect versions.
       * </pre>
       */
      public Builder clearRpcVersion() {
        bitField0_ = (bitField0_ & ~0x00000002);  // drop the presence bit
        rpcVersion_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
862 
      // optional .hbase.pb.RegionState.State state = 3;
      // Enum field; defaults to OFFLINE (the first declared enum value). Presence
      // is tracked by bit 0x00000004 of bitField0_.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
      /**
       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
       *
       * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
       * </pre>
       */
      public boolean hasState() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
       *
       * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
        return state_;
      }
      /**
       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
       *
       * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
       * </pre>
       */
      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
        if (value == null) {
          // Generated setters are null-hostile; absence is expressed via clearState().
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        state_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hbase.pb.RegionState.State state = 3;</code>
       *
       * <pre>
       * State of the region transition. OPEN means fully operational 'hbase:meta'
       * </pre>
       */
      public Builder clearState() {
        bitField0_ = (bitField0_ & ~0x00000004);
        state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;  // back to the declared default
        onChanged();
        return this;
      }
914 
915       // @@protoc_insertion_point(builder_scope:hbase.pb.MetaRegionServer)
916     }
917 
    // Eagerly builds the shared immutable default instance returned by
    // MetaRegionServer.getDefaultInstance(); initFields() seats proto2 defaults.
    static {
      defaultInstance = new MetaRegionServer(true);
      defaultInstance.initFields();
    }
922 
923     // @@protoc_insertion_point(class_scope:hbase.pb.MetaRegionServer)
924   }
925 
  // Read-only accessor contract shared by the Master message and its Builder,
  // generated from the 'Master' message in ZooKeeper.proto.
  public interface MasterOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.ServerName master = 1;
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    boolean hasMaster();
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();

    // optional uint32 rpc_version = 2;
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    boolean hasRpcVersion();
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    int getRpcVersion();

    // optional uint32 info_port = 3;
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    boolean hasInfoPort();
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    int getInfoPort();
  }
983   /**
984    * Protobuf type {@code hbase.pb.Master}
985    *
986    * <pre>
987    **
988    * Content of the master znode.
989    * </pre>
990    */
991   public static final class Master extends
992       com.google.protobuf.GeneratedMessage
993       implements MasterOrBuilder {
    // Use Master.newBuilder() to construct.
    private Master(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit=true path used only for the shared default instance (see static block).
    private Master(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final Master defaultInstance;
    public static Master getDefaultInstance() {
      return defaultInstance;
    }

    public Master getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: dispatches on the encoded tag
    // (field_number << 3 | wire_type) until end of stream (tag 0).
    private Master(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            // NOTE: 'default' before the value cases is legal Java; switch
            // dispatch is by value, not by textual order.
            default: {
              // Unrecognized field numbers are preserved in unknownFields
              // (forward compatibility); false means end-of-group.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {  // field 1, length-delimited: master (ServerName message)
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                // Field already seen on the wire: merge repeated occurrences.
                subBuilder = master_.toBuilder();
              }
              master_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(master_);
                master_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {  // field 2, varint: rpc_version
              bitField0_ |= 0x00000002;
              rpcVersion_ = input.readUInt32();
              break;
            }
            case 24: {  // field 3, varint: info_port
              bitField0_ |= 0x00000004;
              infoPort_ = input.readUInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on error so a partially-parsed message keeps what was read.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
    }

    // Shared stateless parser; delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<Master> PARSER =
        new com.google.protobuf.AbstractParser<Master>() {
      public Master parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new Master(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<Master> getParserForType() {
      return PARSER;
    }
1100 
    // Presence bits: 0x1 = master, 0x2 = rpc_version, 0x4 = info_port.
    private int bitField0_;
    // required .hbase.pb.ServerName master = 1;
    public static final int MASTER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_;
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public boolean hasMaster() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
      return master_;
    }
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
      // On the immutable message the stored value doubles as the OrBuilder view.
      return master_;
    }

    // optional uint32 rpc_version = 2;
    public static final int RPC_VERSION_FIELD_NUMBER = 2;
    private int rpcVersion_;
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    public boolean hasRpcVersion() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    public int getRpcVersion() {
      return rpcVersion_;
    }

    // optional uint32 info_port = 3;
    public static final int INFO_PORT_FIELD_NUMBER = 3;
    private int infoPort_;
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    public boolean hasInfoPort() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    public int getInfoPort() {
      return infoPort_;
    }
1175 
    // Seats proto2 defaults; called by the parsing constructor and static init.
    private void initFields() {
      master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      rpcVersion_ = 0;
      infoPort_ = 0;
    }
    // Memoized tri-state: -1 = not computed, 0 = not initialized, 1 = initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // 'master' is a required field and must itself be fully initialized.
      if (!hasMaster()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getMaster().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces size memoization so nested length-delimited fields serialize correctly.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, master_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, infoPort_);
      }
      getUnknownFields().writeTo(output);
    }

    // -1 sentinel = size not yet computed; messages are immutable so one compute suffices.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, master_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, infoPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) obj;

      // Field presence must match, and set fields must compare equal.
      boolean result = true;
      result = result && (hasMaster() == other.hasMaster());
      if (hasMaster()) {
        result = result && getMaster()
            .equals(other.getMaster());
      }
      result = result && (hasRpcVersion() == other.hasRpcVersion());
      if (hasRpcVersion()) {
        result = result && (getRpcVersion()
            == other.getRpcVersion());
      }
      result = result && (hasInfoPort() == other.hasInfoPort());
      if (hasInfoPort()) {
        result = result && (getInfoPort()
            == other.getInfoPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // 0 doubles as the "not yet computed" sentinel; safe because the mixing
    // below cannot produce 0 for this descriptor in practice.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasMaster()) {
        hash = (37 * hash) + MASTER_FIELD_NUMBER;
        hash = (53 * hash) + getMaster().hashCode();
      }
      if (hasRpcVersion()) {
        hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getRpcVersion();
      }
      if (hasInfoPort()) {
        hash = (37 * hash) + INFO_PORT_FIELD_NUMBER;
        hash = (53 * hash) + getInfoPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
1298 
    // Convenience parse entry points; all delegate to the shared PARSER and
    // throw InvalidProtocolBufferException on malformed or uninitialized input.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length prefix before the message.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factories: newBuilder(prototype) starts from a copy of 'prototype'.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
1365     /**
1366      * Protobuf type {@code hbase.pb.Master}
1367      *
1368      * <pre>
1369      **
1370      * Content of the master znode.
1371      * </pre>
1372      */
1373     public static final class Builder extends
1374         com.google.protobuf.GeneratedMessage.Builder<Builder>
1375        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MasterOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Eagerly creates nested-field builders only when the runtime requests it
        // (nested-builder mode); otherwise field builders are created lazily.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getMasterFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        // Reset each field to its default and drop all three presence bits.
        if (masterBuilder_ == null) {
          master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
        } else {
          masterBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        rpcVersion_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        infoPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      public Builder clone() {
        // Deep copy via an intermediate partial message.
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance();
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master build() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial();
        // build() enforces required fields; buildPartial() does not.
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
1442 
      // Builds without checking required fields; copies values and translates
      // the builder's presence bits into the message's bitField0_.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (masterBuilder_ == null) {
          result.master_ = master_;
        } else {
          result.master_ = masterBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.rpcVersion_ = rpcVersion_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.infoPort_ = infoPort_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        // Fast path for same-type merges; generic reflective merge otherwise.
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other) {
        // Merging the default instance is a no-op: only set fields are copied.
        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance()) return this;
        if (other.hasMaster()) {
          mergeMaster(other.getMaster());
        }
        if (other.hasRpcVersion()) {
          setRpcVersion(other.getRpcVersion());
        }
        if (other.hasInfoPort()) {
          setInfoPort(other.getInfoPort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasMaster()) {
          // required field 'master' is unset
          return false;
        }
        if (!getMaster().isInitialized()) {
          // nested required fields of 'master' are unset
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Keep whatever was successfully read before rethrowing.
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Builder-side presence bits: 0x1 = master, 0x2 = rpc_version, 0x4 = info_port.
      private int bitField0_;

      // required .hbase.pb.ServerName master = 1;
      // Either master_ holds the value directly, or masterBuilder_ (once created)
      // becomes the single source of truth — never both.
      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_;
      /**
       * <code>required .hbase.pb.ServerName master = 1;</code>
       *
       * <pre>
       * The ServerName of the current Master
       * </pre>
       */
      public boolean hasMaster() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hbase.pb.ServerName master = 1;</code>
       *
       * <pre>
       * The ServerName of the current Master
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
        if (masterBuilder_ == null) {
          return master_;
        } else {
          return masterBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hbase.pb.ServerName master = 1;</code>
       *
       * <pre>
       * The ServerName of the current Master
       * </pre>
       */
      public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
        if (masterBuilder_ == null) {
          if (value == null) {
            // Generated setters reject null; clear the field instead.
            throw new NullPointerException();
          }
          master_ = value;
          onChanged();
        } else {
          masterBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;  // mark field 1 as present
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerName master = 1;</code>
       *
       * <pre>
       * The ServerName of the current Master
       * </pre>
       */
      public Builder setMaster(
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
        if (masterBuilder_ == null) {
          master_ = builderForValue.build();
          onChanged();
        } else {
          masterBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
1589       /**
1590        * <code>required .hbase.pb.ServerName master = 1;</code>
1591        *
1592        * <pre>
1593        * The ServerName of the current Master
1594        * </pre>
1595        */
1596       public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
1597         if (masterBuilder_ == null) {
1598           if (((bitField0_ & 0x00000001) == 0x00000001) &&
1599               master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
1600             master_ =
1601               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial();
1602           } else {
1603             master_ = value;
1604           }
1605           onChanged();
1606         } else {
1607           masterBuilder_.mergeFrom(value);
1608         }
1609         bitField0_ |= 0x00000001;
1610         return this;
1611       }
1612       /**
1613        * <code>required .hbase.pb.ServerName master = 1;</code>
1614        *
1615        * <pre>
1616        * The ServerName of the current Master
1617        * </pre>
1618        */
1619       public Builder clearMaster() {
1620         if (masterBuilder_ == null) {
1621           master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
1622           onChanged();
1623         } else {
1624           masterBuilder_.clear();
1625         }
1626         bitField0_ = (bitField0_ & ~0x00000001);
1627         return this;
1628       }
1629       /**
1630        * <code>required .hbase.pb.ServerName master = 1;</code>
1631        *
1632        * <pre>
1633        * The ServerName of the current Master
1634        * </pre>
1635        */
1636       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() {
1637         bitField0_ |= 0x00000001;
1638         onChanged();
1639         return getMasterFieldBuilder().getBuilder();
1640       }
1641       /**
1642        * <code>required .hbase.pb.ServerName master = 1;</code>
1643        *
1644        * <pre>
1645        * The ServerName of the current Master
1646        * </pre>
1647        */
1648       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
1649         if (masterBuilder_ != null) {
1650           return masterBuilder_.getMessageOrBuilder();
1651         } else {
1652           return master_;
1653         }
1654       }
1655       /**
1656        * <code>required .hbase.pb.ServerName master = 1;</code>
1657        *
1658        * <pre>
1659        * The ServerName of the current Master
1660        * </pre>
1661        */
1662       private com.google.protobuf.SingleFieldBuilder<
1663           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
1664           getMasterFieldBuilder() {
1665         if (masterBuilder_ == null) {
1666           masterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1667               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
1668                   master_,
1669                   getParentForChildren(),
1670                   isClean());
1671           master_ = null;
1672         }
1673         return masterBuilder_;
1674       }
1675 
      // optional uint32 rpc_version = 2;
      // Presence is tracked by bit 1 (0x00000002) of bitField0_.
      private int rpcVersion_ ;
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * Major RPC version so that clients can know what version the master can accept.
       * </pre>
       */
      public boolean hasRpcVersion() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * Major RPC version so that clients can know what version the master can accept.
       * </pre>
       */
      public int getRpcVersion() {
        return rpcVersion_;
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * Major RPC version so that clients can know what version the master can accept.
       * </pre>
       */
      public Builder setRpcVersion(int value) {
        bitField0_ |= 0x00000002;
        rpcVersion_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 rpc_version = 2;</code>
       *
       * <pre>
       * Major RPC version so that clients can know what version the master can accept.
       * </pre>
       */
      public Builder clearRpcVersion() {
        // Drop the has-bit and restore the proto default (0 for uint32).
        bitField0_ = (bitField0_ & ~0x00000002);
        rpcVersion_ = 0;
        onChanged();
        return this;
      }
1724 
      // optional uint32 info_port = 3;
      // Presence is tracked by bit 2 (0x00000004) of bitField0_.
      private int infoPort_ ;
      /**
       * <code>optional uint32 info_port = 3;</code>
       */
      public boolean hasInfoPort() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional uint32 info_port = 3;</code>
       */
      public int getInfoPort() {
        return infoPort_;
      }
      /**
       * <code>optional uint32 info_port = 3;</code>
       */
      public Builder setInfoPort(int value) {
        bitField0_ |= 0x00000004;
        infoPort_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 info_port = 3;</code>
       */
      public Builder clearInfoPort() {
        // Drop the has-bit and restore the proto default (0 for uint32).
        bitField0_ = (bitField0_ & ~0x00000004);
        infoPort_ = 0;
        onChanged();
        return this;
      }
1757 
1758       // @@protoc_insertion_point(builder_scope:hbase.pb.Master)
1759     }
1760 
    // Eagerly builds the shared default (empty) Master instance at class-load
    // time, using the noInit constructor to skip wire-format parsing.
    static {
      defaultInstance = new Master(true);
      defaultInstance.initFields();
    }
1765 
1766     // @@protoc_insertion_point(class_scope:hbase.pb.Master)
1767   }
1768 
  // Read-only view of a ClusterUp message; implemented by both the immutable
  // ClusterUp message and its mutable Builder.
  public interface ClusterUpOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string start_date = 1;
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up.  Currently
     * the data is cluster start_date.
     * </pre>
     */
    boolean hasStartDate();
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up.  Currently
     * the data is cluster start_date.
     * </pre>
     */
    java.lang.String getStartDate();
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up.  Currently
     * the data is cluster start_date.
     * </pre>
     */
    com.google.protobuf.ByteString
        getStartDateBytes();
  }
1802   /**
1803    * Protobuf type {@code hbase.pb.ClusterUp}
1804    *
1805    * <pre>
1806    **
1807    * Content of the '/hbase/running', cluster state, znode.
1808    * </pre>
1809    */
1810   public static final class ClusterUp extends
1811       com.google.protobuf.GeneratedMessage
1812       implements ClusterUpOrBuilder {
1813     // Use ClusterUp.newBuilder() to construct.
1814     private ClusterUp(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
1815       super(builder);
1816       this.unknownFields = builder.getUnknownFields();
1817     }
1818     private ClusterUp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1819 
1820     private static final ClusterUp defaultInstance;
1821     public static ClusterUp getDefaultInstance() {
1822       return defaultInstance;
1823     }
1824 
1825     public ClusterUp getDefaultInstanceForType() {
1826       return defaultInstance;
1827     }
1828 
1829     private final com.google.protobuf.UnknownFieldSet unknownFields;
1830     @java.lang.Override
1831     public final com.google.protobuf.UnknownFieldSet
1832         getUnknownFields() {
1833       return this.unknownFields;
1834     }
1835     private ClusterUp(
1836         com.google.protobuf.CodedInputStream input,
1837         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1838         throws com.google.protobuf.InvalidProtocolBufferException {
1839       initFields();
1840       int mutable_bitField0_ = 0;
1841       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1842           com.google.protobuf.UnknownFieldSet.newBuilder();
1843       try {
1844         boolean done = false;
1845         while (!done) {
1846           int tag = input.readTag();
1847           switch (tag) {
1848             case 0:
1849               done = true;
1850               break;
1851             default: {
1852               if (!parseUnknownField(input, unknownFields,
1853                                      extensionRegistry, tag)) {
1854                 done = true;
1855               }
1856               break;
1857             }
1858             case 10: {
1859               bitField0_ |= 0x00000001;
1860               startDate_ = input.readBytes();
1861               break;
1862             }
1863           }
1864         }
1865       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1866         throw e.setUnfinishedMessage(this);
1867       } catch (java.io.IOException e) {
1868         throw new com.google.protobuf.InvalidProtocolBufferException(
1869             e.getMessage()).setUnfinishedMessage(this);
1870       } finally {
1871         this.unknownFields = unknownFields.build();
1872         makeExtensionsImmutable();
1873       }
1874     }
1875     public static final com.google.protobuf.Descriptors.Descriptor
1876         getDescriptor() {
1877       return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
1878     }
1879 
1880     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1881         internalGetFieldAccessorTable() {
1882       return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
1883           .ensureFieldAccessorsInitialized(
1884               org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
1885     }
1886 
1887     public static com.google.protobuf.Parser<ClusterUp> PARSER =
1888         new com.google.protobuf.AbstractParser<ClusterUp>() {
1889       public ClusterUp parsePartialFrom(
1890           com.google.protobuf.CodedInputStream input,
1891           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1892           throws com.google.protobuf.InvalidProtocolBufferException {
1893         return new ClusterUp(input, extensionRegistry);
1894       }
1895     };
1896 
1897     @java.lang.Override
1898     public com.google.protobuf.Parser<ClusterUp> getParserForType() {
1899       return PARSER;
1900     }
1901 
1902     private int bitField0_;
1903     // required string start_date = 1;
1904     public static final int START_DATE_FIELD_NUMBER = 1;
1905     private java.lang.Object startDate_;
1906     /**
1907      * <code>required string start_date = 1;</code>
1908      *
1909      * <pre>
1910      * If this znode is present, cluster is up.  Currently
1911      * the data is cluster start_date.
1912      * </pre>
1913      */
1914     public boolean hasStartDate() {
1915       return ((bitField0_ & 0x00000001) == 0x00000001);
1916     }
1917     /**
1918      * <code>required string start_date = 1;</code>
1919      *
1920      * <pre>
1921      * If this znode is present, cluster is up.  Currently
1922      * the data is cluster start_date.
1923      * </pre>
1924      */
1925     public java.lang.String getStartDate() {
1926       java.lang.Object ref = startDate_;
1927       if (ref instanceof java.lang.String) {
1928         return (java.lang.String) ref;
1929       } else {
1930         com.google.protobuf.ByteString bs = 
1931             (com.google.protobuf.ByteString) ref;
1932         java.lang.String s = bs.toStringUtf8();
1933         if (bs.isValidUtf8()) {
1934           startDate_ = s;
1935         }
1936         return s;
1937       }
1938     }
1939     /**
1940      * <code>required string start_date = 1;</code>
1941      *
1942      * <pre>
1943      * If this znode is present, cluster is up.  Currently
1944      * the data is cluster start_date.
1945      * </pre>
1946      */
1947     public com.google.protobuf.ByteString
1948         getStartDateBytes() {
1949       java.lang.Object ref = startDate_;
1950       if (ref instanceof java.lang.String) {
1951         com.google.protobuf.ByteString b = 
1952             com.google.protobuf.ByteString.copyFromUtf8(
1953                 (java.lang.String) ref);
1954         startDate_ = b;
1955         return b;
1956       } else {
1957         return (com.google.protobuf.ByteString) ref;
1958       }
1959     }
1960 
1961     private void initFields() {
1962       startDate_ = "";
1963     }
1964     private byte memoizedIsInitialized = -1;
1965     public final boolean isInitialized() {
1966       byte isInitialized = memoizedIsInitialized;
1967       if (isInitialized != -1) return isInitialized == 1;
1968 
1969       if (!hasStartDate()) {
1970         memoizedIsInitialized = 0;
1971         return false;
1972       }
1973       memoizedIsInitialized = 1;
1974       return true;
1975     }
1976 
1977     public void writeTo(com.google.protobuf.CodedOutputStream output)
1978                         throws java.io.IOException {
1979       getSerializedSize();
1980       if (((bitField0_ & 0x00000001) == 0x00000001)) {
1981         output.writeBytes(1, getStartDateBytes());
1982       }
1983       getUnknownFields().writeTo(output);
1984     }
1985 
1986     private int memoizedSerializedSize = -1;
1987     public int getSerializedSize() {
1988       int size = memoizedSerializedSize;
1989       if (size != -1) return size;
1990 
1991       size = 0;
1992       if (((bitField0_ & 0x00000001) == 0x00000001)) {
1993         size += com.google.protobuf.CodedOutputStream
1994           .computeBytesSize(1, getStartDateBytes());
1995       }
1996       size += getUnknownFields().getSerializedSize();
1997       memoizedSerializedSize = size;
1998       return size;
1999     }
2000 
2001     private static final long serialVersionUID = 0L;
2002     @java.lang.Override
2003     protected java.lang.Object writeReplace()
2004         throws java.io.ObjectStreamException {
2005       return super.writeReplace();
2006     }
2007 
2008     @java.lang.Override
2009     public boolean equals(final java.lang.Object obj) {
2010       if (obj == this) {
2011        return true;
2012       }
2013       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)) {
2014         return super.equals(obj);
2015       }
2016       org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) obj;
2017 
2018       boolean result = true;
2019       result = result && (hasStartDate() == other.hasStartDate());
2020       if (hasStartDate()) {
2021         result = result && getStartDate()
2022             .equals(other.getStartDate());
2023       }
2024       result = result &&
2025           getUnknownFields().equals(other.getUnknownFields());
2026       return result;
2027     }
2028 
2029     private int memoizedHashCode = 0;
2030     @java.lang.Override
2031     public int hashCode() {
2032       if (memoizedHashCode != 0) {
2033         return memoizedHashCode;
2034       }
2035       int hash = 41;
2036       hash = (19 * hash) + getDescriptorForType().hashCode();
2037       if (hasStartDate()) {
2038         hash = (37 * hash) + START_DATE_FIELD_NUMBER;
2039         hash = (53 * hash) + getStartDate().hashCode();
2040       }
2041       hash = (29 * hash) + getUnknownFields().hashCode();
2042       memoizedHashCode = hash;
2043       return hash;
2044     }
2045 
2046     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2047         com.google.protobuf.ByteString data)
2048         throws com.google.protobuf.InvalidProtocolBufferException {
2049       return PARSER.parseFrom(data);
2050     }
2051     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2052         com.google.protobuf.ByteString data,
2053         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2054         throws com.google.protobuf.InvalidProtocolBufferException {
2055       return PARSER.parseFrom(data, extensionRegistry);
2056     }
2057     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(byte[] data)
2058         throws com.google.protobuf.InvalidProtocolBufferException {
2059       return PARSER.parseFrom(data);
2060     }
2061     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2062         byte[] data,
2063         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2064         throws com.google.protobuf.InvalidProtocolBufferException {
2065       return PARSER.parseFrom(data, extensionRegistry);
2066     }
2067     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(java.io.InputStream input)
2068         throws java.io.IOException {
2069       return PARSER.parseFrom(input);
2070     }
2071     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2072         java.io.InputStream input,
2073         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2074         throws java.io.IOException {
2075       return PARSER.parseFrom(input, extensionRegistry);
2076     }
2077     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(java.io.InputStream input)
2078         throws java.io.IOException {
2079       return PARSER.parseDelimitedFrom(input);
2080     }
2081     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(
2082         java.io.InputStream input,
2083         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2084         throws java.io.IOException {
2085       return PARSER.parseDelimitedFrom(input, extensionRegistry);
2086     }
2087     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2088         com.google.protobuf.CodedInputStream input)
2089         throws java.io.IOException {
2090       return PARSER.parseFrom(input);
2091     }
2092     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
2093         com.google.protobuf.CodedInputStream input,
2094         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2095         throws java.io.IOException {
2096       return PARSER.parseFrom(input, extensionRegistry);
2097     }
2098 
2099     public static Builder newBuilder() { return Builder.create(); }
2100     public Builder newBuilderForType() { return newBuilder(); }
2101     public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp prototype) {
2102       return newBuilder().mergeFrom(prototype);
2103     }
2104     public Builder toBuilder() { return newBuilder(this); }
2105 
2106     @java.lang.Override
2107     protected Builder newBuilderForType(
2108         com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2109       Builder builder = new Builder(parent);
2110       return builder;
2111     }
2112     /**
2113      * Protobuf type {@code hbase.pb.ClusterUp}
2114      *
2115      * <pre>
2116      **
2117      * Content of the '/hbase/running', cluster state, znode.
2118      * </pre>
2119      */
2120     public static final class Builder extends
2121         com.google.protobuf.GeneratedMessage.Builder<Builder>
2122        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder {
2123       public static final com.google.protobuf.Descriptors.Descriptor
2124           getDescriptor() {
2125         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
2126       }
2127 
2128       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2129           internalGetFieldAccessorTable() {
2130         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
2131             .ensureFieldAccessorsInitialized(
2132                 org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
2133       }
2134 
2135       // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.newBuilder()
2136       private Builder() {
2137         maybeForceBuilderInitialization();
2138       }
2139 
2140       private Builder(
2141           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2142         super(parent);
2143         maybeForceBuilderInitialization();
2144       }
2145       private void maybeForceBuilderInitialization() {
2146         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2147         }
2148       }
2149       private static Builder create() {
2150         return new Builder();
2151       }
2152 
2153       public Builder clear() {
2154         super.clear();
2155         startDate_ = "";
2156         bitField0_ = (bitField0_ & ~0x00000001);
2157         return this;
2158       }
2159 
2160       public Builder clone() {
2161         return create().mergeFrom(buildPartial());
2162       }
2163 
2164       public com.google.protobuf.Descriptors.Descriptor
2165           getDescriptorForType() {
2166         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
2167       }
2168 
2169       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp getDefaultInstanceForType() {
2170         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance();
2171       }
2172 
2173       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp build() {
2174         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial();
2175         if (!result.isInitialized()) {
2176           throw newUninitializedMessageException(result);
2177         }
2178         return result;
2179       }
2180 
2181       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildPartial() {
2182         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp(this);
2183         int from_bitField0_ = bitField0_;
2184         int to_bitField0_ = 0;
2185         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2186           to_bitField0_ |= 0x00000001;
2187         }
2188         result.startDate_ = startDate_;
2189         result.bitField0_ = to_bitField0_;
2190         onBuilt();
2191         return result;
2192       }
2193 
2194       public Builder mergeFrom(com.google.protobuf.Message other) {
2195         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) {
2196           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)other);
2197         } else {
2198           super.mergeFrom(other);
2199           return this;
2200         }
2201       }
2202 
2203       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other) {
2204         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance()) return this;
2205         if (other.hasStartDate()) {
2206           bitField0_ |= 0x00000001;
2207           startDate_ = other.startDate_;
2208           onChanged();
2209         }
2210         this.mergeUnknownFields(other.getUnknownFields());
2211         return this;
2212       }
2213 
2214       public final boolean isInitialized() {
2215         if (!hasStartDate()) {
2216           
2217           return false;
2218         }
2219         return true;
2220       }
2221 
2222       public Builder mergeFrom(
2223           com.google.protobuf.CodedInputStream input,
2224           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2225           throws java.io.IOException {
2226         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parsedMessage = null;
2227         try {
2228           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2229         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2230           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) e.getUnfinishedMessage();
2231           throw e;
2232         } finally {
2233           if (parsedMessage != null) {
2234             mergeFrom(parsedMessage);
2235           }
2236         }
2237         return this;
2238       }
2239       private int bitField0_;
2240 
2241       // required string start_date = 1;
2242       private java.lang.Object startDate_ = "";
2243       /**
2244        * <code>required string start_date = 1;</code>
2245        *
2246        * <pre>
2247        * If this znode is present, cluster is up.  Currently
2248        * the data is cluster start_date.
2249        * </pre>
2250        */
2251       public boolean hasStartDate() {
2252         return ((bitField0_ & 0x00000001) == 0x00000001);
2253       }
2254       /**
2255        * <code>required string start_date = 1;</code>
2256        *
2257        * <pre>
2258        * If this znode is present, cluster is up.  Currently
2259        * the data is cluster start_date.
2260        * </pre>
2261        */
2262       public java.lang.String getStartDate() {
2263         java.lang.Object ref = startDate_;
2264         if (!(ref instanceof java.lang.String)) {
2265           java.lang.String s = ((com.google.protobuf.ByteString) ref)
2266               .toStringUtf8();
2267           startDate_ = s;
2268           return s;
2269         } else {
2270           return (java.lang.String) ref;
2271         }
2272       }
2273       /**
2274        * <code>required string start_date = 1;</code>
2275        *
2276        * <pre>
2277        * If this znode is present, cluster is up.  Currently
2278        * the data is cluster start_date.
2279        * </pre>
2280        */
2281       public com.google.protobuf.ByteString
2282           getStartDateBytes() {
2283         java.lang.Object ref = startDate_;
2284         if (ref instanceof String) {
2285           com.google.protobuf.ByteString b = 
2286               com.google.protobuf.ByteString.copyFromUtf8(
2287                   (java.lang.String) ref);
2288           startDate_ = b;
2289           return b;
2290         } else {
2291           return (com.google.protobuf.ByteString) ref;
2292         }
2293       }
2294       /**
2295        * <code>required string start_date = 1;</code>
2296        *
2297        * <pre>
2298        * If this znode is present, cluster is up.  Currently
2299        * the data is cluster start_date.
2300        * </pre>
2301        */
2302       public Builder setStartDate(
2303           java.lang.String value) {
2304         if (value == null) {
2305     throw new NullPointerException();
2306   }
2307   bitField0_ |= 0x00000001;
2308         startDate_ = value;
2309         onChanged();
2310         return this;
2311       }
2312       /**
2313        * <code>required string start_date = 1;</code>
2314        *
2315        * <pre>
2316        * If this znode is present, cluster is up.  Currently
2317        * the data is cluster start_date.
2318        * </pre>
2319        */
2320       public Builder clearStartDate() {
2321         bitField0_ = (bitField0_ & ~0x00000001);
2322         startDate_ = getDefaultInstance().getStartDate();
2323         onChanged();
2324         return this;
2325       }
2326       /**
2327        * <code>required string start_date = 1;</code>
2328        *
2329        * <pre>
2330        * If this znode is present, cluster is up.  Currently
2331        * the data is cluster start_date.
2332        * </pre>
2333        */
2334       public Builder setStartDateBytes(
2335           com.google.protobuf.ByteString value) {
2336         if (value == null) {
2337     throw new NullPointerException();
2338   }
2339   bitField0_ |= 0x00000001;
2340         startDate_ = value;
2341         onChanged();
2342         return this;
2343       }
2344 
2345       // @@protoc_insertion_point(builder_scope:hbase.pb.ClusterUp)
2346     }
2347 
2348     static {
2349       defaultInstance = new ClusterUp(true);
2350       defaultInstance.initFields();
2351     }
2352 
2353     // @@protoc_insertion_point(class_scope:hbase.pb.ClusterUp)
2354   }
2355 
  // Read-only view of a SplitLogTask message; implemented by both the
  // immutable SplitLogTask message and its mutable Builder.
  public interface SplitLogTaskOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.SplitLogTask.State state = 1;
    /**
     * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
     */
    boolean hasState();
    /**
     * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState();

    // required .hbase.pb.ServerName server_name = 2;
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     */
    boolean hasServerName();
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();

    // optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
    /**
     * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
     */
    boolean hasMode();
    /**
     * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode();
  }
2393   /**
2394    * Protobuf type {@code hbase.pb.SplitLogTask}
2395    *
2396    * <pre>
2397    **
2398    * WAL SplitLog directory znodes have this for content.  Used doing distributed
2399    * WAL splitting.  Holds current state and name of server that originated split.
2400    * </pre>
2401    */
2402   public static final class SplitLogTask extends
2403       com.google.protobuf.GeneratedMessage
2404       implements SplitLogTaskOrBuilder {
    // Use SplitLogTask.newBuilder() to construct.
    private SplitLogTask(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // No-init constructor: used only to create the singleton default instance.
    private SplitLogTask(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2411 
    // Singleton default instance; being static final it is assigned exactly
    // once during class initialization.
    private static final SplitLogTask defaultInstance;
    /** Returns the shared immutable instance with all fields at their defaults. */
    public static SplitLogTask getDefaultInstance() {
      return defaultInstance;
    }

    public SplitLogTask getDefaultInstanceForType() {
      return defaultInstance;
    }
2420 
    // Fields seen on the wire that this schema version does not know;
    // preserved so reserialization round-trips them.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Parses a SplitLogTask from the protobuf wire format. Unknown fields and
     * unrecognized enum numbers are retained in {@code unknownFields} rather
     * than dropped.
     */
    private SplitLogTask(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 signals end of the message / stream.
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              // Field 1 (state): varint-encoded enum.
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.valueOf(rawValue);
              if (value == null) {
                // Unknown enum number: keep it as an unknown varint field.
                unknownFields.mergeVarintField(1, rawValue);
              } else {
                bitField0_ |= 0x00000001;
                state_ = value;
              }
              break;
            }
            case 18: {
              // Field 2 (server_name): length-delimited sub-message.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                // Field appeared twice on the wire: merge into prior value.
                subBuilder = serverName_.toBuilder();
              }
              serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(serverName_);
                serverName_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 24: {
              // Field 3 (mode): varint-encoded enum.
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.valueOf(rawValue);
              if (value == null) {
                unknownFields.mergeVarintField(3, rawValue);
              } else {
                bitField0_ |= 0x00000004;
                mode_ = value;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze whatever was parsed, even on error paths.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
    }

    // Binds the generated reflection accessor table to this message class and
    // its Builder (lazy one-time initialization).
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
    }
2508 
    // Stream parser for this message; delegates to the parsing constructor.
    // NOTE(review): non-final public static field is generated-code legacy —
    // do not hand-edit; later protoc versions emit it final.
    public static com.google.protobuf.Parser<SplitLogTask> PARSER =
        new com.google.protobuf.AbstractParser<SplitLogTask>() {
      public SplitLogTask parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new SplitLogTask(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<SplitLogTask> getParserForType() {
      return PARSER;
    }
2523 
2524     /**
2525      * Protobuf enum {@code hbase.pb.SplitLogTask.State}
2526      */
2527     public enum State
2528         implements com.google.protobuf.ProtocolMessageEnum {
2529       /**
2530        * <code>UNASSIGNED = 0;</code>
2531        */
2532       UNASSIGNED(0, 0),
2533       /**
2534        * <code>OWNED = 1;</code>
2535        */
2536       OWNED(1, 1),
2537       /**
2538        * <code>RESIGNED = 2;</code>
2539        */
2540       RESIGNED(2, 2),
2541       /**
2542        * <code>DONE = 3;</code>
2543        */
2544       DONE(3, 3),
2545       /**
2546        * <code>ERR = 4;</code>
2547        */
2548       ERR(4, 4),
2549       ;
2550 
2551       /**
2552        * <code>UNASSIGNED = 0;</code>
2553        */
2554       public static final int UNASSIGNED_VALUE = 0;
2555       /**
2556        * <code>OWNED = 1;</code>
2557        */
2558       public static final int OWNED_VALUE = 1;
2559       /**
2560        * <code>RESIGNED = 2;</code>
2561        */
2562       public static final int RESIGNED_VALUE = 2;
2563       /**
2564        * <code>DONE = 3;</code>
2565        */
2566       public static final int DONE_VALUE = 3;
2567       /**
2568        * <code>ERR = 4;</code>
2569        */
2570       public static final int ERR_VALUE = 4;
2571 
2572 
2573       public final int getNumber() { return value; }
2574 
2575       public static State valueOf(int value) {
2576         switch (value) {
2577           case 0: return UNASSIGNED;
2578           case 1: return OWNED;
2579           case 2: return RESIGNED;
2580           case 3: return DONE;
2581           case 4: return ERR;
2582           default: return null;
2583         }
2584       }
2585 
2586       public static com.google.protobuf.Internal.EnumLiteMap<State>
2587           internalGetValueMap() {
2588         return internalValueMap;
2589       }
2590       private static com.google.protobuf.Internal.EnumLiteMap<State>
2591           internalValueMap =
2592             new com.google.protobuf.Internal.EnumLiteMap<State>() {
2593               public State findValueByNumber(int number) {
2594                 return State.valueOf(number);
2595               }
2596             };
2597 
2598       public final com.google.protobuf.Descriptors.EnumValueDescriptor
2599           getValueDescriptor() {
2600         return getDescriptor().getValues().get(index);
2601       }
2602       public final com.google.protobuf.Descriptors.EnumDescriptor
2603           getDescriptorForType() {
2604         return getDescriptor();
2605       }
2606       public static final com.google.protobuf.Descriptors.EnumDescriptor
2607           getDescriptor() {
2608         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(0);
2609       }
2610 
2611       private static final State[] VALUES = values();
2612 
2613       public static State valueOf(
2614           com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
2615         if (desc.getType() != getDescriptor()) {
2616           throw new java.lang.IllegalArgumentException(
2617             "EnumValueDescriptor is not for this type.");
2618         }
2619         return VALUES[desc.getIndex()];
2620       }
2621 
2622       private final int index;
2623       private final int value;
2624 
2625       private State(int index, int value) {
2626         this.index = index;
2627         this.value = value;
2628       }
2629 
2630       // @@protoc_insertion_point(enum_scope:hbase.pb.SplitLogTask.State)
2631     }
2632 
2633     /**
2634      * Protobuf enum {@code hbase.pb.SplitLogTask.RecoveryMode}
2635      */
2636     public enum RecoveryMode
2637         implements com.google.protobuf.ProtocolMessageEnum {
2638       /**
2639        * <code>UNKNOWN = 0;</code>
2640        */
2641       UNKNOWN(0, 0),
2642       /**
2643        * <code>LOG_SPLITTING = 1;</code>
2644        */
2645       LOG_SPLITTING(1, 1),
2646       /**
2647        * <code>LOG_REPLAY = 2;</code>
2648        */
2649       LOG_REPLAY(2, 2),
2650       ;
2651 
2652       /**
2653        * <code>UNKNOWN = 0;</code>
2654        */
2655       public static final int UNKNOWN_VALUE = 0;
2656       /**
2657        * <code>LOG_SPLITTING = 1;</code>
2658        */
2659       public static final int LOG_SPLITTING_VALUE = 1;
2660       /**
2661        * <code>LOG_REPLAY = 2;</code>
2662        */
2663       public static final int LOG_REPLAY_VALUE = 2;
2664 
2665 
2666       public final int getNumber() { return value; }
2667 
2668       public static RecoveryMode valueOf(int value) {
2669         switch (value) {
2670           case 0: return UNKNOWN;
2671           case 1: return LOG_SPLITTING;
2672           case 2: return LOG_REPLAY;
2673           default: return null;
2674         }
2675       }
2676 
2677       public static com.google.protobuf.Internal.EnumLiteMap<RecoveryMode>
2678           internalGetValueMap() {
2679         return internalValueMap;
2680       }
2681       private static com.google.protobuf.Internal.EnumLiteMap<RecoveryMode>
2682           internalValueMap =
2683             new com.google.protobuf.Internal.EnumLiteMap<RecoveryMode>() {
2684               public RecoveryMode findValueByNumber(int number) {
2685                 return RecoveryMode.valueOf(number);
2686               }
2687             };
2688 
2689       public final com.google.protobuf.Descriptors.EnumValueDescriptor
2690           getValueDescriptor() {
2691         return getDescriptor().getValues().get(index);
2692       }
2693       public final com.google.protobuf.Descriptors.EnumDescriptor
2694           getDescriptorForType() {
2695         return getDescriptor();
2696       }
2697       public static final com.google.protobuf.Descriptors.EnumDescriptor
2698           getDescriptor() {
2699         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(1);
2700       }
2701 
2702       private static final RecoveryMode[] VALUES = values();
2703 
2704       public static RecoveryMode valueOf(
2705           com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
2706         if (desc.getType() != getDescriptor()) {
2707           throw new java.lang.IllegalArgumentException(
2708             "EnumValueDescriptor is not for this type.");
2709         }
2710         return VALUES[desc.getIndex()];
2711       }
2712 
2713       private final int index;
2714       private final int value;
2715 
2716       private RecoveryMode(int index, int value) {
2717         this.index = index;
2718         this.value = value;
2719       }
2720 
2721       // @@protoc_insertion_point(enum_scope:hbase.pb.SplitLogTask.RecoveryMode)
2722     }
2723 
    // Presence bits: 0x1 = state, 0x2 = server_name, 0x4 = mode.
    private int bitField0_;
    // required .hbase.pb.SplitLogTask.State state = 1;
    public static final int STATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_;
    /**
     * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
     */
    public boolean hasState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
      return state_;
    }
2740 
    // required .hbase.pb.ServerName server_name = 2;
    public static final int SERVER_NAME_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     */
    public boolean hasServerName() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
      return serverName_;
    }
    /**
     * <code>required .hbase.pb.ServerName server_name = 2;</code>
     *
     * On an immutable message this returns the same object as getServerName().
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
      return serverName_;
    }
2762 
    // optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
    public static final int MODE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode mode_;
    /**
     * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
     */
    public boolean hasMode() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
     *
     * Returns UNKNOWN (the proto default) when the field is unset.
     */
    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode() {
      return mode_;
    }
2778 
    // Resets every field to its proto default; invoked by constructors
    // before parsing so unset fields have well-defined values.
    private void initFields() {
      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN;
    }
2784     private byte memoizedIsInitialized = -1;
2785     public final boolean isInitialized() {
2786       byte isInitialized = memoizedIsInitialized;
2787       if (isInitialized != -1) return isInitialized == 1;
2788 
2789       if (!hasState()) {
2790         memoizedIsInitialized = 0;
2791         return false;
2792       }
2793       if (!hasServerName()) {
2794         memoizedIsInitialized = 0;
2795         return false;
2796       }
2797       if (!getServerName().isInitialized()) {
2798         memoizedIsInitialized = 0;
2799         return false;
2800       }
2801       memoizedIsInitialized = 1;
2802       return true;
2803     }
2804 
    /** Serializes set fields in field-number order, then any unknown fields. */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Forces the memoized size to be computed before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeEnum(1, state_.getNumber());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, serverName_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeEnum(3, mode_.getNumber());
      }
      getUnknownFields().writeTo(output);
    }
2819 
    // Memoized encoded size; -1 means "not yet computed".
    private int memoizedSerializedSize = -1;
    /** Returns the wire-format byte size of this message (cached). */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(1, state_.getNumber());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, serverName_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(3, mode_.getNumber());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
2842 
    private static final long serialVersionUID = 0L;
    // Java serialization is delegated to the protobuf-aware replacement
    // object supplied by GeneratedMessage.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
2849 
2850     @java.lang.Override
2851     public boolean equals(final java.lang.Object obj) {
2852       if (obj == this) {
2853        return true;
2854       }
2855       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)) {
2856         return super.equals(obj);
2857       }
2858       org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) obj;
2859 
2860       boolean result = true;
2861       result = result && (hasState() == other.hasState());
2862       if (hasState()) {
2863         result = result &&
2864             (getState() == other.getState());
2865       }
2866       result = result && (hasServerName() == other.hasServerName());
2867       if (hasServerName()) {
2868         result = result && getServerName()
2869             .equals(other.getServerName());
2870       }
2871       result = result && (hasMode() == other.hasMode());
2872       if (hasMode()) {
2873         result = result &&
2874             (getMode() == other.getMode());
2875       }
2876       result = result &&
2877           getUnknownFields().equals(other.getUnknownFields());
2878       return result;
2879     }
2880 
    // Memoized hash; 0 is the "not yet computed" sentinel.
    private int memoizedHashCode = 0;
    /**
     * Hash over the descriptor, each set field (number + value), and the
     * unknown-field set; cached after first computation. Consistent with
     * equals().
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        hash = (53 * hash) + hashEnum(getState());
      }
      if (hasServerName()) {
        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
        hash = (53 * hash) + getServerName().hashCode();
      }
      if (hasMode()) {
        hash = (37 * hash) + MODE_FIELD_NUMBER;
        hash = (53 * hash) + hashEnum(getMode());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
2905 
    // Static parse helpers for every input shape (ByteString, byte[], streams,
    // with/without an extension registry); all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length prefix.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
2958 
    /** Returns a fresh, empty builder for this message. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Returns a builder pre-populated with {@code prototype}'s fields. */
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: creates a builder attached to a parent for change
    // notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
2972     /**
2973      * Protobuf type {@code hbase.pb.SplitLogTask}
2974      *
2975      * <pre>
2976      **
2977      * WAL SplitLog directory znodes have this for content.  Used doing distributed
2978      * WAL splitting.  Holds current state and name of server that originated split.
2979      * </pre>
2980      */
2981     public static final class Builder extends
2982         com.google.protobuf.GeneratedMessage.Builder<Builder>
2983        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTaskOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
      }

      // Same reflection table binding as the message class.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
      }
2995 
      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      // Builder attached to a parent for change propagation.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested server_name field builder when the
      // runtime flag alwaysUseFieldBuilders is enabled.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getServerNameFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
3014 
      /** Resets every field to its proto default and clears all presence bits. */
      public Builder clear() {
        super.clear();
        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
        bitField0_ = (bitField0_ & ~0x00000001);
        // server_name may be held either directly or via its field builder.
        if (serverNameBuilder_ == null) {
          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
        } else {
          serverNameBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
3029 
      /** Deep copy: a new builder seeded from this builder's partial message. */
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance();
      }
3042 
      /**
       * Builds the message, throwing UninitializedMessageException (wrapped by
       * the helper) if any required field is missing.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask build() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
3050 
      /**
       * Builds without the required-field check, copying each field and its
       * presence bit from the builder into the new immutable message.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.state_ = state_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        // server_name may live in the field builder rather than the field.
        if (serverNameBuilder_ == null) {
          result.serverName_ = serverName_;
        } else {
          result.serverName_ = serverNameBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.mode_ = mode_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
3075 
      // Generic merge: dispatches to the typed overload when possible.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Merges set fields from {@code other} into this builder: enum fields
       * are overwritten, the server_name sub-message is recursively merged,
       * and unknown fields are combined.
       */
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance()) return this;
        if (other.hasState()) {
          setState(other.getState());
        }
        if (other.hasServerName()) {
          mergeServerName(other.getServerName());
        }
        if (other.hasMode()) {
          setMode(other.getMode());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
3099 
3100       public final boolean isInitialized() {
3101         if (!hasState()) {
3102           
3103           return false;
3104         }
3105         if (!hasServerName()) {
3106           
3107           return false;
3108         }
3109         if (!getServerName().isInitialized()) {
3110           
3111           return false;
3112         }
3113         return true;
3114       }
3115 
      /**
       * Parses from a stream and merges the result into this builder. On a
       * parse error the partially-parsed message (if any) is still merged in
       * the finally block before the exception propagates.
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bitmask: bit 0 = state, bit 1 = server_name, bit 2 = mode.
3133       private int bitField0_;
3134 
3135       // required .hbase.pb.SplitLogTask.State state = 1;
3136       private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
3137       /**
3138        * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
3139        */
3140       public boolean hasState() {
3141         return ((bitField0_ & 0x00000001) == 0x00000001);
3142       }
3143       /**
3144        * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
3145        */
3146       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
3147         return state_;
3148       }
3149       /**
3150        * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
3151        */
3152       public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value) {
3153         if (value == null) {
3154           throw new NullPointerException();
3155         }
3156         bitField0_ |= 0x00000001;
3157         state_ = value;
3158         onChanged();
3159         return this;
3160       }
3161       /**
3162        * <code>required .hbase.pb.SplitLogTask.State state = 1;</code>
3163        */
      // Clears the presence bit and restores the proto default (UNASSIGNED).
3164       public Builder clearState() {
3165         bitField0_ = (bitField0_ & ~0x00000001);
3166         state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
3167         onChanged();
3168         return this;
3169       }
3170 
3171       // required .hbase.pb.ServerName server_name = 2;
      // Message-typed field: either held directly in serverName_ or delegated
      // to serverNameBuilder_ once getServerNameFieldBuilder() is invoked
      // (standard generated SingleFieldBuilder pattern — exactly one of the
      // two holds the current value at any time).
3172       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
3173       private com.google.protobuf.SingleFieldBuilder<
3174           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
3175       /**
3176        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3177        */
3178       public boolean hasServerName() {
3179         return ((bitField0_ & 0x00000002) == 0x00000002);
3180       }
3181       /**
3182        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3183        */
3184       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
3185         if (serverNameBuilder_ == null) {
3186           return serverName_;
3187         } else {
3188           return serverNameBuilder_.getMessage();
3189         }
3190       }
3191       /**
3192        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3193        */
3194       public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
3195         if (serverNameBuilder_ == null) {
3196           if (value == null) {
3197             throw new NullPointerException();
3198           }
3199           serverName_ = value;
3200           onChanged();
3201         } else {
3202           serverNameBuilder_.setMessage(value);
3203         }
3204         bitField0_ |= 0x00000002;
3205         return this;
3206       }
3207       /**
3208        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3209        */
3210       public Builder setServerName(
3211           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
3212         if (serverNameBuilder_ == null) {
3213           serverName_ = builderForValue.build();
3214           onChanged();
3215         } else {
3216           serverNameBuilder_.setMessage(builderForValue.build());
3217         }
3218         bitField0_ |= 0x00000002;
3219         return this;
3220       }
3221       /**
3222        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3223        */
      // If the field is already set (and not the default instance), performs a
      // recursive protobuf merge; otherwise adopts 'value' wholesale.
3224       public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
3225         if (serverNameBuilder_ == null) {
3226           if (((bitField0_ & 0x00000002) == 0x00000002) &&
3227               serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
3228             serverName_ =
3229               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
3230           } else {
3231             serverName_ = value;
3232           }
3233           onChanged();
3234         } else {
3235           serverNameBuilder_.mergeFrom(value);
3236         }
3237         bitField0_ |= 0x00000002;
3238         return this;
3239       }
3240       /**
3241        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3242        */
3243       public Builder clearServerName() {
3244         if (serverNameBuilder_ == null) {
3245           serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
3246           onChanged();
3247         } else {
3248           serverNameBuilder_.clear();
3249         }
3250         bitField0_ = (bitField0_ & ~0x00000002);
3251         return this;
3252       }
3253       /**
3254        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3255        */
      // Returns a mutable nested builder; marks the field present because the
      // caller is assumed to be about to populate it.
3256       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
3257         bitField0_ |= 0x00000002;
3258         onChanged();
3259         return getServerNameFieldBuilder().getBuilder();
3260       }
3261       /**
3262        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3263        */
3264       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
3265         if (serverNameBuilder_ != null) {
3266           return serverNameBuilder_.getMessageOrBuilder();
3267         } else {
3268           return serverName_;
3269         }
3270       }
3271       /**
3272        * <code>required .hbase.pb.ServerName server_name = 2;</code>
3273        */
      // Lazily creates the SingleFieldBuilder, transferring ownership of the
      // current serverName_ value into it (serverName_ is nulled afterwards).
3274       private com.google.protobuf.SingleFieldBuilder<
3275           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
3276           getServerNameFieldBuilder() {
3277         if (serverNameBuilder_ == null) {
3278           serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
3279               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
3280                   serverName_,
3281                   getParentForChildren(),
3282                   isClean());
3283           serverName_ = null;
3284         }
3285         return serverNameBuilder_;
3286       }
3287 
3288       // optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
3289       private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN;
3290       /**
3291        * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
3292        */
3293       public boolean hasMode() {
3294         return ((bitField0_ & 0x00000004) == 0x00000004);
3295       }
3296       /**
3297        * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
3298        */
3299       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode() {
3300         return mode_;
3301       }
3302       /**
3303        * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
3304        */
3305       public Builder setMode(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode value) {
3306         if (value == null) {
3307           throw new NullPointerException();
3308         }
3309         bitField0_ |= 0x00000004;
3310         mode_ = value;
3311         onChanged();
3312         return this;
3313       }
3314       /**
3315        * <code>optional .hbase.pb.SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];</code>
3316        */
      // Clears the presence bit and restores the declared default (UNKNOWN).
3317       public Builder clearMode() {
3318         bitField0_ = (bitField0_ & ~0x00000004);
3319         mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN;
3320         onChanged();
3321         return this;
3322       }
3323 
3324       // @@protoc_insertion_point(builder_scope:hbase.pb.SplitLogTask)
3325     }
3326 
3327     static {
      // Eagerly builds the immutable singleton default instance at class load.
3328       defaultInstance = new SplitLogTask(true);
3329       defaultInstance.initFields();
3330     }
3331 
3332     // @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask)
3333   }
3334 
// Read-only accessor interface implemented by both DeprecatedTableState and
// its Builder, so callers can inspect the field without caring which they hold.
3335   public interface DeprecatedTableStateOrBuilder
3336       extends com.google.protobuf.MessageOrBuilder {
3337 
3338     // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
3339     /**
3340      * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
3341      *
3342      * <pre>
3343      * This is the table's state.  If no znode for a table,
3344      * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
3345      * for more.
3346      * </pre>
3347      */
3348     boolean hasState();
3349     /**
3350      * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
3351      *
3352      * <pre>
3353      * This is the table's state.  If no znode for a table,
3354      * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
3355      * for more.
3356      * </pre>
3357      */
3358     org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState();
3359   }
3360   /**
3361    * Protobuf type {@code hbase.pb.DeprecatedTableState}
3362    *
3363    * <pre>
3364    **
3365    * The znode that holds state of table.
3366    * Deprected, table state is stored in table descriptor on HDFS.
3367    * </pre>
3368    */
3369   public static final class DeprecatedTableState extends
3370       com.google.protobuf.GeneratedMessage
3371       implements DeprecatedTableStateOrBuilder {
3372     // Use DeprecatedTableState.newBuilder() to construct.
3373     private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
3374       super(builder);
3375       this.unknownFields = builder.getUnknownFields();
3376     }
      // noInit=true path: used only for the singleton default instance below.
3377     private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
3378 
3379     private static final DeprecatedTableState defaultInstance;
3380     public static DeprecatedTableState getDefaultInstance() {
3381       return defaultInstance;
3382     }
3383 
3384     public DeprecatedTableState getDefaultInstanceForType() {
3385       return defaultInstance;
3386     }
3387 
      // Unknown fields parsed from the wire are preserved for reserialization.
3388     private final com.google.protobuf.UnknownFieldSet unknownFields;
3389     @java.lang.Override
3390     public final com.google.protobuf.UnknownFieldSet
3391         getUnknownFields() {
3392       return this.unknownFields;
3393     }
      // Wire-format parsing constructor (invoked by PARSER). Reads tags until
      // EOF (tag 0); unrecognized tags/enum values go to the unknown-field set.
3394     private DeprecatedTableState(
3395         com.google.protobuf.CodedInputStream input,
3396         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3397         throws com.google.protobuf.InvalidProtocolBufferException {
3398       initFields();
3399       int mutable_bitField0_ = 0;
3400       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3401           com.google.protobuf.UnknownFieldSet.newBuilder();
3402       try {
3403         boolean done = false;
3404         while (!done) {
3405           int tag = input.readTag();
          // NOTE: 'default' appearing before 'case 8' is legal Java and is how
          // protoc emits this switch; matching is by value, not position.
3406           switch (tag) {
3407             case 0:
3408               done = true;
3409               break;
3410             default: {
3411               if (!parseUnknownField(input, unknownFields,
3412                                      extensionRegistry, tag)) {
3413                 done = true;
3414               }
3415               break;
3416             }
3417             case 8: {
3418               int rawValue = input.readEnum();
3419               org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue);
3420               if (value == null) {
3421                 unknownFields.mergeVarintField(1, rawValue);
3422               } else {
3423                 bitField0_ |= 0x00000001;
3424                 state_ = value;
3425               }
3426               break;
3427             }
3428           }
3429         }
3430       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3431         throw e.setUnfinishedMessage(this);
3432       } catch (java.io.IOException e) {
3433         throw new com.google.protobuf.InvalidProtocolBufferException(
3434             e.getMessage()).setUnfinishedMessage(this);
3435       } finally {
        // Even on failure, freeze whatever was parsed so the unfinished
        // message attached to the exception is immutable.
3436         this.unknownFields = unknownFields.build();
3437         makeExtensionsImmutable();
3438       }
3439     }
3440     public static final com.google.protobuf.Descriptors.Descriptor
3441         getDescriptor() {
3442       return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
3443     }
3444 
3445     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3446         internalGetFieldAccessorTable() {
3447       return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
3448           .ensureFieldAccessorsInitialized(
3449               org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
3450     }
3451 
      // Stateless parser delegating to the wire-format constructor above.
3452     public static com.google.protobuf.Parser<DeprecatedTableState> PARSER =
3453         new com.google.protobuf.AbstractParser<DeprecatedTableState>() {
3454       public DeprecatedTableState parsePartialFrom(
3455           com.google.protobuf.CodedInputStream input,
3456           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3457           throws com.google.protobuf.InvalidProtocolBufferException {
3458         return new DeprecatedTableState(input, extensionRegistry);
3459       }
3460     };
3461 
3462     @java.lang.Override
3463     public com.google.protobuf.Parser<DeprecatedTableState> getParserForType() {
3464       return PARSER;
3465     }
3466 
3467     /**
3468      * Protobuf enum {@code hbase.pb.DeprecatedTableState.State}
3469      *
3470      * <pre>
3471      * Table's current state
3472      * </pre>
3473      */
3474     public enum State
3475         implements com.google.protobuf.ProtocolMessageEnum {
3476       /**
3477        * <code>ENABLED = 0;</code>
3478        */
3479       ENABLED(0, 0),
3480       /**
3481        * <code>DISABLED = 1;</code>
3482        */
3483       DISABLED(1, 1),
3484       /**
3485        * <code>DISABLING = 2;</code>
3486        */
3487       DISABLING(2, 2),
3488       /**
3489        * <code>ENABLING = 3;</code>
3490        */
3491       ENABLING(3, 3),
3492       ;
3493 
3494       /**
3495        * <code>ENABLED = 0;</code>
3496        */
3497       public static final int ENABLED_VALUE = 0;
3498       /**
3499        * <code>DISABLED = 1;</code>
3500        */
3501       public static final int DISABLED_VALUE = 1;
3502       /**
3503        * <code>DISABLING = 2;</code>
3504        */
3505       public static final int DISABLING_VALUE = 2;
3506       /**
3507        * <code>ENABLING = 3;</code>
3508        */
3509       public static final int ENABLING_VALUE = 3;
3510 
3511 
3512       public final int getNumber() { return value; }
3513 
      // Maps a wire value to its enum constant; null (not an exception) for
      // unknown values so parsers can route them to the unknown-field set.
3514       public static State valueOf(int value) {
3515         switch (value) {
3516           case 0: return ENABLED;
3517           case 1: return DISABLED;
3518           case 2: return DISABLING;
3519           case 3: return ENABLING;
3520           default: return null;
3521         }
3522       }
3523 
3524       public static com.google.protobuf.Internal.EnumLiteMap<State>
3525           internalGetValueMap() {
3526         return internalValueMap;
3527       }
3528       private static com.google.protobuf.Internal.EnumLiteMap<State>
3529           internalValueMap =
3530             new com.google.protobuf.Internal.EnumLiteMap<State>() {
3531               public State findValueByNumber(int number) {
3532                 return State.valueOf(number);
3533               }
3534             };
3535 
3536       public final com.google.protobuf.Descriptors.EnumValueDescriptor
3537           getValueDescriptor() {
3538         return getDescriptor().getValues().get(index);
3539       }
3540       public final com.google.protobuf.Descriptors.EnumDescriptor
3541           getDescriptorForType() {
3542         return getDescriptor();
3543       }
3544       public static final com.google.protobuf.Descriptors.EnumDescriptor
3545           getDescriptor() {
3546         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
3547       }
3548 
3549       private static final State[] VALUES = values();
3550 
      // Descriptor-based lookup; unlike valueOf(int) this throws on mismatch
      // because a wrong-typed descriptor is a programming error.
3551       public static State valueOf(
3552           com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
3553         if (desc.getType() != getDescriptor()) {
3554           throw new java.lang.IllegalArgumentException(
3555             "EnumValueDescriptor is not for this type.");
3556         }
3557         return VALUES[desc.getIndex()];
3558       }
3559 
      // index = position in the descriptor; value = wire number. They happen
      // to coincide here but are tracked separately by protoc.
3560       private final int index;
3561       private final int value;
3562 
3563       private State(int index, int value) {
3564         this.index = index;
3565         this.value = value;
3566       }
3567 
3568       // @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
3569     }
3570 
      // Presence bitmask: bit 0 = state.
3571     private int bitField0_;
3572     // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
3573     public static final int STATE_FIELD_NUMBER = 1;
3574     private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
3575     /**
3576      * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
3577      *
3578      * <pre>
3579      * This is the table's state.  If no znode for a table,
3580      * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
3581      * for more.
3582      * </pre>
3583      */
3584     public boolean hasState() {
3585       return ((bitField0_ & 0x00000001) == 0x00000001);
3586     }
3587     /**
3588      * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
3589      *
3590      * <pre>
3591      * This is the table's state.  If no znode for a table,
3592      * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
3593      * for more.
3594      * </pre>
3595      */
3596     public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
3597       return state_;
3598     }
3599 
3600     private void initFields() {
3601       state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
3602     }
      // Memoized tri-state: -1 = not computed, 0 = false, 1 = true.
3603     private byte memoizedIsInitialized = -1;
3604     public final boolean isInitialized() {
3605       byte isInitialized = memoizedIsInitialized;
3606       if (isInitialized != -1) return isInitialized == 1;
3607 
3608       if (!hasState()) {
3609         memoizedIsInitialized = 0;
3610         return false;
3611       }
3612       memoizedIsInitialized = 1;
3613       return true;
3614     }
3615 
      // Serializes set fields in field-number order, then the unknown fields.
      // getSerializedSize() is called first to populate the memoized size.
3616     public void writeTo(com.google.protobuf.CodedOutputStream output)
3617                         throws java.io.IOException {
3618       getSerializedSize();
3619       if (((bitField0_ & 0x00000001) == 0x00000001)) {
3620         output.writeEnum(1, state_.getNumber());
3621       }
3622       getUnknownFields().writeTo(output);
3623     }
3624 
3625     private int memoizedSerializedSize = -1;
3626     public int getSerializedSize() {
3627       int size = memoizedSerializedSize;
3628       if (size != -1) return size;
3629 
3630       size = 0;
3631       if (((bitField0_ & 0x00000001) == 0x00000001)) {
3632         size += com.google.protobuf.CodedOutputStream
3633           .computeEnumSize(1, state_.getNumber());
3634       }
3635       size += getUnknownFields().getSerializedSize();
3636       memoizedSerializedSize = size;
3637       return size;
3638     }
3639 
3640     private static final long serialVersionUID = 0L;
3641     @java.lang.Override
3642     protected java.lang.Object writeReplace()
3643         throws java.io.ObjectStreamException {
3644       return super.writeReplace();
3645     }
3646 
      // Protobuf value equality: same field presence, same field values, and
      // equal unknown-field sets.
3647     @java.lang.Override
3648     public boolean equals(final java.lang.Object obj) {
3649       if (obj == this) {
3650        return true;
3651       }
3652       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) {
3653         return super.equals(obj);
3654       }
3655       org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj;
3656 
3657       boolean result = true;
3658       result = result && (hasState() == other.hasState());
3659       if (hasState()) {
3660         result = result &&
3661             (getState() == other.getState());
3662       }
3663       result = result &&
3664           getUnknownFields().equals(other.getUnknownFields());
3665       return result;
3666     }
3667 
      // Hash is memoized (0 = not yet computed) and consistent with equals().
3668     private int memoizedHashCode = 0;
3669     @java.lang.Override
3670     public int hashCode() {
3671       if (memoizedHashCode != 0) {
3672         return memoizedHashCode;
3673       }
3674       int hash = 41;
3675       hash = (19 * hash) + getDescriptorForType().hashCode();
3676       if (hasState()) {
3677         hash = (37 * hash) + STATE_FIELD_NUMBER;
3678         hash = (53 * hash) + hashEnum(getState());
3679       }
3680       hash = (29 * hash) + getUnknownFields().hashCode();
3681       memoizedHashCode = hash;
3682       return hash;
3683     }
3684 
      // Standard generated parse entry points; all delegate to PARSER.
3685     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3686         com.google.protobuf.ByteString data)
3687         throws com.google.protobuf.InvalidProtocolBufferException {
3688       return PARSER.parseFrom(data);
3689     }
3690     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3691         com.google.protobuf.ByteString data,
3692         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3693         throws com.google.protobuf.InvalidProtocolBufferException {
3694       return PARSER.parseFrom(data, extensionRegistry);
3695     }
3696     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data)
3697         throws com.google.protobuf.InvalidProtocolBufferException {
3698       return PARSER.parseFrom(data);
3699     }
3700     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3701         byte[] data,
3702         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3703         throws com.google.protobuf.InvalidProtocolBufferException {
3704       return PARSER.parseFrom(data, extensionRegistry);
3705     }
3706     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input)
3707         throws java.io.IOException {
3708       return PARSER.parseFrom(input);
3709     }
3710     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3711         java.io.InputStream input,
3712         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3713         throws java.io.IOException {
3714       return PARSER.parseFrom(input, extensionRegistry);
3715     }
      // Delimited variants read a leading varint length prefix first.
3716     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input)
3717         throws java.io.IOException {
3718       return PARSER.parseDelimitedFrom(input);
3719     }
3720     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(
3721         java.io.InputStream input,
3722         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3723         throws java.io.IOException {
3724       return PARSER.parseDelimitedFrom(input, extensionRegistry);
3725     }
3726     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3727         com.google.protobuf.CodedInputStream input)
3728         throws java.io.IOException {
3729       return PARSER.parseFrom(input);
3730     }
3731     public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
3732         com.google.protobuf.CodedInputStream input,
3733         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3734         throws java.io.IOException {
3735       return PARSER.parseFrom(input, extensionRegistry);
3736     }
3737 
      // Builder factory methods; toBuilder() seeds a builder from this message.
3738     public static Builder newBuilder() { return Builder.create(); }
3739     public Builder newBuilderForType() { return newBuilder(); }
3740     public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) {
3741       return newBuilder().mergeFrom(prototype);
3742     }
3743     public Builder toBuilder() { return newBuilder(this); }
3744 
3745     @java.lang.Override
3746     protected Builder newBuilderForType(
3747         com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3748       Builder builder = new Builder(parent);
3749       return builder;
3750     }
3751     /**
3752      * Protobuf type {@code hbase.pb.DeprecatedTableState}
3753      *
3754      * <pre>
3755      **
3756      * The znode that holds state of table.
3757      * Deprected, table state is stored in table descriptor on HDFS.
3758      * </pre>
3759      */
3760     public static final class Builder extends
3761         com.google.protobuf.GeneratedMessage.Builder<Builder>
3762        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder {
3763       public static final com.google.protobuf.Descriptors.Descriptor
3764           getDescriptor() {
3765         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
3766       }
3767 
3768       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3769           internalGetFieldAccessorTable() {
3770         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
3771             .ensureFieldAccessorsInitialized(
3772                 org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
3773       }
3774 
3775       // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder()
3776       private Builder() {
3777         maybeForceBuilderInitialization();
3778       }
3779 
3780       private Builder(
3781           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3782         super(parent);
3783         maybeForceBuilderInitialization();
3784       }
      // No message-typed fields here, so nothing to force-initialize; the hook
      // is emitted by protoc for every builder regardless.
3785       private void maybeForceBuilderInitialization() {
3786         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3787         }
3788       }
3789       private static Builder create() {
3790         return new Builder();
3791       }
3792 
3793       public Builder clear() {
3794         super.clear();
3795         state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
3796         bitField0_ = (bitField0_ & ~0x00000001);
3797         return this;
3798       }
3799 
3800       public Builder clone() {
3801         return create().mergeFrom(buildPartial());
3802       }
3803 
3804       public com.google.protobuf.Descriptors.Descriptor
3805           getDescriptorForType() {
3806         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
3807       }
3808 
3809       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() {
3810         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance();
3811       }
3812 
      // build() enforces required-field initialization; buildPartial() does not.
3813       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() {
3814         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial();
3815         if (!result.isInitialized()) {
3816           throw newUninitializedMessageException(result);
3817         }
3818         return result;
3819       }
3820 
3821       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() {
3822         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this);
3823         int from_bitField0_ = bitField0_;
3824         int to_bitField0_ = 0;
3825         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
3826           to_bitField0_ |= 0x00000001;
3827         }
3828         result.state_ = state_;
3829         result.bitField0_ = to_bitField0_;
3830         onBuilt();
3831         return result;
3832       }
3833 
      // Dynamic dispatch: use the typed merge when possible, otherwise fall
      // back to the reflective GeneratedMessage.Builder merge.
3834       public Builder mergeFrom(com.google.protobuf.Message other) {
3835         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) {
3836           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other);
3837         } else {
3838           super.mergeFrom(other);
3839           return this;
3840         }
3841       }
3842 
3843       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) {
3844         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this;
3845         if (other.hasState()) {
3846           setState(other.getState());
3847         }
3848         this.mergeUnknownFields(other.getUnknownFields());
3849         return this;
3850       }
3851 
      // Only required field is 'state'.
3852       public final boolean isInitialized() {
3853         if (!hasState()) {
3854           
3855           return false;
3856         }
3857         return true;
3858       }
3859 
      // Parses from the stream and merges; a partially-parsed message is still
      // merged in the finally block before the parse exception propagates.
3860       public Builder mergeFrom(
3861           com.google.protobuf.CodedInputStream input,
3862           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3863           throws java.io.IOException {
3864         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null;
3865         try {
3866           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
3867         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3868           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage();
3869           throw e;
3870         } finally {
3871           if (parsedMessage != null) {
3872             mergeFrom(parsedMessage);
3873           }
3874         }
3875         return this;
3876       }
      // Bit i set => field i has been explicitly assigned in this builder.
      private int bitField0_;

      // required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
      /**
       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
       *
       * <pre>
       * This is the table's state.  If no znode for a table,
       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
       * for more.
       * </pre>
       *
       * @return true if {@code state} has been explicitly set on this builder
       */
      public boolean hasState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
       *
       * @return the current state value, or the default {@code ENABLED} if unset
       */
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
        return state_;
      }
      /**
       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
       *
       * @param value the new state; must not be null
       * @throws NullPointerException if {@code value} is null
       */
      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        state_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];</code>
       *
       * Resets {@code state} to its default ({@code ENABLED}) and clears its has-bit.
       */
      public Builder clearState() {
        bitField0_ = (bitField0_ & ~0x00000001);
        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
        onChanged();
        return this;
      }
3938 
3939       // @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
3940     }
3941 
    static {
      // Eagerly build the shared singleton returned by getDefaultInstance(),
      // with every field at its proto default.
      defaultInstance = new DeprecatedTableState(true);
      defaultInstance.initFields();
    }
3946 
3947     // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
3948   }
3949 
  /**
   * Accessor interface for {@code hbase.pb.ReplicationPeer} messages and
   * their builders. Exposes read-only views of the peer's cluster key,
   * optional replication-endpoint implementation class name, and the
   * repeated {@code data} / {@code configuration} pair lists.
   */
  public interface ReplicationPeerOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string clusterkey = 1;
    /**
     * <code>required string clusterkey = 1;</code>
     *
     * <pre>
     * clusterkey is the concatenation of the slave cluster's
     * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
     * </pre>
     */
    boolean hasClusterkey();
    /** <code>required string clusterkey = 1;</code> — the key as a String. */
    java.lang.String getClusterkey();
    /** <code>required string clusterkey = 1;</code> — the key as UTF-8 bytes. */
    com.google.protobuf.ByteString
        getClusterkeyBytes();

    // optional string replicationEndpointImpl = 2;
    /** <code>optional string replicationEndpointImpl = 2;</code> */
    boolean hasReplicationEndpointImpl();
    /** <code>optional string replicationEndpointImpl = 2;</code> */
    java.lang.String getReplicationEndpointImpl();
    /** <code>optional string replicationEndpointImpl = 2;</code> — as UTF-8 bytes. */
    com.google.protobuf.ByteString
        getReplicationEndpointImplBytes();

    // repeated .hbase.pb.BytesBytesPair data = 3;
    /** <code>repeated .hbase.pb.BytesBytesPair data = 3;</code> */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> 
        getDataList();
    /** <code>repeated .hbase.pb.BytesBytesPair data = 3;</code> */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index);
    /** <code>repeated .hbase.pb.BytesBytesPair data = 3;</code> */
    int getDataCount();
    /** <code>repeated .hbase.pb.BytesBytesPair data = 3;</code> */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
        getDataOrBuilderList();
    /** <code>repeated .hbase.pb.BytesBytesPair data = 3;</code> */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
        int index);

    // repeated .hbase.pb.NameStringPair configuration = 4;
    /** <code>repeated .hbase.pb.NameStringPair configuration = 4;</code> */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> 
        getConfigurationList();
    /** <code>repeated .hbase.pb.NameStringPair configuration = 4;</code> */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index);
    /** <code>repeated .hbase.pb.NameStringPair configuration = 4;</code> */
    int getConfigurationCount();
    /** <code>repeated .hbase.pb.NameStringPair configuration = 4;</code> */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
        getConfigurationOrBuilderList();
    /** <code>repeated .hbase.pb.NameStringPair configuration = 4;</code> */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
        int index);
  }
4048   /**
4049    * Protobuf type {@code hbase.pb.ReplicationPeer}
4050    *
4051    * <pre>
4052    **
4053    * Used by replication. Holds a replication peer key.
4054    * </pre>
4055    */
4056   public static final class ReplicationPeer extends
4057       com.google.protobuf.GeneratedMessage
4058       implements ReplicationPeerOrBuilder {
4059     // Use ReplicationPeer.newBuilder() to construct.
    // Use ReplicationPeer.newBuilder() to construct.
    private ReplicationPeer(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the singleton default instance: skips normal field
    // initialization and installs an empty unknown-field set.
    private ReplicationPeer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4065 
    // Shared immutable default instance, created in the static initializer.
    private static final ReplicationPeer defaultInstance;
    /** Returns the shared default (all-fields-default) instance. */
    public static ReplicationPeer getDefaultInstance() {
      return defaultInstance;
    }

    /** Same as {@link #getDefaultInstance()}; required by the Message contract. */
    public ReplicationPeer getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields seen on the wire that this message's schema does not know about.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs from
     * {@code input} until end of stream (tag 0) or an unparseable unknown
     * field. Repeated fields accumulate in mutable lists that are made
     * unmodifiable in the {@code finally} block, which runs even when a
     * parse error is thrown mid-message.
     *
     * @throws com.google.protobuf.InvalidProtocolBufferException on malformed
     *         input; the partially-built message is attached to the exception
     */
    private ReplicationPeer(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 marks end of message.
              done = true;
              break;
            default: {
              // Preserve unrecognized fields; stop if the field can't be skipped.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (clusterkey), wire type 2: length-delimited bytes.
              bitField0_ |= 0x00000001;
              clusterkey_ = input.readBytes();
              break;
            }
            case 18: {
              // Field 2 (replicationEndpointImpl), wire type 2.
              bitField0_ |= 0x00000002;
              replicationEndpointImpl_ = input.readBytes();
              break;
            }
            case 26: {
              // Field 3 (data), repeated message; lazily allocate the list
              // on first occurrence.
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
                data_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair>();
                mutable_bitField0_ |= 0x00000004;
              }
              data_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.PARSER, extensionRegistry));
              break;
            }
            case 34: {
              // Field 4 (configuration), repeated message; lazily allocated.
              if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
                configuration_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>();
                mutable_bitField0_ |= 0x00000008;
              }
              configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry));
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Freeze repeated fields and unknown fields even on error paths.
        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
          data_ = java.util.Collections.unmodifiableList(data_);
        }
        if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
          configuration_ = java.util.Collections.unmodifiableList(configuration_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Returns the proto descriptor for {@code hbase.pb.ReplicationPeer}. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
    }

    // Wires the generated class/builder pair to the reflective field accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class);
    }
4159 
    // Stateless parser singleton; each call constructs a message straight
    // from the parsing constructor above.
    public static com.google.protobuf.Parser<ReplicationPeer> PARSER =
        new com.google.protobuf.AbstractParser<ReplicationPeer>() {
      public ReplicationPeer parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ReplicationPeer(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<ReplicationPeer> getParserForType() {
      return PARSER;
    }
4174 
4175     private int bitField0_;
4176     // required string clusterkey = 1;
4177     public static final int CLUSTERKEY_FIELD_NUMBER = 1;
4178     private java.lang.Object clusterkey_;
4179     /**
4180      * <code>required string clusterkey = 1;</code>
4181      *
4182      * <pre>
4183      * clusterkey is the concatenation of the slave cluster's
4184      * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4185      * </pre>
4186      */
4187     public boolean hasClusterkey() {
4188       return ((bitField0_ & 0x00000001) == 0x00000001);
4189     }
4190     /**
4191      * <code>required string clusterkey = 1;</code>
4192      *
4193      * <pre>
4194      * clusterkey is the concatenation of the slave cluster's
4195      * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4196      * </pre>
4197      */
4198     public java.lang.String getClusterkey() {
4199       java.lang.Object ref = clusterkey_;
4200       if (ref instanceof java.lang.String) {
4201         return (java.lang.String) ref;
4202       } else {
4203         com.google.protobuf.ByteString bs = 
4204             (com.google.protobuf.ByteString) ref;
4205         java.lang.String s = bs.toStringUtf8();
4206         if (bs.isValidUtf8()) {
4207           clusterkey_ = s;
4208         }
4209         return s;
4210       }
4211     }
4212     /**
4213      * <code>required string clusterkey = 1;</code>
4214      *
4215      * <pre>
4216      * clusterkey is the concatenation of the slave cluster's
4217      * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4218      * </pre>
4219      */
4220     public com.google.protobuf.ByteString
4221         getClusterkeyBytes() {
4222       java.lang.Object ref = clusterkey_;
4223       if (ref instanceof java.lang.String) {
4224         com.google.protobuf.ByteString b = 
4225             com.google.protobuf.ByteString.copyFromUtf8(
4226                 (java.lang.String) ref);
4227         clusterkey_ = b;
4228         return b;
4229       } else {
4230         return (com.google.protobuf.ByteString) ref;
4231       }
4232     }
4233 
4234     // optional string replicationEndpointImpl = 2;
4235     public static final int REPLICATIONENDPOINTIMPL_FIELD_NUMBER = 2;
4236     private java.lang.Object replicationEndpointImpl_;
4237     /**
4238      * <code>optional string replicationEndpointImpl = 2;</code>
4239      */
4240     public boolean hasReplicationEndpointImpl() {
4241       return ((bitField0_ & 0x00000002) == 0x00000002);
4242     }
4243     /**
4244      * <code>optional string replicationEndpointImpl = 2;</code>
4245      */
4246     public java.lang.String getReplicationEndpointImpl() {
4247       java.lang.Object ref = replicationEndpointImpl_;
4248       if (ref instanceof java.lang.String) {
4249         return (java.lang.String) ref;
4250       } else {
4251         com.google.protobuf.ByteString bs = 
4252             (com.google.protobuf.ByteString) ref;
4253         java.lang.String s = bs.toStringUtf8();
4254         if (bs.isValidUtf8()) {
4255           replicationEndpointImpl_ = s;
4256         }
4257         return s;
4258       }
4259     }
4260     /**
4261      * <code>optional string replicationEndpointImpl = 2;</code>
4262      */
4263     public com.google.protobuf.ByteString
4264         getReplicationEndpointImplBytes() {
4265       java.lang.Object ref = replicationEndpointImpl_;
4266       if (ref instanceof java.lang.String) {
4267         com.google.protobuf.ByteString b = 
4268             com.google.protobuf.ByteString.copyFromUtf8(
4269                 (java.lang.String) ref);
4270         replicationEndpointImpl_ = b;
4271         return b;
4272       } else {
4273         return (com.google.protobuf.ByteString) ref;
4274       }
4275     }
4276 
4277     // repeated .hbase.pb.BytesBytesPair data = 3;
4278     public static final int DATA_FIELD_NUMBER = 3;
4279     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> data_;
4280     /**
4281      * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4282      */
4283     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> getDataList() {
4284       return data_;
4285     }
4286     /**
4287      * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4288      */
4289     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
4290         getDataOrBuilderList() {
4291       return data_;
4292     }
4293     /**
4294      * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4295      */
4296     public int getDataCount() {
4297       return data_.size();
4298     }
4299     /**
4300      * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4301      */
4302     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index) {
4303       return data_.get(index);
4304     }
4305     /**
4306      * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4307      */
4308     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
4309         int index) {
4310       return data_.get(index);
4311     }
4312 
4313     // repeated .hbase.pb.NameStringPair configuration = 4;
4314     public static final int CONFIGURATION_FIELD_NUMBER = 4;
4315     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> configuration_;
4316     /**
4317      * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
4318      */
4319     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getConfigurationList() {
4320       return configuration_;
4321     }
4322     /**
4323      * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
4324      */
4325     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
4326         getConfigurationOrBuilderList() {
4327       return configuration_;
4328     }
4329     /**
4330      * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
4331      */
4332     public int getConfigurationCount() {
4333       return configuration_.size();
4334     }
4335     /**
4336      * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
4337      */
4338     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) {
4339       return configuration_.get(index);
4340     }
4341     /**
4342      * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
4343      */
4344     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
4345         int index) {
4346       return configuration_.get(index);
4347     }
4348 
    // Resets every field to its proto2 default: empty strings for the
    // string fields and empty (immutable) lists for the repeated fields.
    private void initFields() {
      clusterkey_ = "";
      replicationEndpointImpl_ = "";
      data_ = java.util.Collections.emptyList();
      configuration_ = java.util.Collections.emptyList();
    }
4355     private byte memoizedIsInitialized = -1;
4356     public final boolean isInitialized() {
4357       byte isInitialized = memoizedIsInitialized;
4358       if (isInitialized != -1) return isInitialized == 1;
4359 
4360       if (!hasClusterkey()) {
4361         memoizedIsInitialized = 0;
4362         return false;
4363       }
4364       for (int i = 0; i < getDataCount(); i++) {
4365         if (!getData(i).isInitialized()) {
4366           memoizedIsInitialized = 0;
4367           return false;
4368         }
4369       }
4370       for (int i = 0; i < getConfigurationCount(); i++) {
4371         if (!getConfiguration(i).isInitialized()) {
4372           memoizedIsInitialized = 0;
4373           return false;
4374         }
4375       }
4376       memoizedIsInitialized = 1;
4377       return true;
4378     }
4379 
4380     public void writeTo(com.google.protobuf.CodedOutputStream output)
4381                         throws java.io.IOException {
4382       getSerializedSize();
4383       if (((bitField0_ & 0x00000001) == 0x00000001)) {
4384         output.writeBytes(1, getClusterkeyBytes());
4385       }
4386       if (((bitField0_ & 0x00000002) == 0x00000002)) {
4387         output.writeBytes(2, getReplicationEndpointImplBytes());
4388       }
4389       for (int i = 0; i < data_.size(); i++) {
4390         output.writeMessage(3, data_.get(i));
4391       }
4392       for (int i = 0; i < configuration_.size(); i++) {
4393         output.writeMessage(4, configuration_.get(i));
4394       }
4395       getUnknownFields().writeTo(output);
4396     }
4397 
4398     private int memoizedSerializedSize = -1;
4399     public int getSerializedSize() {
4400       int size = memoizedSerializedSize;
4401       if (size != -1) return size;
4402 
4403       size = 0;
4404       if (((bitField0_ & 0x00000001) == 0x00000001)) {
4405         size += com.google.protobuf.CodedOutputStream
4406           .computeBytesSize(1, getClusterkeyBytes());
4407       }
4408       if (((bitField0_ & 0x00000002) == 0x00000002)) {
4409         size += com.google.protobuf.CodedOutputStream
4410           .computeBytesSize(2, getReplicationEndpointImplBytes());
4411       }
4412       for (int i = 0; i < data_.size(); i++) {
4413         size += com.google.protobuf.CodedOutputStream
4414           .computeMessageSize(3, data_.get(i));
4415       }
4416       for (int i = 0; i < configuration_.size(); i++) {
4417         size += com.google.protobuf.CodedOutputStream
4418           .computeMessageSize(4, configuration_.get(i));
4419       }
4420       size += getUnknownFields().getSerializedSize();
4421       memoizedSerializedSize = size;
4422       return size;
4423     }
4424 
    private static final long serialVersionUID = 0L;
    // Java-serialization hook: delegates to GeneratedMessage, which
    // substitutes a serialized-bytes proxy for this message.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
4431 
4432     @java.lang.Override
4433     public boolean equals(final java.lang.Object obj) {
4434       if (obj == this) {
4435        return true;
4436       }
4437       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)) {
4438         return super.equals(obj);
4439       }
4440       org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) obj;
4441 
4442       boolean result = true;
4443       result = result && (hasClusterkey() == other.hasClusterkey());
4444       if (hasClusterkey()) {
4445         result = result && getClusterkey()
4446             .equals(other.getClusterkey());
4447       }
4448       result = result && (hasReplicationEndpointImpl() == other.hasReplicationEndpointImpl());
4449       if (hasReplicationEndpointImpl()) {
4450         result = result && getReplicationEndpointImpl()
4451             .equals(other.getReplicationEndpointImpl());
4452       }
4453       result = result && getDataList()
4454           .equals(other.getDataList());
4455       result = result && getConfigurationList()
4456           .equals(other.getConfigurationList());
4457       result = result &&
4458           getUnknownFields().equals(other.getUnknownFields());
4459       return result;
4460     }
4461 
4462     private int memoizedHashCode = 0;
4463     @java.lang.Override
4464     public int hashCode() {
4465       if (memoizedHashCode != 0) {
4466         return memoizedHashCode;
4467       }
4468       int hash = 41;
4469       hash = (19 * hash) + getDescriptorForType().hashCode();
4470       if (hasClusterkey()) {
4471         hash = (37 * hash) + CLUSTERKEY_FIELD_NUMBER;
4472         hash = (53 * hash) + getClusterkey().hashCode();
4473       }
4474       if (hasReplicationEndpointImpl()) {
4475         hash = (37 * hash) + REPLICATIONENDPOINTIMPL_FIELD_NUMBER;
4476         hash = (53 * hash) + getReplicationEndpointImpl().hashCode();
4477       }
4478       if (getDataCount() > 0) {
4479         hash = (37 * hash) + DATA_FIELD_NUMBER;
4480         hash = (53 * hash) + getDataList().hashCode();
4481       }
4482       if (getConfigurationCount() > 0) {
4483         hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER;
4484         hash = (53 * hash) + getConfigurationList().hashCode();
4485       }
4486       hash = (29 * hash) + getUnknownFields().hashCode();
4487       memoizedHashCode = hash;
4488       return hash;
4489     }
4490 
    // Static parse entry points; all delegate to PARSER. The *Delimited*
    // variants read a varint length prefix before the message bytes.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
4543 
    /** Creates an empty builder for this message type. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** Creates a builder pre-populated with {@code prototype}'s fields. */
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** Creates a builder pre-populated with this message's fields. */
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builder wired to a parent for change notifications.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
4557     /**
4558      * Protobuf type {@code hbase.pb.ReplicationPeer}
4559      *
4560      * <pre>
4561      **
4562      * Used by replication. Holds a replication peer key.
4563      * </pre>
4564      */
4565     public static final class Builder extends
4566         com.google.protobuf.GeneratedMessage.Builder<Builder>
4567        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder {
      /** Returns the proto descriptor for {@code hbase.pb.ReplicationPeer}. */
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
      }

      // Wires this builder/message pair to the reflective field accessors.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class);
      }
4579 
      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // When the runtime always uses nested field builders (reflection-based
      // runtimes), eagerly create the repeated-field builders so later
      // accesses see a consistent builder-backed state.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getDataFieldBuilder();
          getConfigurationFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
4599 
      /**
       * Resets every field to its default and clears all has-bits. Repeated
       * fields are cleared through their nested field builders when those
       * exist, otherwise by replacing the backing list with an empty one.
       */
      public Builder clear() {
        super.clear();
        clusterkey_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        replicationEndpointImpl_ = "";
        bitField0_ = (bitField0_ & ~0x00000002);
        if (dataBuilder_ == null) {
          data_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
        } else {
          dataBuilder_.clear();
        }
        if (configurationBuilder_ == null) {
          configuration_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
        } else {
          configurationBuilder_.clear();
        }
        return this;
      }
4620 
      // Deep copy: snapshots current state via buildPartial() and merges it
      // into a fresh builder.
4621       public Builder clone() {
4622         return create().mergeFrom(buildPartial());
4623       }
4624 
      // Same descriptor as the static getDescriptor(), exposed through the
      // MessageOrBuilder interface.
4625       public com.google.protobuf.Descriptors.Descriptor
4626           getDescriptorForType() {
4627         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
4628       }
4629 
      // Shared immutable default instance of ReplicationPeer.
4630       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer getDefaultInstanceForType() {
4631         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance();
4632       }
4633 
      // Builds the message, throwing UninitializedMessageException if a
      // required field (clusterkey — see isInitialized()) is missing.
4634       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer build() {
4635         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial();
4636         if (!result.isInitialized()) {
4637           throw newUninitializedMessageException(result);
4638         }
4639         return result;
4640       }
4641 
      // Builds the message from current state WITHOUT checking required fields.
4642       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildPartial() {
4643         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer(this);
4644         int from_bitField0_ = bitField0_;
4645         int to_bitField0_ = 0;
        // Copy presence bits for the two scalar (string) fields.
4646         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4647           to_bitField0_ |= 0x00000001;
4648         }
4649         result.clusterkey_ = clusterkey_;
4650         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
4651           to_bitField0_ |= 0x00000002;
4652         }
4653         result.replicationEndpointImpl_ = replicationEndpointImpl_;
        // Repeated fields: if the builder owns a mutable local list (bit set),
        // freeze it as unmodifiable and hand it to the message, clearing the
        // ownership bit so later mutations copy-on-write; otherwise delegate
        // to the nested RepeatedFieldBuilder.
4654         if (dataBuilder_ == null) {
4655           if (((bitField0_ & 0x00000004) == 0x00000004)) {
4656             data_ = java.util.Collections.unmodifiableList(data_);
4657             bitField0_ = (bitField0_ & ~0x00000004);
4658           }
4659           result.data_ = data_;
4660         } else {
4661           result.data_ = dataBuilder_.build();
4662         }
4663         if (configurationBuilder_ == null) {
4664           if (((bitField0_ & 0x00000008) == 0x00000008)) {
4665             configuration_ = java.util.Collections.unmodifiableList(configuration_);
4666             bitField0_ = (bitField0_ & ~0x00000008);
4667           }
4668           result.configuration_ = configuration_;
4669         } else {
4670           result.configuration_ = configurationBuilder_.build();
4671         }
4672         result.bitField0_ = to_bitField0_;
4673         onBuilt();
4674         return result;
4675       }
4676 
      // Untyped merge entry point: dispatches to the typed overload when other
      // is a ReplicationPeer, otherwise falls back to reflective merging.
4677       public Builder mergeFrom(com.google.protobuf.Message other) {
4678         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) {
4679           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)other);
4680         } else {
4681           super.mergeFrom(other);
4682           return this;
4683         }
4684       }
4685 
      // Field-wise merge: scalar fields present in other overwrite ours;
      // repeated fields are appended. No-op when other is the default instance.
4686       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other) {
4687         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) return this;
4688         if (other.hasClusterkey()) {
4689           bitField0_ |= 0x00000001;
4690           clusterkey_ = other.clusterkey_;
4691           onChanged();
4692         }
4693         if (other.hasReplicationEndpointImpl()) {
4694           bitField0_ |= 0x00000002;
4695           replicationEndpointImpl_ = other.replicationEndpointImpl_;
4696           onChanged();
4697         }
        // data: when our local list is empty, adopt other's list directly and
        // clear the ownership bit (we must not mutate the shared list); else
        // copy-on-write and append.
4698         if (dataBuilder_ == null) {
4699           if (!other.data_.isEmpty()) {
4700             if (data_.isEmpty()) {
4701               data_ = other.data_;
4702               bitField0_ = (bitField0_ & ~0x00000004);
4703             } else {
4704               ensureDataIsMutable();
4705               data_.addAll(other.data_);
4706             }
4707             onChanged();
4708           }
4709         } else {
        // With a nested builder: if it holds nothing, discard it, adopt
        // other's list, and (only when alwaysUseFieldBuilders) rebuild it.
4710           if (!other.data_.isEmpty()) {
4711             if (dataBuilder_.isEmpty()) {
4712               dataBuilder_.dispose();
4713               dataBuilder_ = null;
4714               data_ = other.data_;
4715               bitField0_ = (bitField0_ & ~0x00000004);
4716               dataBuilder_ = 
4717                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
4718                    getDataFieldBuilder() : null;
4719             } else {
4720               dataBuilder_.addAllMessages(other.data_);
4721             }
4722           }
4723         }
        // configuration: identical merge strategy as data above.
4724         if (configurationBuilder_ == null) {
4725           if (!other.configuration_.isEmpty()) {
4726             if (configuration_.isEmpty()) {
4727               configuration_ = other.configuration_;
4728               bitField0_ = (bitField0_ & ~0x00000008);
4729             } else {
4730               ensureConfigurationIsMutable();
4731               configuration_.addAll(other.configuration_);
4732             }
4733             onChanged();
4734           }
4735         } else {
4736           if (!other.configuration_.isEmpty()) {
4737             if (configurationBuilder_.isEmpty()) {
4738               configurationBuilder_.dispose();
4739               configurationBuilder_ = null;
4740               configuration_ = other.configuration_;
4741               bitField0_ = (bitField0_ & ~0x00000008);
4742               configurationBuilder_ = 
4743                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
4744                    getConfigurationFieldBuilder() : null;
4745             } else {
4746               configurationBuilder_.addAllMessages(other.configuration_);
4747             }
4748           }
4749         }
4750         this.mergeUnknownFields(other.getUnknownFields());
4751         return this;
4752       }
4753 
      // True only when required clusterkey is set and every nested
      // data/configuration message is itself fully initialized.
4754       public final boolean isInitialized() {
4755         if (!hasClusterkey()) {
4756           
4757           return false;
4758         }
4759         for (int i = 0; i < getDataCount(); i++) {
4760           if (!getData(i).isInitialized()) {
4761             
4762             return false;
4763           }
4764         }
4765         for (int i = 0; i < getConfigurationCount(); i++) {
4766           if (!getConfiguration(i).isInitialized()) {
4767             
4768             return false;
4769           }
4770         }
4771         return true;
4772       }
4773 
      // Parses a ReplicationPeer from the stream (via PARSER) and merges it in.
      // On InvalidProtocolBufferException, any partially parsed message is
      // still merged (finally block) before the exception propagates.
4774       public Builder mergeFrom(
4775           com.google.protobuf.CodedInputStream input,
4776           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4777           throws java.io.IOException {
4778         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parsedMessage = null;
4779         try {
4780           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4781         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4782           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) e.getUnfinishedMessage();
4783           throw e;
4784         } finally {
4785           if (parsedMessage != null) {
4786             mergeFrom(parsedMessage);
4787           }
4788         }
4789         return this;
4790       }
      // Presence/ownership bits: 0x1 clusterkey set, 0x2 replicationEndpointImpl
      // set, 0x4 data_ list is a mutable copy owned by this builder,
      // 0x8 same for configuration_.
4791       private int bitField0_;
4792 
4793       // required string clusterkey = 1;
      // Holds either a java.lang.String or a UTF-8 ByteString; the accessors
      // lazily convert and cache whichever representation is requested.
4794       private java.lang.Object clusterkey_ = "";
4795       /**
4796        * <code>required string clusterkey = 1;</code>
4797        *
4798        * <pre>
4799        * clusterkey is the concatenation of the slave cluster's
4800        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4801        * </pre>
4802        */
4803       public boolean hasClusterkey() {
4804         return ((bitField0_ & 0x00000001) == 0x00000001);
4805       }
4806       /**
4807        * <code>required string clusterkey = 1;</code>
4808        *
4809        * <pre>
4810        * clusterkey is the concatenation of the slave cluster's
4811        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4812        * </pre>
4813        */
      // If the field currently holds a ByteString, decode it once and cache
      // the String back into clusterkey_ for subsequent calls.
4814       public java.lang.String getClusterkey() {
4815         java.lang.Object ref = clusterkey_;
4816         if (!(ref instanceof java.lang.String)) {
4817           java.lang.String s = ((com.google.protobuf.ByteString) ref)
4818               .toStringUtf8();
4819           clusterkey_ = s;
4820           return s;
4821         } else {
4822           return (java.lang.String) ref;
4823         }
4824       }
4825       /**
4826        * <code>required string clusterkey = 1;</code>
4827        *
4828        * <pre>
4829        * clusterkey is the concatenation of the slave cluster's
4830        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4831        * </pre>
4832        */
4833       public com.google.protobuf.ByteString
4834           getClusterkeyBytes() {
4835         java.lang.Object ref = clusterkey_;
4836         if (ref instanceof String) {
4837           com.google.protobuf.ByteString b = 
4838               com.google.protobuf.ByteString.copyFromUtf8(
4839                   (java.lang.String) ref);
4840           clusterkey_ = b;
4841           return b;
4842         } else {
4843           return (com.google.protobuf.ByteString) ref;
4844         }
4845       }
4846       /**
4847        * <code>required string clusterkey = 1;</code>
4848        *
4849        * <pre>
4850        * clusterkey is the concatenation of the slave cluster's
4851        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4852        * </pre>
4853        */
4854       public Builder setClusterkey(
4855           java.lang.String value) {
4856         if (value == null) {
4857     throw new NullPointerException();
4858   }
4859   bitField0_ |= 0x00000001;
4860         clusterkey_ = value;
4861         onChanged();
4862         return this;
4863       }
4864       /**
4865        * <code>required string clusterkey = 1;</code>
4866        *
4867        * <pre>
4868        * clusterkey is the concatenation of the slave cluster's
4869        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4870        * </pre>
4871        */
4872       public Builder clearClusterkey() {
4873         bitField0_ = (bitField0_ & ~0x00000001);
4874         clusterkey_ = getDefaultInstance().getClusterkey();
4875         onChanged();
4876         return this;
4877       }
4878       /**
4879        * <code>required string clusterkey = 1;</code>
4880        *
4881        * <pre>
4882        * clusterkey is the concatenation of the slave cluster's
4883        * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
4884        * </pre>
4885        */
4886       public Builder setClusterkeyBytes(
4887           com.google.protobuf.ByteString value) {
4888         if (value == null) {
4889     throw new NullPointerException();
4890   }
4891   bitField0_ |= 0x00000001;
4892         clusterkey_ = value;
4893         onChanged();
4894         return this;
4895       }
4896 
4897       // optional string replicationEndpointImpl = 2;
      // Same String/ByteString dual representation as clusterkey_ above.
4898       private java.lang.Object replicationEndpointImpl_ = "";
4899       /**
4900        * <code>optional string replicationEndpointImpl = 2;</code>
4901        */
4902       public boolean hasReplicationEndpointImpl() {
4903         return ((bitField0_ & 0x00000002) == 0x00000002);
4904       }
4905       /**
4906        * <code>optional string replicationEndpointImpl = 2;</code>
4907        */
4908       public java.lang.String getReplicationEndpointImpl() {
4909         java.lang.Object ref = replicationEndpointImpl_;
4910         if (!(ref instanceof java.lang.String)) {
4911           java.lang.String s = ((com.google.protobuf.ByteString) ref)
4912               .toStringUtf8();
4913           replicationEndpointImpl_ = s;
4914           return s;
4915         } else {
4916           return (java.lang.String) ref;
4917         }
4918       }
4919       /**
4920        * <code>optional string replicationEndpointImpl = 2;</code>
4921        */
4922       public com.google.protobuf.ByteString
4923           getReplicationEndpointImplBytes() {
4924         java.lang.Object ref = replicationEndpointImpl_;
4925         if (ref instanceof String) {
4926           com.google.protobuf.ByteString b = 
4927               com.google.protobuf.ByteString.copyFromUtf8(
4928                   (java.lang.String) ref);
4929           replicationEndpointImpl_ = b;
4930           return b;
4931         } else {
4932           return (com.google.protobuf.ByteString) ref;
4933         }
4934       }
4935       /**
4936        * <code>optional string replicationEndpointImpl = 2;</code>
4937        */
4938       public Builder setReplicationEndpointImpl(
4939           java.lang.String value) {
4940         if (value == null) {
4941     throw new NullPointerException();
4942   }
4943   bitField0_ |= 0x00000002;
4944         replicationEndpointImpl_ = value;
4945         onChanged();
4946         return this;
4947       }
4948       /**
4949        * <code>optional string replicationEndpointImpl = 2;</code>
4950        */
4951       public Builder clearReplicationEndpointImpl() {
4952         bitField0_ = (bitField0_ & ~0x00000002);
4953         replicationEndpointImpl_ = getDefaultInstance().getReplicationEndpointImpl();
4954         onChanged();
4955         return this;
4956       }
4957       /**
4958        * <code>optional string replicationEndpointImpl = 2;</code>
4959        */
4960       public Builder setReplicationEndpointImplBytes(
4961           com.google.protobuf.ByteString value) {
4962         if (value == null) {
4963     throw new NullPointerException();
4964   }
4965   bitField0_ |= 0x00000002;
4966         replicationEndpointImpl_ = value;
4967         onChanged();
4968         return this;
4969       }
4970 
4971       // repeated .hbase.pb.BytesBytesPair data = 3;
      // May alias an immutable list (emptyList, or a list adopted from a built
      // message); bit 0x4 in bitField0_ records whether we own a mutable copy.
4972       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> data_ =
4973         java.util.Collections.emptyList();
      // Copy-on-write guard: before the first mutation, replace the possibly
      // shared list with a private ArrayList and set the ownership bit.
4974       private void ensureDataIsMutable() {
4975         if (!((bitField0_ & 0x00000004) == 0x00000004)) {
4976           data_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair>(data_);
4977           bitField0_ |= 0x00000004;
4978          }
4979       }
4980 
      // Lazily created by getDataFieldBuilder(); when non-null it, not data_,
      // is the authoritative store for the repeated data field.
4981       private com.google.protobuf.RepeatedFieldBuilder<
4982           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> dataBuilder_;
4983 
4984       /**
4985        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4986        */
4987       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> getDataList() {
4988         if (dataBuilder_ == null) {
4989           return java.util.Collections.unmodifiableList(data_);
4990         } else {
4991           return dataBuilder_.getMessageList();
4992         }
4993       }
4994       /**
4995        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
4996        */
4997       public int getDataCount() {
4998         if (dataBuilder_ == null) {
4999           return data_.size();
5000         } else {
5001           return dataBuilder_.getCount();
5002         }
5003       }
5004       /**
5005        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5006        */
5007       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index) {
5008         if (dataBuilder_ == null) {
5009           return data_.get(index);
5010         } else {
5011           return dataBuilder_.getMessage(index);
5012         }
5013       }
5014       /**
5015        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5016        */
5017       public Builder setData(
5018           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
5019         if (dataBuilder_ == null) {
5020           if (value == null) {
5021             throw new NullPointerException();
5022           }
5023           ensureDataIsMutable();
5024           data_.set(index, value);
5025           onChanged();
5026         } else {
5027           dataBuilder_.setMessage(index, value);
5028         }
5029         return this;
5030       }
5031       /**
5032        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5033        */
5034       public Builder setData(
5035           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
5036         if (dataBuilder_ == null) {
5037           ensureDataIsMutable();
5038           data_.set(index, builderForValue.build());
5039           onChanged();
5040         } else {
5041           dataBuilder_.setMessage(index, builderForValue.build());
5042         }
5043         return this;
5044       }
5045       /**
5046        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5047        */
5048       public Builder addData(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
5049         if (dataBuilder_ == null) {
5050           if (value == null) {
5051             throw new NullPointerException();
5052           }
5053           ensureDataIsMutable();
5054           data_.add(value);
5055           onChanged();
5056         } else {
5057           dataBuilder_.addMessage(value);
5058         }
5059         return this;
5060       }
5061       /**
5062        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5063        */
5064       public Builder addData(
5065           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
5066         if (dataBuilder_ == null) {
5067           if (value == null) {
5068             throw new NullPointerException();
5069           }
5070           ensureDataIsMutable();
5071           data_.add(index, value);
5072           onChanged();
5073         } else {
5074           dataBuilder_.addMessage(index, value);
5075         }
5076         return this;
5077       }
5078       /**
5079        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5080        */
5081       public Builder addData(
5082           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
5083         if (dataBuilder_ == null) {
5084           ensureDataIsMutable();
5085           data_.add(builderForValue.build());
5086           onChanged();
5087         } else {
5088           dataBuilder_.addMessage(builderForValue.build());
5089         }
5090         return this;
5091       }
5092       /**
5093        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5094        */
5095       public Builder addData(
5096           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
5097         if (dataBuilder_ == null) {
5098           ensureDataIsMutable();
5099           data_.add(index, builderForValue.build());
5100           onChanged();
5101         } else {
5102           dataBuilder_.addMessage(index, builderForValue.build());
5103         }
5104         return this;
5105       }
5106       /**
5107        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5108        */
5109       public Builder addAllData(
5110           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> values) {
5111         if (dataBuilder_ == null) {
5112           ensureDataIsMutable();
5113           super.addAll(values, data_);
5114           onChanged();
5115         } else {
5116           dataBuilder_.addAllMessages(values);
5117         }
5118         return this;
5119       }
5120       /**
5121        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5122        */
5123       public Builder clearData() {
5124         if (dataBuilder_ == null) {
5125           data_ = java.util.Collections.emptyList();
5126           bitField0_ = (bitField0_ & ~0x00000004);
5127           onChanged();
5128         } else {
5129           dataBuilder_.clear();
5130         }
5131         return this;
5132       }
5133       /**
5134        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5135        */
5136       public Builder removeData(int index) {
5137         if (dataBuilder_ == null) {
5138           ensureDataIsMutable();
5139           data_.remove(index);
5140           onChanged();
5141         } else {
5142           dataBuilder_.remove(index);
5143         }
5144         return this;
5145       }
5146       /**
5147        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5148        */
5149       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getDataBuilder(
5150           int index) {
5151         return getDataFieldBuilder().getBuilder(index);
5152       }
5153       /**
5154        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5155        */
5156       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
5157           int index) {
5158         if (dataBuilder_ == null) {
5159           return data_.get(index);  } else {
5160           return dataBuilder_.getMessageOrBuilder(index);
5161         }
5162       }
5163       /**
5164        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5165        */
5166       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
5167            getDataOrBuilderList() {
5168         if (dataBuilder_ != null) {
5169           return dataBuilder_.getMessageOrBuilderList();
5170         } else {
5171           return java.util.Collections.unmodifiableList(data_);
5172         }
5173       }
5174       /**
5175        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5176        */
5177       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addDataBuilder() {
5178         return getDataFieldBuilder().addBuilder(
5179             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance());
5180       }
5181       /**
5182        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5183        */
5184       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addDataBuilder(
5185           int index) {
5186         return getDataFieldBuilder().addBuilder(
5187             index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance());
5188       }
5189       /**
5190        * <code>repeated .hbase.pb.BytesBytesPair data = 3;</code>
5191        */
5192       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder> 
5193            getDataBuilderList() {
5194         return getDataFieldBuilder().getBuilderList();
5195       }
      // Lazily creates the nested RepeatedFieldBuilder for data, transferring
      // ownership of the current list to it (data_ is nulled afterwards; all
      // access then goes through dataBuilder_).
5196       private com.google.protobuf.RepeatedFieldBuilder<
5197           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
5198           getDataFieldBuilder() {
5199         if (dataBuilder_ == null) {
5200           dataBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
5201               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>(
5202                   data_,
5203                   ((bitField0_ & 0x00000004) == 0x00000004),
5204                   getParentForChildren(),
5205                   isClean());
5206           data_ = null;
5207         }
5208         return dataBuilder_;
5209       }
5210 
5211       // repeated .hbase.pb.NameStringPair configuration = 4;
      // Same copy-on-write scheme as data_ above; ownership bit is 0x8.
5212       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> configuration_ =
5213         java.util.Collections.emptyList();
      // Replace a possibly shared list with a private mutable copy before the
      // first in-place mutation.
5214       private void ensureConfigurationIsMutable() {
5215         if (!((bitField0_ & 0x00000008) == 0x00000008)) {
5216           configuration_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>(configuration_);
5217           bitField0_ |= 0x00000008;
5218          }
5219       }
5220 
      // Lazily created by getConfigurationFieldBuilder(); when non-null it,
      // not configuration_, is the authoritative store for this field.
5221       private com.google.protobuf.RepeatedFieldBuilder<
5222           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_;
5223 
5224       /**
5225        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5226        */
5227       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getConfigurationList() {
5228         if (configurationBuilder_ == null) {
5229           return java.util.Collections.unmodifiableList(configuration_);
5230         } else {
5231           return configurationBuilder_.getMessageList();
5232         }
5233       }
5234       /**
5235        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5236        */
5237       public int getConfigurationCount() {
5238         if (configurationBuilder_ == null) {
5239           return configuration_.size();
5240         } else {
5241           return configurationBuilder_.getCount();
5242         }
5243       }
5244       /**
5245        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5246        */
5247       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) {
5248         if (configurationBuilder_ == null) {
5249           return configuration_.get(index);
5250         } else {
5251           return configurationBuilder_.getMessage(index);
5252         }
5253       }
5254       /**
5255        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5256        */
5257       public Builder setConfiguration(
5258           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
5259         if (configurationBuilder_ == null) {
5260           if (value == null) {
5261             throw new NullPointerException();
5262           }
5263           ensureConfigurationIsMutable();
5264           configuration_.set(index, value);
5265           onChanged();
5266         } else {
5267           configurationBuilder_.setMessage(index, value);
5268         }
5269         return this;
5270       }
5271       /**
5272        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5273        */
5274       public Builder setConfiguration(
5275           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
5276         if (configurationBuilder_ == null) {
5277           ensureConfigurationIsMutable();
5278           configuration_.set(index, builderForValue.build());
5279           onChanged();
5280         } else {
5281           configurationBuilder_.setMessage(index, builderForValue.build());
5282         }
5283         return this;
5284       }
5285       /**
5286        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5287        */
5288       public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
5289         if (configurationBuilder_ == null) {
5290           if (value == null) {
5291             throw new NullPointerException();
5292           }
5293           ensureConfigurationIsMutable();
5294           configuration_.add(value);
5295           onChanged();
5296         } else {
5297           configurationBuilder_.addMessage(value);
5298         }
5299         return this;
5300       }
5301       /**
5302        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5303        */
5304       public Builder addConfiguration(
5305           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
5306         if (configurationBuilder_ == null) {
5307           if (value == null) {
5308             throw new NullPointerException();
5309           }
5310           ensureConfigurationIsMutable();
5311           configuration_.add(index, value);
5312           onChanged();
5313         } else {
5314           configurationBuilder_.addMessage(index, value);
5315         }
5316         return this;
5317       }
5318       /**
5319        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5320        */
5321       public Builder addConfiguration(
5322           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
5323         if (configurationBuilder_ == null) {
5324           ensureConfigurationIsMutable();
5325           configuration_.add(builderForValue.build());
5326           onChanged();
5327         } else {
5328           configurationBuilder_.addMessage(builderForValue.build());
5329         }
5330         return this;
5331       }
5332       /**
5333        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5334        */
5335       public Builder addConfiguration(
5336           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
5337         if (configurationBuilder_ == null) {
5338           ensureConfigurationIsMutable();
5339           configuration_.add(index, builderForValue.build());
5340           onChanged();
5341         } else {
5342           configurationBuilder_.addMessage(index, builderForValue.build());
5343         }
5344         return this;
5345       }
5346       /**
5347        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5348        */
5349       public Builder addAllConfiguration(
5350           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> values) {
5351         if (configurationBuilder_ == null) {
5352           ensureConfigurationIsMutable();
5353           super.addAll(values, configuration_);
5354           onChanged();
5355         } else {
5356           configurationBuilder_.addAllMessages(values);
5357         }
5358         return this;
5359       }
5360       /**
5361        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5362        */
5363       public Builder clearConfiguration() {
5364         if (configurationBuilder_ == null) {
5365           configuration_ = java.util.Collections.emptyList();
5366           bitField0_ = (bitField0_ & ~0x00000008);
5367           onChanged();
5368         } else {
5369           configurationBuilder_.clear();
5370         }
5371         return this;
5372       }
5373       /**
5374        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5375        */
5376       public Builder removeConfiguration(int index) {
5377         if (configurationBuilder_ == null) {
5378           ensureConfigurationIsMutable();
5379           configuration_.remove(index);
5380           onChanged();
5381         } else {
5382           configurationBuilder_.remove(index);
5383         }
5384         return this;
5385       }
5386       /**
5387        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5388        */
5389       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder(
5390           int index) {
5391         return getConfigurationFieldBuilder().getBuilder(index);
5392       }
5393       /**
5394        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5395        */
5396       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
5397           int index) {
5398         if (configurationBuilder_ == null) {
5399           return configuration_.get(index);  } else {
5400           return configurationBuilder_.getMessageOrBuilder(index);
5401         }
5402       }
5403       /**
5404        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5405        */
5406       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
5407            getConfigurationOrBuilderList() {
5408         if (configurationBuilder_ != null) {
5409           return configurationBuilder_.getMessageOrBuilderList();
5410         } else {
5411           return java.util.Collections.unmodifiableList(configuration_);
5412         }
5413       }
5414       /**
5415        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5416        */
5417       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() {
5418         return getConfigurationFieldBuilder().addBuilder(
5419             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
5420       }
5421       /**
5422        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5423        */
5424       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder(
5425           int index) {
5426         return getConfigurationFieldBuilder().addBuilder(
5427             index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
5428       }
5429       /**
5430        * <code>repeated .hbase.pb.NameStringPair configuration = 4;</code>
5431        */
5432       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder> 
5433            getConfigurationBuilderList() {
5434         return getConfigurationFieldBuilder().getBuilderList();
5435       }
5436       private com.google.protobuf.RepeatedFieldBuilder<
5437           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
5438           getConfigurationFieldBuilder() {
5439         if (configurationBuilder_ == null) {
5440           configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
5441               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>(
5442                   configuration_,
5443                   ((bitField0_ & 0x00000008) == 0x00000008),
5444                   getParentForChildren(),
5445                   isClean());
5446           configuration_ = null;
5447         }
5448         return configurationBuilder_;
5449       }
5450 
5451       // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationPeer)
5452     }
5453 
    // Eagerly create the shared ReplicationPeer default instance referenced
    // by getDefaultInstance().
    static {
      defaultInstance = new ReplicationPeer(true);
      defaultInstance.initFields();
    }
5458 
5459     // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationPeer)
5460   }
5461 
  /**
   * Read interface shared by {@code ReplicationState} and its Builder.
   */
  public interface ReplicationStateOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.ReplicationState.State state = 1;
    /**
     * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
     */
    boolean hasState();
    /**
     * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState();
  }
  /**
   * Protobuf type {@code hbase.pb.ReplicationState}
   *
   * <pre>
   **
   * Used by replication. Holds whether enabled or disabled
   * </pre>
   */
  public static final class ReplicationState extends
      com.google.protobuf.GeneratedMessage
      implements ReplicationStateOrBuilder {
    // Use ReplicationState.newBuilder() to construct.
    private ReplicationState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the shared default instance; unknown fields start empty.
    private ReplicationState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default, assigned in the static block at the end of this class.
    private static final ReplicationState defaultInstance;
    public static ReplicationState getDefaultInstance() {
      return defaultInstance;
    }

    public ReplicationState getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor, invoked via PARSER below.
    private ReplicationState(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE: "default" precedes "case 8" — Java switch labels may appear
          // in any order, so dispatch is unaffected.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              // Field 1 (state), varint. Unrecognized enum numbers are kept
              // as unknown fields rather than rejected.
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.valueOf(rawValue);
              if (value == null) {
                unknownFields.mergeVarintField(1, rawValue);
              } else {
                bitField0_ |= 0x00000001;
                state_ = value;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class);
    }

    // Parser that delegates to the wire-format constructor above.
    public static com.google.protobuf.Parser<ReplicationState> PARSER =
        new com.google.protobuf.AbstractParser<ReplicationState>() {
      public ReplicationState parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ReplicationState(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<ReplicationState> getParserForType() {
      return PARSER;
    }

    /**
     * Protobuf enum {@code hbase.pb.ReplicationState.State}
     */
    public enum State
        implements com.google.protobuf.ProtocolMessageEnum {
      /**
       * <code>ENABLED = 0;</code>
       */
      ENABLED(0, 0),
      /**
       * <code>DISABLED = 1;</code>
       */
      DISABLED(1, 1),
      ;

      /**
       * <code>ENABLED = 0;</code>
       */
      public static final int ENABLED_VALUE = 0;
      /**
       * <code>DISABLED = 1;</code>
       */
      public static final int DISABLED_VALUE = 1;


      public final int getNumber() { return value; }

      // Maps a proto field number to its constant; null for unknown numbers.
      public static State valueOf(int value) {
        switch (value) {
          case 0: return ENABLED;
          case 1: return DISABLED;
          default: return null;
        }
      }

      public static com.google.protobuf.Internal.EnumLiteMap<State>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static com.google.protobuf.Internal.EnumLiteMap<State>
          internalValueMap =
            new com.google.protobuf.Internal.EnumLiteMap<State>() {
              public State findValueByNumber(int number) {
                return State.valueOf(number);
              }
            };

      public final com.google.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(index);
      }
      public final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor().getEnumTypes().get(0);
      }

      private static final State[] VALUES = values();

      public static State valueOf(
          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      // index = position in the descriptor; value = proto field number.
      private final int index;
      private final int value;

      private State(int index, int value) {
        this.index = index;
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hbase.pb.ReplicationState.State)
    }

    private int bitField0_;
    // required .hbase.pb.ReplicationState.State state = 1;
    public static final int STATE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_;
    /**
     * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
     */
    public boolean hasState() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() {
      return state_;
    }

    private void initFields() {
      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
    }
    // Memoized: -1 = unknown, 0 = missing required field, 1 = initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasState()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeEnum(1, state_.getNumber());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(1, state_.getNumber());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) obj;

      boolean result = true;
      result = result && (hasState() == other.hasState());
      if (hasState()) {
        result = result &&
            (getState() == other.getState());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        hash = (53 * hash) + hashEnum(getState());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hbase.pb.ReplicationState}
     *
     * <pre>
     **
     * Used by replication. Holds whether enabled or disabled
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationStateOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets state to the proto default (ENABLED) and clears the has-bit.
      public Builder clear() {
        super.clear();
        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance();
      }

      // Builds and throws if the required 'state' field is unset.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState build() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.state_ = state_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance()) return this;
        if (other.hasState()) {
          setState(other.getState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasState()) {
          
          return false;
        }
        return true;
      }

      // Parses from a stream; merges whatever was read even on failure.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hbase.pb.ReplicationState.State state = 1;
      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
      /**
       * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
       */
      public boolean hasState() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() {
        return state_;
      }
      /**
       * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
       */
      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        state_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required .hbase.pb.ReplicationState.State state = 1;</code>
       */
      public Builder clearState() {
        bitField0_ = (bitField0_ & ~0x00000001);
        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationState)
    }

    static {
      defaultInstance = new ReplicationState(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationState)
  }
6004 
  /**
   * Read interface shared by {@code ReplicationHLogPosition} and its Builder.
   */
  public interface ReplicationHLogPositionOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required int64 position = 1;
    /**
     * <code>required int64 position = 1;</code>
     */
    boolean hasPosition();
    /**
     * <code>required int64 position = 1;</code>
     */
    long getPosition();
  }
6018   /**
6019    * Protobuf type {@code hbase.pb.ReplicationHLogPosition}
6020    *
6021    * <pre>
6022    **
6023    * Used by replication. Holds the current position in an WAL file.
6024    * </pre>
6025    */
6026   public static final class ReplicationHLogPosition extends
6027       com.google.protobuf.GeneratedMessage
6028       implements ReplicationHLogPositionOrBuilder {
6029     // Use ReplicationHLogPosition.newBuilder() to construct.
6030     private ReplicationHLogPosition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6031       super(builder);
6032       this.unknownFields = builder.getUnknownFields();
6033     }
6034     private ReplicationHLogPosition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6035 
6036     private static final ReplicationHLogPosition defaultInstance;
6037     public static ReplicationHLogPosition getDefaultInstance() {
6038       return defaultInstance;
6039     }
6040 
6041     public ReplicationHLogPosition getDefaultInstanceForType() {
6042       return defaultInstance;
6043     }
6044 
6045     private final com.google.protobuf.UnknownFieldSet unknownFields;
6046     @java.lang.Override
6047     public final com.google.protobuf.UnknownFieldSet
6048         getUnknownFields() {
6049       return this.unknownFields;
6050     }
6051     private ReplicationHLogPosition(
6052         com.google.protobuf.CodedInputStream input,
6053         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6054         throws com.google.protobuf.InvalidProtocolBufferException {
6055       initFields();
6056       int mutable_bitField0_ = 0;
6057       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6058           com.google.protobuf.UnknownFieldSet.newBuilder();
6059       try {
6060         boolean done = false;
6061         while (!done) {
6062           int tag = input.readTag();
6063           switch (tag) {
6064             case 0:
6065               done = true;
6066               break;
6067             default: {
6068               if (!parseUnknownField(input, unknownFields,
6069                                      extensionRegistry, tag)) {
6070                 done = true;
6071               }
6072               break;
6073             }
6074             case 8: {
6075               bitField0_ |= 0x00000001;
6076               position_ = input.readInt64();
6077               break;
6078             }
6079           }
6080         }
6081       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6082         throw e.setUnfinishedMessage(this);
6083       } catch (java.io.IOException e) {
6084         throw new com.google.protobuf.InvalidProtocolBufferException(
6085             e.getMessage()).setUnfinishedMessage(this);
6086       } finally {
6087         this.unknownFields = unknownFields.build();
6088         makeExtensionsImmutable();
6089       }
6090     }
    // Descriptor/accessor-table plumbing for hbase.pb.ReplicationHLogPosition.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class);
    }
6102 
6103     public static com.google.protobuf.Parser<ReplicationHLogPosition> PARSER =
6104         new com.google.protobuf.AbstractParser<ReplicationHLogPosition>() {
6105       public ReplicationHLogPosition parsePartialFrom(
6106           com.google.protobuf.CodedInputStream input,
6107           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6108           throws com.google.protobuf.InvalidProtocolBufferException {
6109         return new ReplicationHLogPosition(input, extensionRegistry);
6110       }
6111     };
6112 
    // Returns the shared parser instance for this message type.
    @java.lang.Override
    public com.google.protobuf.Parser<ReplicationHLogPosition> getParserForType() {
      return PARSER;
    }
6117 
    // Bit 0 records whether 'position' was explicitly set (proto2 presence).
    private int bitField0_;
    // required int64 position = 1;
    public static final int POSITION_FIELD_NUMBER = 1;
    private long position_;
    /**
     * <code>required int64 position = 1;</code>
     */
    public boolean hasPosition() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required int64 position = 1;</code>
     */
    public long getPosition() {
      return position_;
    }
6134 
    // Resets fields to their proto defaults; called from the constructors.
    private void initFields() {
      position_ = 0L;
    }
    // Memoized result: -1 = not computed, 0 = missing required field, 1 = ok.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasPosition()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
6150 
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeInt64(1, position_);
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeInt64Size(1, position_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
6174 
    private static final long serialVersionUID = 0L;
    // Java serialization delegates to protobuf byte serialization.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
6181 
    // Field-wise equality: presence flag, value, and unknown fields must match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) obj;

      boolean result = true;
      result = result && (hasPosition() == other.hasPosition());
      if (hasPosition()) {
        result = result && (getPosition()
            == other.getPosition());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Hash is memoized (0 = not yet computed) and kept consistent with equals().
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasPosition()) {
        hash = (37 * hash) + POSITION_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getPosition());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
6219 
    // Static parse entry points; all delegate to the PARSER singleton.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
6272 
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
6286     /**
6287      * Protobuf type {@code hbase.pb.ReplicationHLogPosition}
6288      *
6289      * <pre>
6290      **
6291      * Used by replication. Holds the current position in an WAL file.
6292      * </pre>
6293      */
6294     public static final class Builder extends
6295         com.google.protobuf.GeneratedMessage.Builder<Builder>
6296        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPositionOrBuilder {
6297       public static final com.google.protobuf.Descriptors.Descriptor
6298           getDescriptor() {
6299         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
6300       }
6301 
6302       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6303           internalGetFieldAccessorTable() {
6304         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable
6305             .ensureFieldAccessorsInitialized(
6306                 org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class);
6307       }
6308 
6309       // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.newBuilder()
6310       private Builder() {
6311         maybeForceBuilderInitialization();
6312       }
6313 
6314       private Builder(
6315           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6316         super(parent);
6317         maybeForceBuilderInitialization();
6318       }
6319       private void maybeForceBuilderInitialization() {
6320         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6321         }
6322       }
6323       private static Builder create() {
6324         return new Builder();
6325       }
6326 
6327       public Builder clear() {
6328         super.clear();
6329         position_ = 0L;
6330         bitField0_ = (bitField0_ & ~0x00000001);
6331         return this;
6332       }
6333 
6334       public Builder clone() {
6335         return create().mergeFrom(buildPartial());
6336       }
6337 
6338       public com.google.protobuf.Descriptors.Descriptor
6339           getDescriptorForType() {
6340         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
6341       }
6342 
6343       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition getDefaultInstanceForType() {
6344         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance();
6345       }
6346 
6347       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition build() {
6348         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial();
6349         if (!result.isInitialized()) {
6350           throw newUninitializedMessageException(result);
6351         }
6352         return result;
6353       }
6354 
6355       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildPartial() {
6356         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition(this);
6357         int from_bitField0_ = bitField0_;
6358         int to_bitField0_ = 0;
6359         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6360           to_bitField0_ |= 0x00000001;
6361         }
6362         result.position_ = position_;
6363         result.bitField0_ = to_bitField0_;
6364         onBuilt();
6365         return result;
6366       }
6367 
6368       public Builder mergeFrom(com.google.protobuf.Message other) {
6369         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) {
6370           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)other);
6371         } else {
6372           super.mergeFrom(other);
6373           return this;
6374         }
6375       }
6376 
6377       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other) {
6378         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance()) return this;
6379         if (other.hasPosition()) {
6380           setPosition(other.getPosition());
6381         }
6382         this.mergeUnknownFields(other.getUnknownFields());
6383         return this;
6384       }
6385 
6386       public final boolean isInitialized() {
6387         if (!hasPosition()) {
6388           
6389           return false;
6390         }
6391         return true;
6392       }
6393 
6394       public Builder mergeFrom(
6395           com.google.protobuf.CodedInputStream input,
6396           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6397           throws java.io.IOException {
6398         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parsedMessage = null;
6399         try {
6400           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6401         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6402           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) e.getUnfinishedMessage();
6403           throw e;
6404         } finally {
6405           if (parsedMessage != null) {
6406             mergeFrom(parsedMessage);
6407           }
6408         }
6409         return this;
6410       }
6411       private int bitField0_;
6412 
6413       // required int64 position = 1;
6414       private long position_ ;
6415       /**
6416        * <code>required int64 position = 1;</code>
6417        */
6418       public boolean hasPosition() {
6419         return ((bitField0_ & 0x00000001) == 0x00000001);
6420       }
6421       /**
6422        * <code>required int64 position = 1;</code>
6423        */
6424       public long getPosition() {
6425         return position_;
6426       }
6427       /**
6428        * <code>required int64 position = 1;</code>
6429        */
6430       public Builder setPosition(long value) {
6431         bitField0_ |= 0x00000001;
6432         position_ = value;
6433         onChanged();
6434         return this;
6435       }
6436       /**
6437        * <code>required int64 position = 1;</code>
6438        */
6439       public Builder clearPosition() {
6440         bitField0_ = (bitField0_ & ~0x00000001);
6441         position_ = 0L;
6442         onChanged();
6443         return this;
6444       }
6445 
6446       // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationHLogPosition)
6447     }
6448 
    // Eagerly create this type's default (empty) instance at class-load time.
    static {
      defaultInstance = new ReplicationHLogPosition(true);
      defaultInstance.initFields();
    }
6453 
6454     // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationHLogPosition)
6455   }
6456 
  // Read-only accessor view shared by ReplicationLock and its Builder.
  public interface ReplicationLockOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string lock_owner = 1;
    /**
     * <code>required string lock_owner = 1;</code>
     */
    boolean hasLockOwner();
    /**
     * <code>required string lock_owner = 1;</code>
     */
    java.lang.String getLockOwner();
    /**
     * <code>required string lock_owner = 1;</code>
     */
    com.google.protobuf.ByteString
        getLockOwnerBytes();
  }
6475   /**
6476    * Protobuf type {@code hbase.pb.ReplicationLock}
6477    *
6478    * <pre>
6479    **
6480    * Used by replication. Used to lock a region server during failover.
6481    * </pre>
6482    */
6483   public static final class ReplicationLock extends
6484       com.google.protobuf.GeneratedMessage
6485       implements ReplicationLockOrBuilder {
    // Use ReplicationLock.newBuilder() to construct.
    private ReplicationLock(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Lightweight constructor used only for the singleton default instance.
    private ReplicationLock(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final ReplicationLock defaultInstance;
    public static ReplicationLock getDefaultInstance() {
      return defaultInstance;
    }

    public ReplicationLock getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor, invoked via PARSER.
    private ReplicationLock(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE: 'default' precedes 'case 10' below; Java switch dispatch is
          // unaffected by label order, so behavior is unchanged.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              lockOwner_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze unknown fields, even when parsing fails part-way.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor/accessor-table plumbing for hbase.pb.ReplicationLock.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationLock_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationLock_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder.class);
    }
6559 
6560     public static com.google.protobuf.Parser<ReplicationLock> PARSER =
6561         new com.google.protobuf.AbstractParser<ReplicationLock>() {
6562       public ReplicationLock parsePartialFrom(
6563           com.google.protobuf.CodedInputStream input,
6564           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6565           throws com.google.protobuf.InvalidProtocolBufferException {
6566         return new ReplicationLock(input, extensionRegistry);
6567       }
6568     };
6569 
    // Returns the shared parser instance for this message type.
    @java.lang.Override
    public com.google.protobuf.Parser<ReplicationLock> getParserForType() {
      return PARSER;
    }
6574 
    // Bit 0 records whether 'lock_owner' was explicitly set (proto2 presence).
    private int bitField0_;
    // required string lock_owner = 1;
    public static final int LOCK_OWNER_FIELD_NUMBER = 1;
    // Holds either a String or a ByteString; decoded lazily on first access.
    private java.lang.Object lockOwner_;
    /**
     * <code>required string lock_owner = 1;</code>
     */
    public boolean hasLockOwner() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required string lock_owner = 1;</code>
     */
    public java.lang.String getLockOwner() {
      java.lang.Object ref = lockOwner_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded form only when the bytes are valid UTF-8.
        if (bs.isValidUtf8()) {
          lockOwner_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string lock_owner = 1;</code>
     */
    public com.google.protobuf.ByteString
        getLockOwnerBytes() {
      java.lang.Object ref = lockOwner_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        lockOwner_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
6618 
    // Resets fields to their proto defaults; called from the constructors.
    private void initFields() {
      lockOwner_ = "";
    }
    // Memoized result: -1 = not computed, 0 = missing required field, 1 = ok.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasLockOwner()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
6634 
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getLockOwnerBytes());
      }
      getUnknownFields().writeTo(output);
    }

    // Cached wire size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getLockOwnerBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
6658 
    private static final long serialVersionUID = 0L;
    // Java serialization delegates to protobuf byte serialization.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
6665 
    // Field-wise equality: presence flag, value, and unknown fields must match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) obj;

      boolean result = true;
      result = result && (hasLockOwner() == other.hasLockOwner());
      if (hasLockOwner()) {
        result = result && getLockOwner()
            .equals(other.getLockOwner());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Hash is memoized (0 = not yet computed) and kept consistent with equals().
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasLockOwner()) {
        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
        hash = (53 * hash) + getLockOwner().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
6703 
    // Static parse entry points; all delegate to the PARSER singleton.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
6756 
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
6770     /**
6771      * Protobuf type {@code hbase.pb.ReplicationLock}
6772      *
6773      * <pre>
6774      **
6775      * Used by replication. Used to lock a region server during failover.
6776      * </pre>
6777      */
6778     public static final class Builder extends
6779         com.google.protobuf.GeneratedMessage.Builder<Builder>
6780        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLockOrBuilder {
      // Descriptor/accessor-table plumbing for the Builder.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationLock_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationLock_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message-type fields here, so there is nothing to eagerly initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
6810 
      // Resets builder state: field value and its presence bit.
      public Builder clear() {
        super.clear();
        lockOwner_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationLock_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance();
      }
6830 
      // Builds and verifies required fields; throws if 'lock_owner' is unset.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock build() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds without the required-field check; copies value and presence bit.
      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.lockOwner_ = lockOwner_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
6851 
      // Dispatches to the type-specific mergeFrom when the other message is a
      // ReplicationLock; otherwise falls back to reflective field-by-field merge.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Merges set fields from 'other' into this builder; fields unset in
      // 'other' are left untouched. Unknown fields are merged as well.
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance()) return this;
        if (other.hasLockOwner()) {
          bitField0_ |= 0x00000001;
          lockOwner_ = other.lockOwner_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasLockOwner()) {
          // lock_owner is declared 'required' in the proto, so a builder
          // without it cannot produce an initialized message.
          return false;
        }
        return true;
      }

      // Parses from a wire-format stream and merges into this builder.
      // On a parse error, any fields decoded before the failure are still
      // merged (see the finally block) before the exception is rethrown.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Tracks which optional/required fields have been explicitly set
      // (bit 0x00000001 = lock_owner).
      private int bitField0_;

      // required string lock_owner = 1;
      // Stored as Object: either a decoded java.lang.String or the raw
      // ByteString off the wire; lazily converted and cached on access.
      private java.lang.Object lockOwner_ = "";
      /**
       * <code>required string lock_owner = 1;</code>
       */
      public boolean hasLockOwner() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required string lock_owner = 1;</code>
       *
       * Lazily decodes a cached ByteString to String and caches the result.
       */
      public java.lang.String getLockOwner() {
        java.lang.Object ref = lockOwner_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          lockOwner_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string lock_owner = 1;</code>
       *
       * Lazily encodes a cached String to UTF-8 bytes and caches the result.
       */
      public com.google.protobuf.ByteString
          getLockOwnerBytes() {
        java.lang.Object ref = lockOwner_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b = 
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          lockOwner_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string lock_owner = 1;</code>
       *
       * Null values are rejected; proto2 string fields are null-hostile.
       */
      public Builder setLockOwner(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        lockOwner_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required string lock_owner = 1;</code>
       *
       * Clears the has-bit and restores the field's default value.
       */
      public Builder clearLockOwner() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lockOwner_ = getDefaultInstance().getLockOwner();
        onChanged();
        return this;
      }
      /**
       * <code>required string lock_owner = 1;</code>
       *
       * Sets the field from raw bytes without UTF-8 validation.
       */
      public Builder setLockOwnerBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        lockOwner_ = value;
        onChanged();
        return this;
      }
6972 
6973       // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationLock)
6974     }
6975 
    // Eagerly creates the singleton default instance (noInit=true skips the
    // builder path) and populates its fields with proto defaults.
    static {
      defaultInstance = new ReplicationLock(true);
      defaultInstance.initFields();
    }
6980 
6981     // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationLock)
6982   }
6983 
  /**
   * Read-only accessor interface for {@code hbase.pb.TableLock}, implemented
   * by both the immutable message and its builder. All six fields are
   * optional; each value getter is paired with a has-method.
   */
  public interface TableLockOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional .hbase.pb.TableName table_name = 1;
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    boolean hasTableName();
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();

    // optional .hbase.pb.ServerName lock_owner = 2;
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    boolean hasLockOwner();
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner();
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder();

    // optional int64 thread_id = 3;
    /**
     * <code>optional int64 thread_id = 3;</code>
     */
    boolean hasThreadId();
    /**
     * <code>optional int64 thread_id = 3;</code>
     */
    long getThreadId();

    // optional bool is_shared = 4;
    /**
     * <code>optional bool is_shared = 4;</code>
     */
    boolean hasIsShared();
    /**
     * <code>optional bool is_shared = 4;</code>
     */
    boolean getIsShared();

    // optional string purpose = 5;
    /**
     * <code>optional string purpose = 5;</code>
     */
    boolean hasPurpose();
    /**
     * <code>optional string purpose = 5;</code>
     */
    java.lang.String getPurpose();
    /**
     * <code>optional string purpose = 5;</code>
     */
    com.google.protobuf.ByteString
        getPurposeBytes();

    // optional int64 create_time = 6;
    /**
     * <code>optional int64 create_time = 6;</code>
     */
    boolean hasCreateTime();
    /**
     * <code>optional int64 create_time = 6;</code>
     */
    long getCreateTime();
  }
7060   /**
7061    * Protobuf type {@code hbase.pb.TableLock}
7062    *
7063    * <pre>
7064    **
7065    * Metadata associated with a table lock in zookeeper
7066    * </pre>
7067    */
7068   public static final class TableLock extends
7069       com.google.protobuf.GeneratedMessage
7070       implements TableLockOrBuilder {
    // Use TableLock.newBuilder() to construct.
    private TableLock(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only for the singleton default instance; skips builder machinery.
    private TableLock(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, created in the class static initializer.
    private static final TableLock defaultInstance;
    public static TableLock getDefaultInstance() {
      return defaultInstance;
    }

    public TableLock getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields received on the wire that this schema version does not know about.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: reads tag/value pairs until EOF
    // (tag 0) or an unrecognized end-group tag. Unknown fields are preserved
    // in unknownFields rather than dropped. (The 'default' arm appearing
    // before the 'case' labels has no semantic effect in a Java switch.)
    private TableLock(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // table_name: if already set (repeated occurrence on the wire),
              // merge the new message into the existing one per proto2 rules.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = tableName_.toBuilder();
              }
              tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(tableName_);
                tableName_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // lock_owner: same merge-on-duplicate handling as table_name.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = lockOwner_.toBuilder();
              }
              lockOwner_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(lockOwner_);
                lockOwner_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              threadId_ = input.readInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              isShared_ = input.readBool();
              break;
            }
            case 42: {
              bitField0_ |= 0x00000010;
              purpose_ = input.readBytes();
              break;
            }
            case 48: {
              bitField0_ |= 0x00000020;
              createTime_ = input.readInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        // Attach the partially-parsed message so callers can inspect it.
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
    }

    // Maps descriptor fields to the generated accessor methods for reflection.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
    }

    // Stateless parser singleton; delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<TableLock> PARSER =
        new com.google.protobuf.AbstractParser<TableLock>() {
      public TableLock parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new TableLock(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<TableLock> getParserForType() {
      return PARSER;
    }
7200 
    // Has-bits for the six optional fields (0x01=table_name .. 0x20=create_time).
    private int bitField0_;
    // optional .hbase.pb.TableName table_name = 1;
    public static final int TABLE_NAME_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    public boolean hasTableName() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
      return tableName_;
    }
    /**
     * <code>optional .hbase.pb.TableName table_name = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
      return tableName_;
    }

    // optional .hbase.pb.ServerName lock_owner = 2;
    public static final int LOCK_OWNER_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_;
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    public boolean hasLockOwner() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
      return lockOwner_;
    }
    /**
     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
      return lockOwner_;
    }

    // optional int64 thread_id = 3;
    public static final int THREAD_ID_FIELD_NUMBER = 3;
    private long threadId_;
    /**
     * <code>optional int64 thread_id = 3;</code>
     */
    public boolean hasThreadId() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional int64 thread_id = 3;</code>
     */
    public long getThreadId() {
      return threadId_;
    }

    // optional bool is_shared = 4;
    public static final int IS_SHARED_FIELD_NUMBER = 4;
    private boolean isShared_;
    /**
     * <code>optional bool is_shared = 4;</code>
     */
    public boolean hasIsShared() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional bool is_shared = 4;</code>
     */
    public boolean getIsShared() {
      return isShared_;
    }

    // optional string purpose = 5;
    // Stored as Object: either a decoded String or the raw wire ByteString;
    // converted lazily and cached by the accessors below.
    public static final int PURPOSE_FIELD_NUMBER = 5;
    private java.lang.Object purpose_;
    /**
     * <code>optional string purpose = 5;</code>
     */
    public boolean hasPurpose() {
      return ((bitField0_ & 0x00000010) == 0x00000010);
    }
    /**
     * <code>optional string purpose = 5;</code>
     *
     * Decoded String is cached only when the bytes are valid UTF-8, so
     * invalid data keeps round-tripping as raw bytes.
     */
    public java.lang.String getPurpose() {
      java.lang.Object ref = purpose_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          purpose_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string purpose = 5;</code>
     */
    public com.google.protobuf.ByteString
        getPurposeBytes() {
      java.lang.Object ref = purpose_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        purpose_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    // optional int64 create_time = 6;
    public static final int CREATE_TIME_FIELD_NUMBER = 6;
    private long createTime_;
    /**
     * <code>optional int64 create_time = 6;</code>
     */
    public boolean hasCreateTime() {
      return ((bitField0_ & 0x00000020) == 0x00000020);
    }
    /**
     * <code>optional int64 create_time = 6;</code>
     */
    public long getCreateTime() {
      return createTime_;
    }
7336 
    // Sets every field to its proto2 default value.
    private void initFields() {
      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
      lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      threadId_ = 0L;
      isShared_ = false;
      purpose_ = "";
      createTime_ = 0L;
    }
    // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // TableLock itself has no required fields; only present sub-messages
      // need to be recursively checked.
      if (hasTableName()) {
        if (!getTableName().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasLockOwner()) {
        if (!getLockOwner().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
7365 
    // Serializes only the fields whose has-bits are set, in field-number
    // order, followed by any preserved unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, tableName_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, lockOwner_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeInt64(3, threadId_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBool(4, isShared_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        output.writeBytes(5, getPurposeBytes());
      }
      if (((bitField0_ & 0x00000020) == 0x00000020)) {
        output.writeInt64(6, createTime_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 means "not computed yet".
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, tableName_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, lockOwner_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeInt64Size(3, threadId_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(4, isShared_);
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(5, getPurposeBytes());
      }
      if (((bitField0_ & 0x00000020) == 0x00000020)) {
        size += com.google.protobuf.CodedOutputStream
          .computeInt64Size(6, createTime_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
7424 
    private static final long serialVersionUID = 0L;
    // Java serialization is routed through the protobuf-aware replacement
    // object provided by GeneratedMessage.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: two messages are equal when each field's
    // presence matches and, where present, its value matches; unknown
    // fields must match as well.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) obj;

      boolean result = true;
      result = result && (hasTableName() == other.hasTableName());
      if (hasTableName()) {
        result = result && getTableName()
            .equals(other.getTableName());
      }
      result = result && (hasLockOwner() == other.hasLockOwner());
      if (hasLockOwner()) {
        result = result && getLockOwner()
            .equals(other.getLockOwner());
      }
      result = result && (hasThreadId() == other.hasThreadId());
      if (hasThreadId()) {
        result = result && (getThreadId()
            == other.getThreadId());
      }
      result = result && (hasIsShared() == other.hasIsShared());
      if (hasIsShared()) {
        result = result && (getIsShared()
            == other.getIsShared());
      }
      result = result && (hasPurpose() == other.hasPurpose());
      if (hasPurpose()) {
        result = result && getPurpose()
            .equals(other.getPurpose());
      }
      result = result && (hasCreateTime() == other.hasCreateTime());
      if (hasCreateTime()) {
        result = result && (getCreateTime()
            == other.getCreateTime());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
7477 
    // Memoized hash; 0 means "not computed yet". Only present fields
    // contribute, keeping the hash consistent with equals().
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasTableName()) {
        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
        hash = (53 * hash) + getTableName().hashCode();
      }
      if (hasLockOwner()) {
        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
        hash = (53 * hash) + getLockOwner().hashCode();
      }
      if (hasThreadId()) {
        hash = (37 * hash) + THREAD_ID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getThreadId());
      }
      if (hasIsShared()) {
        hash = (37 * hash) + IS_SHARED_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getIsShared());
      }
      if (hasPurpose()) {
        hash = (37 * hash) + PURPOSE_FIELD_NUMBER;
        hash = (53 * hash) + getPurpose().hashCode();
      }
      if (hasCreateTime()) {
        hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getCreateTime());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
7514 
    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factory helpers.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
7581     /**
7582      * Protobuf type {@code hbase.pb.TableLock}
7583      *
7584      * <pre>
7585      **
7586      * Metadata associated with a table lock in zookeeper
7587      * </pre>
7588      */
7589     public static final class Builder extends
7590         com.google.protobuf.GeneratedMessage.Builder<Builder>
7591        implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLockOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
      }

      // Maps descriptor fields to generated accessors for reflection.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      // Parent-aware constructor: changes propagate to the given parent.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested field builders for the two message-typed
      // fields when alwaysUseFieldBuilders is set.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getTableNameFieldBuilder();
          getLockOwnerFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
7623 
// Resets all six TableLock fields to their proto2 defaults and clears the
// corresponding has-bits (0x01 table_name .. 0x20 create_time) in bitField0_.
7624       public Builder clear() {
7625         super.clear();
7626         if (tableNameBuilder_ == null) {
7627           tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
7628         } else {
7629           tableNameBuilder_.clear();  // delegate when a nested builder exists
7630         }
7631         bitField0_ = (bitField0_ & ~0x00000001);
7632         if (lockOwnerBuilder_ == null) {
7633           lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
7634         } else {
7635           lockOwnerBuilder_.clear();
7636         }
7637         bitField0_ = (bitField0_ & ~0x00000002);
7638         threadId_ = 0L;
7639         bitField0_ = (bitField0_ & ~0x00000004);
7640         isShared_ = false;
7641         bitField0_ = (bitField0_ & ~0x00000008);
7642         purpose_ = "";
7643         bitField0_ = (bitField0_ & ~0x00000010);
7644         createTime_ = 0L;
7645         bitField0_ = (bitField0_ & ~0x00000020);
7646         return this;
7647       }
7648 
// Deep copy: snapshots the current (possibly partial) state into a message and
// merges it into a brand-new builder.
7649       public Builder clone() {
7650         return create().mergeFrom(buildPartial());
7651       }
7652 
// Descriptor of the message type this builder produces (hbase.pb.TableLock).
7653       public com.google.protobuf.Descriptors.Descriptor
7654           getDescriptorForType() {
7655         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
7656       }
7657 
// Singleton default (all-fields-unset) TableLock instance.
7658       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() {
7659         return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance();
7660       }
7661 
// Builds the message, rejecting it if isInitialized() fails — all TableLock
// fields are optional, but set nested messages must themselves be initialized
// (see isInitialized() below).
7662       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock build() {
7663         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial();
7664         if (!result.isInitialized()) {
7665           throw newUninitializedMessageException(result);
7666         }
7667         return result;
7668       }
7669 
// Copies builder state into a new TableLock without the initialization check.
// Each has-bit in the builder's bitField0_ is transferred to the message's
// bitField0_ (same bit positions); message fields come either from the raw
// field or from the active nested-field builder.
7670       public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildPartial() {
7671         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock(this);
7672         int from_bitField0_ = bitField0_;
7673         int to_bitField0_ = 0;
7674         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7675           to_bitField0_ |= 0x00000001;  // table_name
7676         }
7677         if (tableNameBuilder_ == null) {
7678           result.tableName_ = tableName_;
7679         } else {
7680           result.tableName_ = tableNameBuilder_.build();
7681         }
7682         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7683           to_bitField0_ |= 0x00000002;  // lock_owner
7684         }
7685         if (lockOwnerBuilder_ == null) {
7686           result.lockOwner_ = lockOwner_;
7687         } else {
7688           result.lockOwner_ = lockOwnerBuilder_.build();
7689         }
7690         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
7691           to_bitField0_ |= 0x00000004;  // thread_id
7692         }
7693         result.threadId_ = threadId_;
7694         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
7695           to_bitField0_ |= 0x00000008;  // is_shared
7696         }
7697         result.isShared_ = isShared_;
7698         if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
7699           to_bitField0_ |= 0x00000010;  // purpose
7700         }
7701         result.purpose_ = purpose_;
7702         if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
7703           to_bitField0_ |= 0x00000020;  // create_time
7704         }
7705         result.createTime_ = createTime_;
7706         result.bitField0_ = to_bitField0_;
7707         onBuilt();
7708         return result;
7709       }
7710 
// Generic merge entry point: dispatches to the typed overload for TableLock,
// otherwise falls back to reflection-based merging in the superclass.
7711       public Builder mergeFrom(com.google.protobuf.Message other) {
7712         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) {
7713           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)other);
7714         } else {
7715           super.mergeFrom(other);
7716           return this;
7717         }
7718       }
7719 
// Field-wise merge: only fields set in 'other' overwrite (scalars) or merge
// into (nested messages) this builder's state. Merging the default instance
// is a no-op.
7720       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) {
7721         if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this;
7722         if (other.hasTableName()) {
7723           mergeTableName(other.getTableName());
7724         }
7725         if (other.hasLockOwner()) {
7726           mergeLockOwner(other.getLockOwner());
7727         }
7728         if (other.hasThreadId()) {
7729           setThreadId(other.getThreadId());
7730         }
7731         if (other.hasIsShared()) {
7732           setIsShared(other.getIsShared());
7733         }
7734         if (other.hasPurpose()) {
// Copies the raw field (String or ByteString) directly, avoiding an
// unnecessary UTF-8 conversion through setPurpose().
7735           bitField0_ |= 0x00000010;
7736           purpose_ = other.purpose_;
7737           onChanged();
7738         }
7739         if (other.hasCreateTime()) {
7740           setCreateTime(other.getCreateTime());
7741         }
7742         this.mergeUnknownFields(other.getUnknownFields());
7743         return this;
7744       }
7745 
// TableLock itself has no required fields, but if table_name or lock_owner is
// set, that nested message must be fully initialized (nested types may carry
// required fields of their own — TODO confirm against HBase.proto).
7746       public final boolean isInitialized() {
7747         if (hasTableName()) {
7748           if (!getTableName().isInitialized()) {
7749             
7750             return false;
7751           }
7752         }
7753         if (hasLockOwner()) {
7754           if (!getLockOwner().isInitialized()) {
7755             
7756             return false;
7757           }
7758         }
7759         return true;
7760       }
7761 
// Parses a TableLock from the wire and merges it in. On a parse failure the
// partially-parsed message (if any) is still merged in the finally block, so
// fields decoded before the error are preserved; the exception is rethrown.
7762       public Builder mergeFrom(
7763           com.google.protobuf.CodedInputStream input,
7764           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7765           throws java.io.IOException {
7766         org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parsedMessage = null;
7767         try {
7768           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7769         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7770           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) e.getUnfinishedMessage();
7771           throw e;
7772         } finally {
7773           if (parsedMessage != null) {
7774             mergeFrom(parsedMessage);
7775           }
7776         }
7777         return this;
7778       }
// Has-bits for the six optional fields:
// 0x01 table_name, 0x02 lock_owner, 0x04 thread_id,
// 0x08 is_shared, 0x10 purpose, 0x20 create_time.
7779       private int bitField0_;
7780 
7781       // optional .hbase.pb.TableName table_name = 1;
// Field state lives either in tableName_ (no builder yet) or, once
// getTableNameFieldBuilder() has run, exclusively in tableNameBuilder_.
7782       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
7783       private com.google.protobuf.SingleFieldBuilder<
7784           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
7785       /**
7786        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7787        */
7788       public boolean hasTableName() {
7789         return ((bitField0_ & 0x00000001) == 0x00000001);
7790       }
7791       /**
7792        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7793        */
7794       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
7795         if (tableNameBuilder_ == null) {
7796           return tableName_;
7797         } else {
7798           return tableNameBuilder_.getMessage();
7799         }
7800       }
7801       /**
7802        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7803        */
7804       public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
7805         if (tableNameBuilder_ == null) {
7806           if (value == null) {
7807             throw new NullPointerException();
7808           }
7809           tableName_ = value;
7810           onChanged();
7811         } else {
7812           tableNameBuilder_.setMessage(value);
7813         }
7814         bitField0_ |= 0x00000001;  // has-bit set on both paths
7815         return this;
7816       }
7817       /**
7818        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7819        */
7820       public Builder setTableName(
7821           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
7822         if (tableNameBuilder_ == null) {
7823           tableName_ = builderForValue.build();
7824           onChanged();
7825         } else {
7826           tableNameBuilder_.setMessage(builderForValue.build());
7827         }
7828         bitField0_ |= 0x00000001;
7829         return this;
7830       }
7831       /**
7832        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7833        */
// Proto2 message-merge semantics: if the field is already set (and not the
// shared default instance), merge 'value' into it; otherwise replace it.
7834       public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
7835         if (tableNameBuilder_ == null) {
7836           if (((bitField0_ & 0x00000001) == 0x00000001) &&
7837               tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
7838             tableName_ =
7839               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
7840           } else {
7841             tableName_ = value;
7842           }
7843           onChanged();
7844         } else {
7845           tableNameBuilder_.mergeFrom(value);
7846         }
7847         bitField0_ |= 0x00000001;
7848         return this;
7849       }
7850       /**
7851        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7852        */
7853       public Builder clearTableName() {
7854         if (tableNameBuilder_ == null) {
7855           tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
7856           onChanged();
7857         } else {
7858           tableNameBuilder_.clear();
7859         }
7860         bitField0_ = (bitField0_ & ~0x00000001);
7861         return this;
7862       }
7863       /**
7864        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7865        */
// Exposes a mutable nested builder; marks the field as set immediately.
7866       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
7867         bitField0_ |= 0x00000001;
7868         onChanged();
7869         return getTableNameFieldBuilder().getBuilder();
7870       }
7871       /**
7872        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7873        */
7874       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
7875         if (tableNameBuilder_ != null) {
7876           return tableNameBuilder_.getMessageOrBuilder();
7877         } else {
7878           return tableName_;
7879         }
7880       }
7881       /**
7882        * <code>optional .hbase.pb.TableName table_name = 1;</code>
7883        */
// Lazily creates the SingleFieldBuilder, seeding it with the current value;
// tableName_ is nulled so the builder becomes the single source of truth.
7884       private com.google.protobuf.SingleFieldBuilder<
7885           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
7886           getTableNameFieldBuilder() {
7887         if (tableNameBuilder_ == null) {
7888           tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7889               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
7890                   tableName_,
7891                   getParentForChildren(),
7892                   isClean());
7893           tableName_ = null;
7894         }
7895         return tableNameBuilder_;
7896       }
7897 
7898       // optional .hbase.pb.ServerName lock_owner = 2;
// Same lazy field/builder pattern as table_name above, for has-bit 0x02.
7899       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
7900       private com.google.protobuf.SingleFieldBuilder<
7901           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_;
7902       /**
7903        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7904        */
7905       public boolean hasLockOwner() {
7906         return ((bitField0_ & 0x00000002) == 0x00000002);
7907       }
7908       /**
7909        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7910        */
7911       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
7912         if (lockOwnerBuilder_ == null) {
7913           return lockOwner_;
7914         } else {
7915           return lockOwnerBuilder_.getMessage();
7916         }
7917       }
7918       /**
7919        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7920        */
7921       public Builder setLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
7922         if (lockOwnerBuilder_ == null) {
7923           if (value == null) {
7924             throw new NullPointerException();
7925           }
7926           lockOwner_ = value;
7927           onChanged();
7928         } else {
7929           lockOwnerBuilder_.setMessage(value);
7930         }
7931         bitField0_ |= 0x00000002;
7932         return this;
7933       }
7934       /**
7935        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7936        */
7937       public Builder setLockOwner(
7938           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
7939         if (lockOwnerBuilder_ == null) {
7940           lockOwner_ = builderForValue.build();
7941           onChanged();
7942         } else {
7943           lockOwnerBuilder_.setMessage(builderForValue.build());
7944         }
7945         bitField0_ |= 0x00000002;
7946         return this;
7947       }
7948       /**
7949        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7950        */
// Merge-or-replace per proto2 semantics (see mergeTableName for the pattern).
7951       public Builder mergeLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
7952         if (lockOwnerBuilder_ == null) {
7953           if (((bitField0_ & 0x00000002) == 0x00000002) &&
7954               lockOwner_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
7955             lockOwner_ =
7956               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(lockOwner_).mergeFrom(value).buildPartial();
7957           } else {
7958             lockOwner_ = value;
7959           }
7960           onChanged();
7961         } else {
7962           lockOwnerBuilder_.mergeFrom(value);
7963         }
7964         bitField0_ |= 0x00000002;
7965         return this;
7966       }
7967       /**
7968        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7969        */
7970       public Builder clearLockOwner() {
7971         if (lockOwnerBuilder_ == null) {
7972           lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
7973           onChanged();
7974         } else {
7975           lockOwnerBuilder_.clear();
7976         }
7977         bitField0_ = (bitField0_ & ~0x00000002);
7978         return this;
7979       }
7980       /**
7981        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7982        */
7983       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getLockOwnerBuilder() {
7984         bitField0_ |= 0x00000002;
7985         onChanged();
7986         return getLockOwnerFieldBuilder().getBuilder();
7987       }
7988       /**
7989        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
7990        */
7991       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
7992         if (lockOwnerBuilder_ != null) {
7993           return lockOwnerBuilder_.getMessageOrBuilder();
7994         } else {
7995           return lockOwner_;
7996         }
7997       }
7998       /**
7999        * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
8000        */
// Lazy builder creation; after this, lockOwnerBuilder_ owns the field state.
8001       private com.google.protobuf.SingleFieldBuilder<
8002           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
8003           getLockOwnerFieldBuilder() {
8004         if (lockOwnerBuilder_ == null) {
8005           lockOwnerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8006               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
8007                   lockOwner_,
8008                   getParentForChildren(),
8009                   isClean());
8010           lockOwner_ = null;
8011         }
8012         return lockOwnerBuilder_;
8013       }
8014 
8015       // optional int64 thread_id = 3;
// Scalar field, has-bit 0x04; default 0L when unset.
8016       private long threadId_ ;
8017       /**
8018        * <code>optional int64 thread_id = 3;</code>
8019        */
8020       public boolean hasThreadId() {
8021         return ((bitField0_ & 0x00000004) == 0x00000004);
8022       }
8023       /**
8024        * <code>optional int64 thread_id = 3;</code>
8025        */
8026       public long getThreadId() {
8027         return threadId_;
8028       }
8029       /**
8030        * <code>optional int64 thread_id = 3;</code>
8031        */
8032       public Builder setThreadId(long value) {
8033         bitField0_ |= 0x00000004;
8034         threadId_ = value;
8035         onChanged();
8036         return this;
8037       }
8038       /**
8039        * <code>optional int64 thread_id = 3;</code>
8040        */
8041       public Builder clearThreadId() {
8042         bitField0_ = (bitField0_ & ~0x00000004);
8043         threadId_ = 0L;
8044         onChanged();
8045         return this;
8046       }
8047 
8048       // optional bool is_shared = 4;
// Scalar field, has-bit 0x08; default false when unset.
8049       private boolean isShared_ ;
8050       /**
8051        * <code>optional bool is_shared = 4;</code>
8052        */
8053       public boolean hasIsShared() {
8054         return ((bitField0_ & 0x00000008) == 0x00000008);
8055       }
8056       /**
8057        * <code>optional bool is_shared = 4;</code>
8058        */
8059       public boolean getIsShared() {
8060         return isShared_;
8061       }
8062       /**
8063        * <code>optional bool is_shared = 4;</code>
8064        */
8065       public Builder setIsShared(boolean value) {
8066         bitField0_ |= 0x00000008;
8067         isShared_ = value;
8068         onChanged();
8069         return this;
8070       }
8071       /**
8072        * <code>optional bool is_shared = 4;</code>
8073        */
8074       public Builder clearIsShared() {
8075         bitField0_ = (bitField0_ & ~0x00000008);
8076         isShared_ = false;
8077         onChanged();
8078         return this;
8079       }
8080 
8081       // optional string purpose = 5;
// String field, has-bit 0x10. Stored as Object so it can hold either a
// decoded String or the raw wire ByteString; getters lazily convert and cache
// the requested representation.
8082       private java.lang.Object purpose_ = "";
8083       /**
8084        * <code>optional string purpose = 5;</code>
8085        */
8086       public boolean hasPurpose() {
8087         return ((bitField0_ & 0x00000010) == 0x00000010);
8088       }
8089       /**
8090        * <code>optional string purpose = 5;</code>
8091        */
8092       public java.lang.String getPurpose() {
8093         java.lang.Object ref = purpose_;
8094         if (!(ref instanceof java.lang.String)) {
8095           java.lang.String s = ((com.google.protobuf.ByteString) ref)
8096               .toStringUtf8();
8097           purpose_ = s;  // cache decoded form
8098           return s;
8099         } else {
8100           return (java.lang.String) ref;
8101         }
8102       }
8103       /**
8104        * <code>optional string purpose = 5;</code>
8105        */
8106       public com.google.protobuf.ByteString
8107           getPurposeBytes() {
8108         java.lang.Object ref = purpose_;
8109         if (ref instanceof String) {
8110           com.google.protobuf.ByteString b = 
8111               com.google.protobuf.ByteString.copyFromUtf8(
8112                   (java.lang.String) ref);
8113           purpose_ = b;  // cache encoded form
8114           return b;
8115         } else {
8116           return (com.google.protobuf.ByteString) ref;
8117         }
8118       }
8119       /**
8120        * <code>optional string purpose = 5;</code>
8121        */
8122       public Builder setPurpose(
8123           java.lang.String value) {
8124         if (value == null) {
8125     throw new NullPointerException();
8126   }
8127   bitField0_ |= 0x00000010;
8128         purpose_ = value;
8129         onChanged();
8130         return this;
8131       }
8132       /**
8133        * <code>optional string purpose = 5;</code>
8134        */
8135       public Builder clearPurpose() {
// Resets to the default instance's purpose (the empty string).
8136         bitField0_ = (bitField0_ & ~0x00000010);
8137         purpose_ = getDefaultInstance().getPurpose();
8138         onChanged();
8139         return this;
8140       }
8141       /**
8142        * <code>optional string purpose = 5;</code>
8143        */
8144       public Builder setPurposeBytes(
8145           com.google.protobuf.ByteString value) {
8146         if (value == null) {
8147     throw new NullPointerException();
8148   }
8149   bitField0_ |= 0x00000010;
8150         purpose_ = value;
8151         onChanged();
8152         return this;
8153       }
8154 
8155       // optional int64 create_time = 6;
// Scalar field, has-bit 0x20; default 0L when unset.
8156       private long createTime_ ;
8157       /**
8158        * <code>optional int64 create_time = 6;</code>
8159        */
8160       public boolean hasCreateTime() {
8161         return ((bitField0_ & 0x00000020) == 0x00000020);
8162       }
8163       /**
8164        * <code>optional int64 create_time = 6;</code>
8165        */
8166       public long getCreateTime() {
8167         return createTime_;
8168       }
8169       /**
8170        * <code>optional int64 create_time = 6;</code>
8171        */
8172       public Builder setCreateTime(long value) {
8173         bitField0_ |= 0x00000020;
8174         createTime_ = value;
8175         onChanged();
8176         return this;
8177       }
8178       /**
8179        * <code>optional int64 create_time = 6;</code>
8180        */
8181       public Builder clearCreateTime() {
8182         bitField0_ = (bitField0_ & ~0x00000020);
8183         createTime_ = 0L;
8184         onChanged();
8185         return this;
8186       }
8187 
8188       // @@protoc_insertion_point(builder_scope:hbase.pb.TableLock)
8189     }
8190 
// Creates the shared default TableLock instance via the no-init constructor,
// then populates its fields with proto2 defaults.
8191     static {
8192       defaultInstance = new TableLock(true);
8193       defaultInstance.initFields();
8194     }
8195 
8196     // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
8197   }
8198 
// Per-message Descriptor and reflection FieldAccessorTable holders for every
// message in ZooKeeper.proto; all are assigned once by the file-level static
// descriptor-initialization block at the bottom of this class.
8199   private static com.google.protobuf.Descriptors.Descriptor
8200     internal_static_hbase_pb_MetaRegionServer_descriptor;
8201   private static
8202     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8203       internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable;
8204   private static com.google.protobuf.Descriptors.Descriptor
8205     internal_static_hbase_pb_Master_descriptor;
8206   private static
8207     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8208       internal_static_hbase_pb_Master_fieldAccessorTable;
8209   private static com.google.protobuf.Descriptors.Descriptor
8210     internal_static_hbase_pb_ClusterUp_descriptor;
8211   private static
8212     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8213       internal_static_hbase_pb_ClusterUp_fieldAccessorTable;
8214   private static com.google.protobuf.Descriptors.Descriptor
8215     internal_static_hbase_pb_SplitLogTask_descriptor;
8216   private static
8217     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8218       internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
8219   private static com.google.protobuf.Descriptors.Descriptor
8220     internal_static_hbase_pb_DeprecatedTableState_descriptor;
8221   private static
8222     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8223       internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
8224   private static com.google.protobuf.Descriptors.Descriptor
8225     internal_static_hbase_pb_ReplicationPeer_descriptor;
8226   private static
8227     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8228       internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable;
8229   private static com.google.protobuf.Descriptors.Descriptor
8230     internal_static_hbase_pb_ReplicationState_descriptor;
8231   private static
8232     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8233       internal_static_hbase_pb_ReplicationState_fieldAccessorTable;
8234   private static com.google.protobuf.Descriptors.Descriptor
8235     internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
8236   private static
8237     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8238       internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable;
8239   private static com.google.protobuf.Descriptors.Descriptor
8240     internal_static_hbase_pb_ReplicationLock_descriptor;
8241   private static
8242     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8243       internal_static_hbase_pb_ReplicationLock_fieldAccessorTable;
8244   private static com.google.protobuf.Descriptors.Descriptor
8245     internal_static_hbase_pb_TableLock_descriptor;
8246   private static
8247     com.google.protobuf.GeneratedMessage.FieldAccessorTable
8248       internal_static_hbase_pb_TableLock_fieldAccessorTable;
8249 
// FileDescriptor for ZooKeeper.proto, built from the serialized descriptor
// data in the static block below.
8250   public static com.google.protobuf.Descriptors.FileDescriptor
8251       getDescriptor() {
8252     return descriptor;
8253   }
8254   private static com.google.protobuf.Descriptors.FileDescriptor
8255       descriptor;
8256   static {
8257     java.lang.String[] descriptorData = {
8258       "\n\017ZooKeeper.proto\022\010hbase.pb\032\013HBase.proto" +
8259       "\032\023ClusterStatus.proto\"y\n\020MetaRegionServe" +
8260       "r\022$\n\006server\030\001 \002(\0132\024.hbase.pb.ServerName\022" +
8261       "\023\n\013rpc_version\030\002 \001(\r\022*\n\005state\030\003 \001(\0162\033.hb" +
8262       "ase.pb.RegionState.State\"V\n\006Master\022$\n\006ma" +
8263       "ster\030\001 \002(\0132\024.hbase.pb.ServerName\022\023\n\013rpc_" +
8264       "version\030\002 \001(\r\022\021\n\tinfo_port\030\003 \001(\r\"\037\n\tClus" +
8265       "terUp\022\022\n\nstart_date\030\001 \002(\t\"\247\002\n\014SplitLogTa" +
8266       "sk\022+\n\005state\030\001 \002(\0162\034.hbase.pb.SplitLogTas" +
8267       "k.State\022)\n\013server_name\030\002 \002(\0132\024.hbase.pb.",
8268       "ServerName\022:\n\004mode\030\003 \001(\0162#.hbase.pb.Spli" +
8269       "tLogTask.RecoveryMode:\007UNKNOWN\"C\n\005State\022" +
8270       "\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002" +
8271       "\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007U" +
8272       "NKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPLA" +
8273       "Y\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001 " +
8274       "\002(\0162$.hbase.pb.DeprecatedTableState.Stat" +
8275       "e:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" +
8276       "BLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\237\001\n\017" +
8277       "ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027r",
8278       "eplicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003" +
8279       "(\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigur" +
8280       "ation\030\004 \003(\0132\030.hbase.pb.NameStringPair\"g\n" +
8281       "\020ReplicationState\022/\n\005state\030\001 \002(\0162 .hbase" +
8282       ".pb.ReplicationState.State\"\"\n\005State\022\013\n\007E" +
8283       "NABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLo" +
8284       "gPosition\022\020\n\010position\030\001 \002(\003\"%\n\017Replicati" +
8285       "onLock\022\022\n\nlock_owner\030\001 \002(\t\"\252\001\n\tTableLock" +
8286       "\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableNam" +
8287       "e\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.ServerN",
8288       "ame\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(" +
8289       "\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003B" +
8290       "E\n*org.apache.hadoop.hbase.protobuf.gene" +
8291       "ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
8292     };
8293     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
8294       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
8295         public com.google.protobuf.ExtensionRegistry assignDescriptors(
8296             com.google.protobuf.Descriptors.FileDescriptor root) {
8297           descriptor = root;
8298           internal_static_hbase_pb_MetaRegionServer_descriptor =
8299             getDescriptor().getMessageTypes().get(0);
8300           internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable = new
8301             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8302               internal_static_hbase_pb_MetaRegionServer_descriptor,
8303               new java.lang.String[] { "Server", "RpcVersion", "State", });
8304           internal_static_hbase_pb_Master_descriptor =
8305             getDescriptor().getMessageTypes().get(1);
8306           internal_static_hbase_pb_Master_fieldAccessorTable = new
8307             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8308               internal_static_hbase_pb_Master_descriptor,
8309               new java.lang.String[] { "Master", "RpcVersion", "InfoPort", });
8310           internal_static_hbase_pb_ClusterUp_descriptor =
8311             getDescriptor().getMessageTypes().get(2);
8312           internal_static_hbase_pb_ClusterUp_fieldAccessorTable = new
8313             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8314               internal_static_hbase_pb_ClusterUp_descriptor,
8315               new java.lang.String[] { "StartDate", });
8316           internal_static_hbase_pb_SplitLogTask_descriptor =
8317             getDescriptor().getMessageTypes().get(3);
8318           internal_static_hbase_pb_SplitLogTask_fieldAccessorTable = new
8319             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8320               internal_static_hbase_pb_SplitLogTask_descriptor,
8321               new java.lang.String[] { "State", "ServerName", "Mode", });
8322           internal_static_hbase_pb_DeprecatedTableState_descriptor =
8323             getDescriptor().getMessageTypes().get(4);
8324           internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
8325             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8326               internal_static_hbase_pb_DeprecatedTableState_descriptor,
8327               new java.lang.String[] { "State", });
8328           internal_static_hbase_pb_ReplicationPeer_descriptor =
8329             getDescriptor().getMessageTypes().get(5);
8330           internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable = new
8331             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8332               internal_static_hbase_pb_ReplicationPeer_descriptor,
8333               new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", });
8334           internal_static_hbase_pb_ReplicationState_descriptor =
8335             getDescriptor().getMessageTypes().get(6);
8336           internal_static_hbase_pb_ReplicationState_fieldAccessorTable = new
8337             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8338               internal_static_hbase_pb_ReplicationState_descriptor,
8339               new java.lang.String[] { "State", });
8340           internal_static_hbase_pb_ReplicationHLogPosition_descriptor =
8341             getDescriptor().getMessageTypes().get(7);
8342           internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable = new
8343             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8344               internal_static_hbase_pb_ReplicationHLogPosition_descriptor,
8345               new java.lang.String[] { "Position", });
8346           internal_static_hbase_pb_ReplicationLock_descriptor =
8347             getDescriptor().getMessageTypes().get(8);
8348           internal_static_hbase_pb_ReplicationLock_fieldAccessorTable = new
8349             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8350               internal_static_hbase_pb_ReplicationLock_descriptor,
8351               new java.lang.String[] { "LockOwner", });
8352           internal_static_hbase_pb_TableLock_descriptor =
8353             getDescriptor().getMessageTypes().get(9);
8354           internal_static_hbase_pb_TableLock_fieldAccessorTable = new
8355             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
8356               internal_static_hbase_pb_TableLock_descriptor,
8357               new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", });
8358           return null;
8359         }
8360       };
8361     com.google.protobuf.Descriptors.FileDescriptor
8362       .internalBuildGeneratedFileFrom(descriptorData,
8363         new com.google.protobuf.Descriptors.FileDescriptor[] {
8364           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
8365           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
8366         }, assigner);
8367   }
8368 
8369   // @@protoc_insertion_point(outer_class_scope)
8370 }