View Javadoc

1   // Generated by the protocol buffer compiler.  DO NOT EDIT!
2   // source: ClusterStatus.proto
3   
4   package org.apache.hadoop.hbase.protobuf.generated;
5   
6   public final class ClusterStatusProtos {
  // Private constructor: this outer class is only a namespace/holder for the
  // generated message types and is never instantiated.
  private ClusterStatusProtos() {}
  // Generated no-op: the registry is not modified here.  Present on every
  // protoc-generated outer class; presumably ClusterStatus.proto declares no
  // extensions — TODO confirm against the .proto file.
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  /**
   * Read-only accessor interface implemented by {@code RegionState} (and, per
   * protobuf codegen convention, its Builder): presence checks plus getters
   * for the three fields of the {@code hbase.pb.RegionState} message.
   */
  public interface RegionStateOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.RegionInfo region_info = 1;
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    boolean hasRegionInfo();
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();

    // required .hbase.pb.RegionState.State state = 2;
    /**
     * <code>required .hbase.pb.RegionState.State state = 2;</code>
     */
    boolean hasState();
    /**
     * <code>required .hbase.pb.RegionState.State state = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState();

    // optional uint64 stamp = 3;
    /**
     * <code>optional uint64 stamp = 3;</code>
     */
    boolean hasStamp();
    /**
     * <code>optional uint64 stamp = 3;</code>
     */
    long getStamp();
  }
48    /**
49     * Protobuf type {@code hbase.pb.RegionState}
50     */
51    public static final class RegionState extends
52        com.google.protobuf.GeneratedMessage
53        implements RegionStateOrBuilder {
    // Use RegionState.newBuilder() to construct.
    private RegionState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the shared default instance: starts with an empty
    // unknown-field set (field values are set via initFields elsewhere).
    private RegionState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance; assigned outside this chunk (generated
    // static initializer — not visible here).
    private static final RegionState defaultInstance;
    public static RegionState getDefaultInstance() {
      return defaultInstance;
    }

    public RegionState getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields read off the wire that are not known to this schema version;
    // preserved so reserialization does not drop them.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs until end of
     * stream (tag 0), populating regionInfo_, state_ and stamp_ and setting
     * the matching presence bits in bitField0_.  Unrecognized fields —
     * including enum numbers this version does not know — are kept in
     * unknownFields rather than dropped.
     *
     * NOTE(review): the {@code default:} arm appears textually before
     * {@code case 10} — legal Java (case order is irrelevant) and exactly
     * what protoc emits; do not "fix" the ordering by hand.
     */
    private RegionState(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 signals end of the message on this stream.
              done = true;
              break;
            default: {
              // Field number not recognized: stash it in unknownFields;
              // stop if it cannot be parsed (e.g. end-group tag).
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (region_info, length-delimited message).  If the
              // field appears more than once, merge the new payload into the
              // previously parsed value via a builder, per protobuf
              // repeated-occurrence semantics for singular message fields.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = regionInfo_.toBuilder();
              }
              regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(regionInfo_);
                regionInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2 (state, varint enum).  An enum number with no mapping
              // in State.valueOf is preserved as a varint unknown field
              // instead of being assigned.
              int rawValue = input.readEnum();
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
              if (value == null) {
                unknownFields.mergeVarintField(2, rawValue);
              } else {
                bitField0_ |= 0x00000002;
                state_ = value;
              }
              break;
            }
            case 24: {
              // Field 3 (stamp, varint uint64).
              bitField0_ |= 0x00000004;
              stamp_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on error so the partially parsed message carries
        // whatever unknown fields were collected before the failure.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection descriptor for hbase.pb.RegionState; the referenced static
    // is defined elsewhere in this generated outer class (not in this chunk).
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
    }

    // NOTE(review): PARSER is a public static *non-final* field — mutable in
    // principle, but this is exactly what the protobuf 2.x compiler emits;
    // it must not be hand-edited (regenerating the file would undo it).
    public static com.google.protobuf.Parser<RegionState> PARSER =
        new com.google.protobuf.AbstractParser<RegionState>() {
      public RegionState parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        // Delegates to the wire-parsing constructor above.
        return new RegionState(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RegionState> getParserForType() {
      return PARSER;
    }
166 
    /**
     * Protobuf enum {@code hbase.pb.RegionState.State}
     *
     * Lifecycle states of a region as tracked by the master.  Each constant
     * carries (index, value): index is its position in the descriptor's value
     * list, value is the wire number from the .proto; for this enum the two
     * happen to coincide (0..14).
     */
    public enum State
        implements com.google.protobuf.ProtocolMessageEnum {
      /**
       * <code>OFFLINE = 0;</code>
       *
       * <pre>
       * region is in an offline state
       * </pre>
       */
      OFFLINE(0, 0),
      /**
       * <code>PENDING_OPEN = 1;</code>
       *
       * <pre>
       * sent rpc to server to open but has not begun
       * </pre>
       */
      PENDING_OPEN(1, 1),
      /**
       * <code>OPENING = 2;</code>
       *
       * <pre>
       * server has begun to open but not yet done
       * </pre>
       */
      OPENING(2, 2),
      /**
       * <code>OPEN = 3;</code>
       *
       * <pre>
       * server opened region and updated meta
       * </pre>
       */
      OPEN(3, 3),
      /**
       * <code>PENDING_CLOSE = 4;</code>
       *
       * <pre>
       * sent rpc to server to close but has not begun
       * </pre>
       */
      PENDING_CLOSE(4, 4),
      /**
       * <code>CLOSING = 5;</code>
       *
       * <pre>
       * server has begun to close but not yet done
       * </pre>
       */
      CLOSING(5, 5),
      /**
       * <code>CLOSED = 6;</code>
       *
       * <pre>
       * server closed region and updated meta
       * </pre>
       */
      CLOSED(6, 6),
      /**
       * <code>SPLITTING = 7;</code>
       *
       * <pre>
       * server started split of a region
       * </pre>
       */
      SPLITTING(7, 7),
      /**
       * <code>SPLIT = 8;</code>
       *
       * <pre>
       * server completed split of a region
       * </pre>
       */
      SPLIT(8, 8),
      /**
       * <code>FAILED_OPEN = 9;</code>
       *
       * <pre>
       * failed to open, and won't retry any more
       * </pre>
       */
      FAILED_OPEN(9, 9),
      /**
       * <code>FAILED_CLOSE = 10;</code>
       *
       * <pre>
       * failed to close, and won't retry any more
       * </pre>
       */
      FAILED_CLOSE(10, 10),
      /**
       * <code>MERGING = 11;</code>
       *
       * <pre>
       * server started merge a region
       * </pre>
       */
      MERGING(11, 11),
      /**
       * <code>MERGED = 12;</code>
       *
       * <pre>
       * server completed merge of a region
       * </pre>
       */
      MERGED(12, 12),
      /**
       * <code>SPLITTING_NEW = 13;</code>
       *
       * <pre>
       * new region to be created when RS splits a parent
       * </pre>
       */
      SPLITTING_NEW(13, 13),
      /**
       * <code>MERGING_NEW = 14;</code>
       *
       * <pre>
       * region but hasn't be created yet, or master doesn't
       * know it's already created
       * </pre>
       */
      MERGING_NEW(14, 14),
      ;

      // Wire-number constants mirroring the enum values, for use in switch
      // statements and raw protocol code.
      /**
       * <code>OFFLINE = 0;</code>
       *
       * <pre>
       * region is in an offline state
       * </pre>
       */
      public static final int OFFLINE_VALUE = 0;
      /**
       * <code>PENDING_OPEN = 1;</code>
       *
       * <pre>
       * sent rpc to server to open but has not begun
       * </pre>
       */
      public static final int PENDING_OPEN_VALUE = 1;
      /**
       * <code>OPENING = 2;</code>
       *
       * <pre>
       * server has begun to open but not yet done
       * </pre>
       */
      public static final int OPENING_VALUE = 2;
      /**
       * <code>OPEN = 3;</code>
       *
       * <pre>
       * server opened region and updated meta
       * </pre>
       */
      public static final int OPEN_VALUE = 3;
      /**
       * <code>PENDING_CLOSE = 4;</code>
       *
       * <pre>
       * sent rpc to server to close but has not begun
       * </pre>
       */
      public static final int PENDING_CLOSE_VALUE = 4;
      /**
       * <code>CLOSING = 5;</code>
       *
       * <pre>
       * server has begun to close but not yet done
       * </pre>
       */
      public static final int CLOSING_VALUE = 5;
      /**
       * <code>CLOSED = 6;</code>
       *
       * <pre>
       * server closed region and updated meta
       * </pre>
       */
      public static final int CLOSED_VALUE = 6;
      /**
       * <code>SPLITTING = 7;</code>
       *
       * <pre>
       * server started split of a region
       * </pre>
       */
      public static final int SPLITTING_VALUE = 7;
      /**
       * <code>SPLIT = 8;</code>
       *
       * <pre>
       * server completed split of a region
       * </pre>
       */
      public static final int SPLIT_VALUE = 8;
      /**
       * <code>FAILED_OPEN = 9;</code>
       *
       * <pre>
       * failed to open, and won't retry any more
       * </pre>
       */
      public static final int FAILED_OPEN_VALUE = 9;
      /**
       * <code>FAILED_CLOSE = 10;</code>
       *
       * <pre>
       * failed to close, and won't retry any more
       * </pre>
       */
      public static final int FAILED_CLOSE_VALUE = 10;
      /**
       * <code>MERGING = 11;</code>
       *
       * <pre>
       * server started merge a region
       * </pre>
       */
      public static final int MERGING_VALUE = 11;
      /**
       * <code>MERGED = 12;</code>
       *
       * <pre>
       * server completed merge of a region
       * </pre>
       */
      public static final int MERGED_VALUE = 12;
      /**
       * <code>SPLITTING_NEW = 13;</code>
       *
       * <pre>
       * new region to be created when RS splits a parent
       * </pre>
       */
      public static final int SPLITTING_NEW_VALUE = 13;
      /**
       * <code>MERGING_NEW = 14;</code>
       *
       * <pre>
       * region but hasn't be created yet, or master doesn't
       * know it's already created
       * </pre>
       */
      public static final int MERGING_NEW_VALUE = 14;


      // Wire number of this constant (the second constructor argument).
      public final int getNumber() { return value; }

      // Maps a wire number to its constant; returns null for numbers not
      // known to this schema version (callers keep those as unknown fields).
      public static State valueOf(int value) {
        switch (value) {
          case 0: return OFFLINE;
          case 1: return PENDING_OPEN;
          case 2: return OPENING;
          case 3: return OPEN;
          case 4: return PENDING_CLOSE;
          case 5: return CLOSING;
          case 6: return CLOSED;
          case 7: return SPLITTING;
          case 8: return SPLIT;
          case 9: return FAILED_OPEN;
          case 10: return FAILED_CLOSE;
          case 11: return MERGING;
          case 12: return MERGED;
          case 13: return SPLITTING_NEW;
          case 14: return MERGING_NEW;
          default: return null;
        }
      }

      public static com.google.protobuf.Internal.EnumLiteMap<State>
          internalGetValueMap() {
        return internalValueMap;
      }
      // Lite-runtime lookup adapter; simply delegates to valueOf(int).
      private static com.google.protobuf.Internal.EnumLiteMap<State>
          internalValueMap =
            new com.google.protobuf.Internal.EnumLiteMap<State>() {
              public State findValueByNumber(int number) {
                return State.valueOf(number);
              }
            };

      public final com.google.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        // index is this constant's position in the descriptor's value list.
        return getDescriptor().getValues().get(index);
      }
      public final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final com.google.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        // First (and only visible) enum type declared on RegionState.
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDescriptor().getEnumTypes().get(0);
      }

      // Cached values() array to avoid re-cloning on each descriptor lookup.
      private static final State[] VALUES = values();

      public static State valueOf(
          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      // index: position in the descriptor's value list; value: wire number.
      private final int index;
      private final int value;

      private State(int index, int value) {
        this.index = index;
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hbase.pb.RegionState.State)
    }
487 
    // Presence bitmap: 0x1 = region_info, 0x2 = state, 0x4 = stamp.
    private int bitField0_;
    // required .hbase.pb.RegionInfo region_info = 1;
    public static final int REGION_INFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    public boolean hasRegionInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
      return regionInfo_;
    }
    /**
     * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
      return regionInfo_;
    }

    // required .hbase.pb.RegionState.State state = 2;
    public static final int STATE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
    /**
     * <code>required .hbase.pb.RegionState.State state = 2;</code>
     */
    public boolean hasState() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hbase.pb.RegionState.State state = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
      return state_;
    }

    // optional uint64 stamp = 3;
    public static final int STAMP_FIELD_NUMBER = 3;
    private long stamp_;
    /**
     * <code>optional uint64 stamp = 3;</code>
     */
    public boolean hasStamp() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional uint64 stamp = 3;</code>
     */
    public long getStamp() {
      return stamp_;
    }

    // Resets fields to their proto defaults: region_info = default instance,
    // state = OFFLINE, stamp = 0.  Called before parsing fills in real values.
    private void initFields() {
      regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
      state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
      stamp_ = 0L;
    }
    // Memoized initialization check: -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A RegionState is initialized when both required fields (region_info,
    // state) are present and the nested region_info is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasRegionInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasState()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegionInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    // Writes only fields whose presence bit is set, in field-number order,
    // then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Computes (and memoizes) the serialized size before writing.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, regionInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeEnum(2, state_.getNumber());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, stamp_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized byte size; -1 means not yet computed.  Safe to
    // cache because the message is immutable once built.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, regionInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeEnumSize(2, state_.getNumber());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, stamp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    // Java-serialization hook; delegates to the GeneratedMessage superclass.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
613 
    // Field-by-field equality: presence flags must match, and where present
    // the values must be equal; unknown fields are compared too.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) obj;

      boolean result = true;
      result = result && (hasRegionInfo() == other.hasRegionInfo());
      if (hasRegionInfo()) {
        result = result && getRegionInfo()
            .equals(other.getRegionInfo());
      }
      result = result && (hasState() == other.hasState());
      if (hasState()) {
        result = result &&
            (getState() == other.getState());
      }
      result = result && (hasStamp() == other.hasStamp());
      if (hasStamp()) {
        result = result && (getStamp()
            == other.getStamp());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 doubles as the "not yet computed" sentinel, so a
    // message whose hash is genuinely 0 would be recomputed each call.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasRegionInfo()) {
        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
        hash = (53 * hash) + getRegionInfo().hashCode();
      }
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        // hashEnum / hashLong are helpers inherited from the protobuf
        // runtime base classes (defined outside this chunk).
        hash = (53 * hash) + hashEnum(getState());
      }
      if (hasStamp()) {
        hash = (37 * hash) + STAMP_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStamp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
669 
    // Static parse helpers for every supported input form (ByteString,
    // byte[], InputStream, delimited stream, CodedInputStream), each with an
    // optional extension registry.  All delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
722 
    // Builder factory methods: fresh builder, builder pre-populated from a
    // prototype message, and conversion of this message back to a builder.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: creates a builder attached to a parent for nested
    // builder change propagation.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
736     /**
737      * Protobuf type {@code hbase.pb.RegionState}
738      */
739     public static final class Builder extends
740         com.google.protobuf.GeneratedMessage.Builder<Builder>
741        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder {
742       public static final com.google.protobuf.Descriptors.Descriptor
743           getDescriptor() {
744         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
745       }
746 
747       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
748           internalGetFieldAccessorTable() {
749         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_fieldAccessorTable
750             .ensureFieldAccessorsInitialized(
751                 org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
752       }
753 
754       // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder()
755       private Builder() {
756         maybeForceBuilderInitialization();
757       }
758 
759       private Builder(
760           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
761         super(parent);
762         maybeForceBuilderInitialization();
763       }
764       private void maybeForceBuilderInitialization() {
765         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
766           getRegionInfoFieldBuilder();
767         }
768       }
769       private static Builder create() {
770         return new Builder();
771       }
772 
773       public Builder clear() {
774         super.clear();
775         if (regionInfoBuilder_ == null) {
776           regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
777         } else {
778           regionInfoBuilder_.clear();
779         }
780         bitField0_ = (bitField0_ & ~0x00000001);
781         state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
782         bitField0_ = (bitField0_ & ~0x00000002);
783         stamp_ = 0L;
784         bitField0_ = (bitField0_ & ~0x00000004);
785         return this;
786       }
787 
788       public Builder clone() {
789         return create().mergeFrom(buildPartial());
790       }
791 
792       public com.google.protobuf.Descriptors.Descriptor
793           getDescriptorForType() {
794         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionState_descriptor;
795       }
796 
797       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getDefaultInstanceForType() {
798         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
799       }
800 
801       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState build() {
802         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = buildPartial();
803         if (!result.isInitialized()) {
804           throw newUninitializedMessageException(result);
805         }
806         return result;
807       }
808 
809       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState buildPartial() {
810         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState(this);
811         int from_bitField0_ = bitField0_;
812         int to_bitField0_ = 0;
813         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
814           to_bitField0_ |= 0x00000001;
815         }
816         if (regionInfoBuilder_ == null) {
817           result.regionInfo_ = regionInfo_;
818         } else {
819           result.regionInfo_ = regionInfoBuilder_.build();
820         }
821         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
822           to_bitField0_ |= 0x00000002;
823         }
824         result.state_ = state_;
825         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
826           to_bitField0_ |= 0x00000004;
827         }
828         result.stamp_ = stamp_;
829         result.bitField0_ = to_bitField0_;
830         onBuilt();
831         return result;
832       }
833 
834       public Builder mergeFrom(com.google.protobuf.Message other) {
835         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) {
836           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState)other);
837         } else {
838           super.mergeFrom(other);
839           return this;
840         }
841       }
842 
843       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other) {
844         if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) return this;
845         if (other.hasRegionInfo()) {
846           mergeRegionInfo(other.getRegionInfo());
847         }
848         if (other.hasState()) {
849           setState(other.getState());
850         }
851         if (other.hasStamp()) {
852           setStamp(other.getStamp());
853         }
854         this.mergeUnknownFields(other.getUnknownFields());
855         return this;
856       }
857 
858       public final boolean isInitialized() {
859         if (!hasRegionInfo()) {
860           
861           return false;
862         }
863         if (!hasState()) {
864           
865           return false;
866         }
867         if (!getRegionInfo().isInitialized()) {
868           
869           return false;
870         }
871         return true;
872       }
873 
      /**
       * Parses a RegionState from {@code input} and merges it into this builder.
       * If parsing fails part-way, whatever was parsed is still merged in the
       * finally block before the exception propagates (protobuf semantics).
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Recover the partial message so the finally block can merge it.
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
891       private int bitField0_;
892 
      // required .hbase.pb.RegionInfo region_info = 1;
      // Plain value holder; once regionInfoBuilder_ is created it becomes the
      // source of truth and regionInfo_ is nulled (see getRegionInfoFieldBuilder).
      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
897       /**
898        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
899        */
900       public boolean hasRegionInfo() {
901         return ((bitField0_ & 0x00000001) == 0x00000001);
902       }
903       /**
904        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
905        */
906       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
907         if (regionInfoBuilder_ == null) {
908           return regionInfo_;
909         } else {
910           return regionInfoBuilder_.getMessage();
911         }
912       }
913       /**
914        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
915        */
916       public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
917         if (regionInfoBuilder_ == null) {
918           if (value == null) {
919             throw new NullPointerException();
920           }
921           regionInfo_ = value;
922           onChanged();
923         } else {
924           regionInfoBuilder_.setMessage(value);
925         }
926         bitField0_ |= 0x00000001;
927         return this;
928       }
929       /**
930        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
931        */
932       public Builder setRegionInfo(
933           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
934         if (regionInfoBuilder_ == null) {
935           regionInfo_ = builderForValue.build();
936           onChanged();
937         } else {
938           regionInfoBuilder_.setMessage(builderForValue.build());
939         }
940         bitField0_ |= 0x00000001;
941         return this;
942       }
      /**
       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
       *
       * Merges {@code value} into any existing region_info; if the field is
       * unset (or still the default instance) {@code value} replaces it outright.
       */
      public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
        if (regionInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
            regionInfo_ =
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
          } else {
            regionInfo_ = value;
          }
          onChanged();
        } else {
          regionInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
962       /**
963        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
964        */
965       public Builder clearRegionInfo() {
966         if (regionInfoBuilder_ == null) {
967           regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
968           onChanged();
969         } else {
970           regionInfoBuilder_.clear();
971         }
972         bitField0_ = (bitField0_ & ~0x00000001);
973         return this;
974       }
      /**
       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
       *
       * Returns a mutable builder for region_info, creating the nested field
       * builder on first use, and marks the field as set.
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegionInfoFieldBuilder().getBuilder();
      }
983       /**
984        * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
985        */
986       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
987         if (regionInfoBuilder_ != null) {
988           return regionInfoBuilder_.getMessageOrBuilder();
989         } else {
990           return regionInfo_;
991         }
992       }
      /**
       * <code>required .hbase.pb.RegionInfo region_info = 1;</code>
       *
       * Lazily creates the SingleFieldBuilder for region_info; after creation
       * the plain {@code regionInfo_} holder is nulled and the builder owns
       * the field's value from then on.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
          getRegionInfoFieldBuilder() {
        if (regionInfoBuilder_ == null) {
          regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
                  regionInfo_,
                  getParentForChildren(),
                  isClean());
          regionInfo_ = null;
        }
        return regionInfoBuilder_;
      }
1009 
      // required .hbase.pb.RegionState.State state = 2;
      // Defaults to OFFLINE, the first enum value declared in the .proto.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
1012       /**
1013        * <code>required .hbase.pb.RegionState.State state = 2;</code>
1014        */
1015       public boolean hasState() {
1016         return ((bitField0_ & 0x00000002) == 0x00000002);
1017       }
      /**
       * <code>required .hbase.pb.RegionState.State state = 2;</code>
       *
       * @return the current state value (OFFLINE until explicitly set)
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
        return state_;
      }
1024       /**
1025        * <code>required .hbase.pb.RegionState.State state = 2;</code>
1026        */
1027       public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
1028         if (value == null) {
1029           throw new NullPointerException();
1030         }
1031         bitField0_ |= 0x00000002;
1032         state_ = value;
1033         onChanged();
1034         return this;
1035       }
1036       /**
1037        * <code>required .hbase.pb.RegionState.State state = 2;</code>
1038        */
1039       public Builder clearState() {
1040         bitField0_ = (bitField0_ & ~0x00000002);
1041         state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
1042         onChanged();
1043         return this;
1044       }
1045 
      // optional uint64 stamp = 3;
      // Unsigned on the wire; stored in a Java long (defaults to 0).
      private long stamp_ ;
1048       /**
1049        * <code>optional uint64 stamp = 3;</code>
1050        */
1051       public boolean hasStamp() {
1052         return ((bitField0_ & 0x00000004) == 0x00000004);
1053       }
      /**
       * <code>optional uint64 stamp = 3;</code>
       *
       * @return the stamp value, 0 when unset
       */
      public long getStamp() {
        return stamp_;
      }
1060       /**
1061        * <code>optional uint64 stamp = 3;</code>
1062        */
1063       public Builder setStamp(long value) {
1064         bitField0_ |= 0x00000004;
1065         stamp_ = value;
1066         onChanged();
1067         return this;
1068       }
1069       /**
1070        * <code>optional uint64 stamp = 3;</code>
1071        */
1072       public Builder clearStamp() {
1073         bitField0_ = (bitField0_ & ~0x00000004);
1074         stamp_ = 0L;
1075         onChanged();
1076         return this;
1077       }
1078 
1079       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionState)
1080     }
1081 
    static {
      // Eagerly build the shared default instance returned by getDefaultInstance().
      defaultInstance = new RegionState(true);
      defaultInstance.initFields();
    }
1086 
1087     // @@protoc_insertion_point(class_scope:hbase.pb.RegionState)
1088   }
1089 
  /**
   * Read-only accessor interface implemented by both
   * {@code RegionInTransition} and its {@code Builder}.
   */
  public interface RegionInTransitionOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.RegionSpecifier spec = 1;
    /**
     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
     *
     * @return true if the spec field is set
     */
    boolean hasSpec();
    /**
     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
     *
     * @return the spec value (default instance when unset)
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec();
    /**
     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
     *
     * @return a read view of spec (message or live builder)
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder();

    // required .hbase.pb.RegionState region_state = 2;
    /**
     * <code>required .hbase.pb.RegionState region_state = 2;</code>
     *
     * @return true if the region_state field is set
     */
    boolean hasRegionState();
    /**
     * <code>required .hbase.pb.RegionState region_state = 2;</code>
     *
     * @return the region_state value (default instance when unset)
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState();
    /**
     * <code>required .hbase.pb.RegionState region_state = 2;</code>
     *
     * @return a read view of region_state (message or live builder)
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder();
  }
1121   /**
1122    * Protobuf type {@code hbase.pb.RegionInTransition}
1123    */
1124   public static final class RegionInTransition extends
1125       com.google.protobuf.GeneratedMessage
1126       implements RegionInTransitionOrBuilder {
    // Use RegionInTransition.newBuilder() to construct.
    // Copies the unknown fields accumulated on the builder into the message.
    private RegionInTransition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
1132     private RegionInTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1133 
    // Shared immutable default instance, assigned in the class's static initializer.
    private static final RegionInTransition defaultInstance;
    /** @return the singleton all-fields-unset instance */
    public static RegionInTransition getDefaultInstance() {
      return defaultInstance;
    }
1138 
    /** @return the singleton default instance (instance-side accessor). */
    public RegionInTransition getDefaultInstanceForType() {
      return defaultInstance;
    }
1142 
    // Unknown fields preserved at parse time so reserialization is lossless.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0),
     * storing unrecognized tags in {@code unknownFields}. Used by PARSER.
     * Note: the {@code default} label precedes the field cases in the switch;
     * Java switch dispatch is unaffected by case ordering.
     */
    private RegionInTransition(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // Tag 0 means end of stream.
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (spec), wire type 2: merge with any previously seen value.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = spec_.toBuilder();
              }
              spec_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(spec_);
                spec_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (region_state), wire type 2: same merge-on-repeat handling.
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = regionState_.toBuilder();
              }
              regionState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(regionState_);
                regionState_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze unknown fields, even on failure, so the
        // unfinished message attached to the exception is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** @return the protobuf descriptor for {@code hbase.pb.RegionInTransition} */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
    }
1214 
    /** Wires the reflection field-accessor table to this class and its Builder. */
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
    }
1221 
    // Parser that delegates to the stream-parsing constructor above.
    public static com.google.protobuf.Parser<RegionInTransition> PARSER =
        new com.google.protobuf.AbstractParser<RegionInTransition>() {
      public RegionInTransition parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RegionInTransition(input, extensionRegistry);
      }
    };
1231 
    /** @return the shared PARSER for this message type */
    @java.lang.Override
    public com.google.protobuf.Parser<RegionInTransition> getParserForType() {
      return PARSER;
    }
1236 
    // Presence bits for fields 1-2 (spec, region_state).
    private int bitField0_;
    // required .hbase.pb.RegionSpecifier spec = 1;
    public static final int SPEC_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_;
1241     /**
1242      * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
1243      */
1244     public boolean hasSpec() {
1245       return ((bitField0_ & 0x00000001) == 0x00000001);
1246     }
    /**
     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
     *
     * @return the spec value set by initFields()/parsing
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
      return spec_;
    }
    /**
     * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
     *
     * @return read view of spec (on an immutable message this is the message itself)
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
      return spec_;
    }
1259 
    // required .hbase.pb.RegionState region_state = 2;
    public static final int REGION_STATE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_;
1263     /**
1264      * <code>required .hbase.pb.RegionState region_state = 2;</code>
1265      */
1266     public boolean hasRegionState() {
1267       return ((bitField0_ & 0x00000002) == 0x00000002);
1268     }
    /**
     * <code>required .hbase.pb.RegionState region_state = 2;</code>
     *
     * @return the region_state value set by initFields()/parsing
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
      return regionState_;
    }
    /**
     * <code>required .hbase.pb.RegionState region_state = 2;</code>
     *
     * @return read view of region_state (the message itself on this immutable type)
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
      return regionState_;
    }
1281 
    /** Sets every field to its proto default; called before parsing. */
    private void initFields() {
      spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
      regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
    }
1286     private byte memoizedIsInitialized = -1;
1287     public final boolean isInitialized() {
1288       byte isInitialized = memoizedIsInitialized;
1289       if (isInitialized != -1) return isInitialized == 1;
1290 
1291       if (!hasSpec()) {
1292         memoizedIsInitialized = 0;
1293         return false;
1294       }
1295       if (!hasRegionState()) {
1296         memoizedIsInitialized = 0;
1297         return false;
1298       }
1299       if (!getSpec().isInitialized()) {
1300         memoizedIsInitialized = 0;
1301         return false;
1302       }
1303       if (!getRegionState().isInitialized()) {
1304         memoizedIsInitialized = 0;
1305         return false;
1306       }
1307       memoizedIsInitialized = 1;
1308       return true;
1309     }
1310 
1311     public void writeTo(com.google.protobuf.CodedOutputStream output)
1312                         throws java.io.IOException {
1313       getSerializedSize();
1314       if (((bitField0_ & 0x00000001) == 0x00000001)) {
1315         output.writeMessage(1, spec_);
1316       }
1317       if (((bitField0_ & 0x00000002) == 0x00000002)) {
1318         output.writeMessage(2, regionState_);
1319       }
1320       getUnknownFields().writeTo(output);
1321     }
1322 
1323     private int memoizedSerializedSize = -1;
1324     public int getSerializedSize() {
1325       int size = memoizedSerializedSize;
1326       if (size != -1) return size;
1327 
1328       size = 0;
1329       if (((bitField0_ & 0x00000001) == 0x00000001)) {
1330         size += com.google.protobuf.CodedOutputStream
1331           .computeMessageSize(1, spec_);
1332       }
1333       if (((bitField0_ & 0x00000002) == 0x00000002)) {
1334         size += com.google.protobuf.CodedOutputStream
1335           .computeMessageSize(2, regionState_);
1336       }
1337       size += getUnknownFields().getSerializedSize();
1338       memoizedSerializedSize = size;
1339       return size;
1340     }
1341 
    private static final long serialVersionUID = 0L;
    /** Java serialization hook; delegates to GeneratedMessage's serialized proxy. */
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
1348 
1349     @java.lang.Override
1350     public boolean equals(final java.lang.Object obj) {
1351       if (obj == this) {
1352        return true;
1353       }
1354       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition)) {
1355         return super.equals(obj);
1356       }
1357       org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) obj;
1358 
1359       boolean result = true;
1360       result = result && (hasSpec() == other.hasSpec());
1361       if (hasSpec()) {
1362         result = result && getSpec()
1363             .equals(other.getSpec());
1364       }
1365       result = result && (hasRegionState() == other.hasRegionState());
1366       if (hasRegionState()) {
1367         result = result && getRegionState()
1368             .equals(other.getRegionState());
1369       }
1370       result = result &&
1371           getUnknownFields().equals(other.getUnknownFields());
1372       return result;
1373     }
1374 
    // Memoized hash; 0 means "not yet computed" (a computed 0 would simply recompute).
    private int memoizedHashCode = 0;
    /**
     * Hash consistent with {@link #equals}: folds in the descriptor, each
     * set field (tagged by its field number), and the unknown fields.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSpec()) {
        hash = (37 * hash) + SPEC_FIELD_NUMBER;
        hash = (53 * hash) + getSpec().hashCode();
      }
      if (hasRegionState()) {
        hash = (37 * hash) + REGION_STATE_FIELD_NUMBER;
        hash = (53 * hash) + getRegionState().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
1395 
    // ------------------------------------------------------------------
    // Static parse helpers; every overload delegates to the shared PARSER.
    // ------------------------------------------------------------------
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message body.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
1448 
    /** @return a fresh builder with no fields set */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** @return a builder pre-populated from {@code prototype} */
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** @return a builder pre-populated from this message */
    public Builder toBuilder() { return newBuilder(this); }
1455 
1456     @java.lang.Override
1457     protected Builder newBuilderForType(
1458         com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1459       Builder builder = new Builder(parent);
1460       return builder;
1461     }
1462     /**
1463      * Protobuf type {@code hbase.pb.RegionInTransition}
1464      */
1465     public static final class Builder extends
1466         com.google.protobuf.GeneratedMessage.Builder<Builder>
1467        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder {
      /** @return the descriptor for {@code hbase.pb.RegionInTransition} */
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
      }
1472 
      /** Wires the reflection field-accessor table to the message and builder classes. */
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
      }
1479 
      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
1484 
      // Nested-builder constructor: parent is notified of changes via onChanged().
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Pre-create nested field builders when the runtime requests it
        // (alwaysUseFieldBuilders is enabled in some reflection/test paths).
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getSpecFieldBuilder();
          getRegionStateFieldBuilder();
        }
      }
      /** Factory used by {@code RegionInTransition.newBuilder()}. */
      private static Builder create() {
        return new Builder();
      }
1499 
1500       public Builder clear() {
1501         super.clear();
1502         if (specBuilder_ == null) {
1503           spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
1504         } else {
1505           specBuilder_.clear();
1506         }
1507         bitField0_ = (bitField0_ & ~0x00000001);
1508         if (regionStateBuilder_ == null) {
1509           regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
1510         } else {
1511           regionStateBuilder_.clear();
1512         }
1513         bitField0_ = (bitField0_ & ~0x00000002);
1514         return this;
1515       }
1516 
      /** @return an independent copy of this builder's current state */
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
1520 
      /** @return the descriptor for the message type this builder produces */
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionInTransition_descriptor;
      }
1525 
      /** @return the singleton default RegionInTransition instance */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance();
      }
1529 
1530       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition build() {
1531         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = buildPartial();
1532         if (!result.isInitialized()) {
1533           throw newUninitializedMessageException(result);
1534         }
1535         return result;
1536       }
1537 
      /**
       * Builds the message from current builder state without checking
       * required fields (unlike {@code build()}).
       *
       * @return a possibly-uninitialized RegionInTransition
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        // Transfer has-bit and value for spec (field 1), preferring the
        // nested builder when one has been created.
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (specBuilder_ == null) {
          result.spec_ = spec_;
        } else {
          result.spec_ = specBuilder_.build();
        }
        // Transfer has-bit and value for region_state (field 2).
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (regionStateBuilder_ == null) {
          result.regionState_ = regionState_;
        } else {
          result.regionState_ = regionStateBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
1562 
      /**
       * Generic merge entry point: dispatches to the typed overload when
       * {@code other} is a RegionInTransition, otherwise falls back to the
       * reflection-based merge in the superclass.
       */
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Merges set fields of {@code other} into this builder; submessages are
       * merged recursively (via mergeSpec/mergeRegionState), unknown fields
       * are concatenated.  Merging the default instance is a no-op.
       */
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance()) return this;
        if (other.hasSpec()) {
          mergeSpec(other.getSpec());
        }
        if (other.hasRegionState()) {
          mergeRegionState(other.getRegionState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      /**
       * True only when both required fields are set and each nested message
       * is itself fully initialized.  Unlike the message-side check this is
       * not memoized, because builder state is mutable.
       */
      public final boolean isInitialized() {
        if (!hasSpec()) {
          
          return false;
        }
        if (!hasRegionState()) {
          
          return false;
        }
        if (!getSpec().isInitialized()) {
          
          return false;
        }
        if (!getRegionState().isInitialized()) {
          
          return false;
        }
        return true;
      }
1603 
      /**
       * Parses a RegionInTransition from the stream and merges it into this
       * builder.  On a parse failure the partially-decoded message attached
       * to the exception is still merged (in the finally block) so callers
       * keep whatever fields were read before the error, then the exception
       * is rethrown.
       *
       * @throws java.io.IOException on underlying stream or protocol errors
       */
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bits for this builder: 0x1 = spec, 0x2 = region_state.
      private int bitField0_;

      // required .hbase.pb.RegionSpecifier spec = 1;
      // Storage is dual-mode: 'spec_' holds the value until a nested builder
      // is requested, after which 'specBuilder_' becomes the single source of
      // truth and 'spec_' is nulled (see getSpecFieldBuilder()).
      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> specBuilder_;
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public boolean hasSpec() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
        if (specBuilder_ == null) {
          return spec_;
        } else {
          return specBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public Builder setSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
        if (specBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          spec_ = value;
          onChanged();
        } else {
          specBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public Builder setSpec(
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
        if (specBuilder_ == null) {
          spec_ = builderForValue.build();
          onChanged();
        } else {
          specBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       *
       * Merges into an existing value only when one was already set and is
       * not the shared default instance; otherwise replaces wholesale.
       */
      public Builder mergeSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
        if (specBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              spec_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
            spec_ =
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(spec_).mergeFrom(value).buildPartial();
          } else {
            spec_ = value;
          }
          onChanged();
        } else {
          specBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public Builder clearSpec() {
        if (specBuilder_ == null) {
          spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
          onChanged();
        } else {
          specBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       *
       * Marks the field present and switches storage to builder mode.
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getSpecBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getSpecFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
        if (specBuilder_ != null) {
          return specBuilder_.getMessageOrBuilder();
        } else {
          return spec_;
        }
      }
      /**
       * <code>required .hbase.pb.RegionSpecifier spec = 1;</code>
       *
       * Lazily creates the SingleFieldBuilder, seeding it with the current
       * value, then nulls spec_ so the builder is the sole storage.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
          getSpecFieldBuilder() {
        if (specBuilder_ == null) {
          specBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
                  spec_,
                  getParentForChildren(),
                  isClean());
          spec_ = null;
        }
        return specBuilder_;
      }
1739 
      // required .hbase.pb.RegionState region_state = 2;
      // Same dual-mode storage pattern as 'spec': plain field until a nested
      // builder is requested, then the SingleFieldBuilder owns the value.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> regionStateBuilder_;
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public boolean hasRegionState() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
        if (regionStateBuilder_ == null) {
          return regionState_;
        } else {
          return regionStateBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public Builder setRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) {
        if (regionStateBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          regionState_ = value;
          onChanged();
        } else {
          regionStateBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public Builder setRegionState(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder builderForValue) {
        if (regionStateBuilder_ == null) {
          regionState_ = builderForValue.build();
          onChanged();
        } else {
          regionStateBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       *
       * Merges into an existing non-default value; otherwise replaces.
       */
      public Builder mergeRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) {
        if (regionStateBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              regionState_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) {
            regionState_ =
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder(regionState_).mergeFrom(value).buildPartial();
          } else {
            regionState_ = value;
          }
          onChanged();
        } else {
          regionStateBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public Builder clearRegionState() {
        if (regionStateBuilder_ == null) {
          regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
          onChanged();
        } else {
          regionStateBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       *
       * Marks the field present and switches storage to builder mode.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder getRegionStateBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getRegionStateFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
        if (regionStateBuilder_ != null) {
          return regionStateBuilder_.getMessageOrBuilder();
        } else {
          return regionState_;
        }
      }
      /**
       * <code>required .hbase.pb.RegionState region_state = 2;</code>
       *
       * Lazily creates the SingleFieldBuilder seeded with the current value,
       * then nulls regionState_ so the builder is the sole storage.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> 
          getRegionStateFieldBuilder() {
        if (regionStateBuilder_ == null) {
          regionStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder>(
                  regionState_,
                  getParentForChildren(),
                  isClean());
          regionState_ = null;
        }
        return regionStateBuilder_;
      }
1856 
1857       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionInTransition)
1858     }
1859 
    // Eagerly creates the shared default (all-fields-unset) instance used by
    // getDefaultInstance() and by merge/clear logic throughout this class.
    static {
      defaultInstance = new RegionInTransition(true);
      defaultInstance.initFields();
    }
1864 
1865     // @@protoc_insertion_point(class_scope:hbase.pb.RegionInTransition)
1866   }
1867 
  /**
   * Read-only view shared by {@code StoreSequenceId} and its Builder:
   * has/get accessors for the two required fields of hbase.pb.StoreSequenceId.
   */
  public interface StoreSequenceIdOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required bytes family_name = 1;
    /**
     * <code>required bytes family_name = 1;</code>
     */
    boolean hasFamilyName();
    /**
     * <code>required bytes family_name = 1;</code>
     */
    com.google.protobuf.ByteString getFamilyName();

    // required uint64 sequence_id = 2;
    /**
     * <code>required uint64 sequence_id = 2;</code>
     */
    boolean hasSequenceId();
    /**
     * <code>required uint64 sequence_id = 2;</code>
     */
    long getSequenceId();
  }
1891   /**
1892    * Protobuf type {@code hbase.pb.StoreSequenceId}
1893    *
1894    * <pre>
1895    **
1896    * sequence Id of a store
1897    * </pre>
1898    */
1899   public static final class StoreSequenceId extends
1900       com.google.protobuf.GeneratedMessage
1901       implements StoreSequenceIdOrBuilder {
    // Use StoreSequenceId.newBuilder() to construct.
    private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the shared default instance only; 'noInit' is a marker
    // parameter and is never read.
    private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared immutable default instance (assigned in the class static block).
    private static final StoreSequenceId defaultInstance;
    public static StoreSequenceId getDefaultInstance() {
      return defaultInstance;
    }

    public StoreSequenceId getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that were on the wire but are unknown to this schema version.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs until tag 0
     * (end of stream/group).  Note the {@code default:} arm intentionally
     * precedes the field cases — this is standard protoc output; switch arm
     * order does not affect dispatch.  Unknown fields are preserved, and on
     * error the partially-built message is attached to the exception.
     */
    private StoreSequenceId(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Tag 10 = field 1 (family_name), wire type 2 (length-delimited).
              bitField0_ |= 0x00000001;
              familyName_ = input.readBytes();
              break;
            }
            case 16: {
              // Tag 16 = field 2 (sequence_id), wire type 0 (varint).
              bitField0_ |= 0x00000002;
              sequenceId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor and reflection plumbing for hbase.pb.StoreSequenceId.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
    }

    // Stateless parser delegating to the wire-parsing constructor.  Non-final
    // by protoc-2.5 convention; treat as read-only.
    public static com.google.protobuf.Parser<StoreSequenceId> PARSER =
        new com.google.protobuf.AbstractParser<StoreSequenceId>() {
      public StoreSequenceId parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new StoreSequenceId(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<StoreSequenceId> getParserForType() {
      return PARSER;
    }
1995 
    // Presence bits: 0x1 = family_name, 0x2 = sequence_id.
    private int bitField0_;
    // required bytes family_name = 1;
    public static final int FAMILY_NAME_FIELD_NUMBER = 1;
    private com.google.protobuf.ByteString familyName_;
    /**
     * <code>required bytes family_name = 1;</code>
     */
    public boolean hasFamilyName() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required bytes family_name = 1;</code>
     */
    public com.google.protobuf.ByteString getFamilyName() {
      return familyName_;
    }

    // required uint64 sequence_id = 2;
    public static final int SEQUENCE_ID_FIELD_NUMBER = 2;
    private long sequenceId_;
    /**
     * <code>required uint64 sequence_id = 2;</code>
     */
    public boolean hasSequenceId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 sequence_id = 2;</code>
     */
    public long getSequenceId() {
      return sequenceId_;
    }

    // Resets both fields to their proto defaults; called before parsing and
    // when building the default instance.
    private void initFields() {
      familyName_ = com.google.protobuf.ByteString.EMPTY;
      sequenceId_ = 0L;
    }
    // Memoized initialization state: -1 = not computed, 0 = false, 1 = true.
    // Safe to cache because the message is immutable once constructed.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are 'required' in the .proto.
      if (!hasFamilyName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSequenceId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
2049 
    /**
     * Serializes set fields in field-number order, then any unknown fields.
     * getSerializedSize() is called first for its side effect of populating
     * the memoized size used by nested-message length prefixes.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, familyName_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, sequenceId_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized wire size: -1 until first computed; immutability makes the
    // cache safe.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, familyName_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, sequenceId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
2080 
    private static final long serialVersionUID = 0L;
    // Java-serialization hook; delegates to GeneratedMessage, which replaces
    // the object with a proto-serialized proxy.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
2087 
    /**
     * Field-by-field equality: presence flags, set values, and unknown
     * fields must all match.  Non-StoreSequenceId arguments fall back to
     * Object identity via super.equals.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) obj;

      boolean result = true;
      result = result && (hasFamilyName() == other.hasFamilyName());
      if (hasFamilyName()) {
        result = result && getFamilyName()
            .equals(other.getFamilyName());
      }
      result = result && (hasSequenceId() == other.hasSequenceId());
      if (hasSequenceId()) {
        result = result && (getSequenceId()
            == other.getSequenceId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash (0 = not yet computed); consistent with equals above.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasFamilyName()) {
        hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
        hash = (53 * hash) + getFamilyName().hashCode();
      }
      if (hasSequenceId()) {
        hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getSequenceId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
2134 
    // Static parse entry points — thin wrappers over PARSER for every
    // supported input form (ByteString, byte[], InputStream, CodedInputStream,
    // delimited streams), each with and without an extension registry.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factories: a fresh builder, or one pre-populated from an
    // existing message (toBuilder / newBuilder(prototype)).
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
2194 
    // Creates a builder attached to a parent (used internally when this
    // message is a nested field of another builder).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
2201     /**
2202      * Protobuf type {@code hbase.pb.StoreSequenceId}
2203      *
2204      * <pre>
2205      **
2206      * sequence Id of a store
2207      * </pre>
2208      */
2209     public static final class Builder extends
2210         com.google.protobuf.GeneratedMessage.Builder<Builder>
2211        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder {
      // Descriptor and reflection plumbing for the Builder (mirrors the
      // message-side accessors).
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Intentionally empty body: StoreSequenceId has no message-typed fields,
      // so there are no nested field builders to force-initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
2241 
      // Resets both fields to proto defaults and clears their presence bits.
      public Builder clear() {
        super.clear();
        familyName_ = com.google.protobuf.ByteString.EMPTY;
        bitField0_ = (bitField0_ & ~0x00000001);
        sequenceId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      // Deep copy via round-trip through a partially-built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_StoreSequenceId_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance();
      }
2263 
      /**
       * Builds the message, throwing (unchecked) if either required field
       * (family_name, sequence_id) is unset.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      /**
       * Builds without enforcing required fields; copies the builder's
       * presence bits (0x1 = family_name, 0x2 = sequence_id) and values
       * into the new message.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.familyName_ = familyName_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.sequenceId_ = sequenceId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
2288 
      /**
       * Generic merge entry point: dispatches to the typed overload for
       * StoreSequenceId, else uses the reflection-based superclass merge.
       */
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      /**
       * Copies set scalar fields from {@code other} (last-writer-wins, per
       * proto merge semantics for scalars) and concatenates unknown fields.
       * Merging the default instance is a no-op.
       */
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()) return this;
        if (other.hasFamilyName()) {
          setFamilyName(other.getFamilyName());
        }
        if (other.hasSequenceId()) {
          setSequenceId(other.getSequenceId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // True only when both required fields are set (not memoized: builder
      // state is mutable).
      public final boolean isInitialized() {
        if (!hasFamilyName()) {
          
          return false;
        }
        if (!hasSequenceId()) {
          
          return false;
        }
        return true;
      }
2321 
2322       public Builder mergeFrom(
2323           com.google.protobuf.CodedInputStream input,
2324           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2325           throws java.io.IOException {
2326         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parsedMessage = null;
2327         try {
2328           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2329         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2330           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) e.getUnfinishedMessage();
2331           throw e;
2332         } finally {
2333           if (parsedMessage != null) {
2334             mergeFrom(parsedMessage);
2335           }
2336         }
2337         return this;
2338       }
      // Has-bits for this builder: 0x1 = family_name set, 0x2 = sequence_id set.
      private int bitField0_;

      // required bytes family_name = 1;
      private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes family_name = 1;</code>
       */
      public boolean hasFamilyName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required bytes family_name = 1;</code>
       */
      public com.google.protobuf.ByteString getFamilyName() {
        return familyName_;
      }
      /**
       * <code>required bytes family_name = 1;</code>
       */
      public Builder setFamilyName(com.google.protobuf.ByteString value) {
        // null is rejected eagerly; protobuf fields are never null
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        familyName_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes family_name = 1;</code>
       */
      public Builder clearFamilyName() {
        bitField0_ = (bitField0_ & ~0x00000001);
        // reset to the default instance's value (ByteString.EMPTY)
        familyName_ = getDefaultInstance().getFamilyName();
        onChanged();
        return this;
      }

      // required uint64 sequence_id = 2;
      private long sequenceId_ ;
      /**
       * <code>required uint64 sequence_id = 2;</code>
       */
      public boolean hasSequenceId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 sequence_id = 2;</code>
       */
      public long getSequenceId() {
        return sequenceId_;
      }
      /**
       * <code>required uint64 sequence_id = 2;</code>
       */
      public Builder setSequenceId(long value) {
        bitField0_ |= 0x00000002;
        sequenceId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 sequence_id = 2;</code>
       */
      public Builder clearSequenceId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        sequenceId_ = 0L;
        onChanged();
        return this;
      }
2409 
2410       // @@protoc_insertion_point(builder_scope:hbase.pb.StoreSequenceId)
2411     }
2412 
    // Static initializer: creates the singleton default instance via the
    // no-init constructor, then resets its fields to their proto defaults.
    static {
      defaultInstance = new StoreSequenceId(true);
      defaultInstance.initFields();
    }
2417 
2418     // @@protoc_insertion_point(class_scope:hbase.pb.StoreSequenceId)
2419   }
2420 
  /**
   * Read-only accessor interface implemented by both
   * {@code RegionStoreSequenceIds} and its {@code Builder}.
   */
  public interface RegionStoreSequenceIdsOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 last_flushed_sequence_id = 1;
    /**
     * <code>required uint64 last_flushed_sequence_id = 1;</code>
     */
    boolean hasLastFlushedSequenceId();
    /**
     * <code>required uint64 last_flushed_sequence_id = 1;</code>
     */
    long getLastFlushedSequenceId();

    // repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> 
        getStoreSequenceIdList();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index);
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    int getStoreSequenceIdCount();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
        getStoreSequenceIdOrBuilderList();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
        int index);
  }
2459   /**
2460    * Protobuf type {@code hbase.pb.RegionStoreSequenceIds}
2461    *
2462    * <pre>
2463    **
2464    * contains a sequence id of a region which should be the minimum of its store sequence ids and
2465    * list of sequence ids of the region's stores
2466    * </pre>
2467    */
2468   public static final class RegionStoreSequenceIds extends
2469       com.google.protobuf.GeneratedMessage
2470       implements RegionStoreSequenceIdsOrBuilder {
    // Use RegionStoreSequenceIds.newBuilder() to construct.
    private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor for the singleton default instance only; fields are
    // populated separately (the generated pattern calls initFields() from
    // the class's static initializer).
    private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared immutable default (empty) instance.
    private static final RegionStoreSequenceIds defaultInstance;
    public static RegionStoreSequenceIds getDefaultInstance() {
      return defaultInstance;
    }

    public RegionStoreSequenceIds getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields seen on the wire that this schema does not know about;
    // preserved so they round-trip on reserialization.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: consumes the wire format tag-by-tag.
    // Known tags: 8  = field 1 (last_flushed_sequence_id, varint),
    //             18 = field 2 (store_sequence_id, length-delimited message).
    // Anything else is preserved in unknownFields.
    private RegionStoreSequenceIds(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              // tag 0 marks end of the message
              done = true;
              break;
            default: {
              // unknown field: stash it; a false return (end-group tag)
              // also terminates the loop
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              lastFlushedSequenceId_ = input.readUInt64();
              break;
            }
            case 18: {
              // lazily allocate the repeated-field list on the first element
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
                storeSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>();
                mutable_bitField0_ |= 0x00000002;
              }
              storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // seal the repeated list and unknown fields even on failure, so the
        // unfinished message attached to the exception is safe to expose
        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
          storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor for hbase.pb.RegionStoreSequenceIds, declared at file scope.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionStoreSequenceIds_descriptor;
    }

    // Maps descriptor fields to this class's reflective accessors.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionStoreSequenceIds_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class);
    }

    // Stream parser used by every parseFrom/parseDelimitedFrom entry point;
    // delegates to the parsing constructor above.
    public static com.google.protobuf.Parser<RegionStoreSequenceIds> PARSER =
        new com.google.protobuf.AbstractParser<RegionStoreSequenceIds>() {
      public RegionStoreSequenceIds parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RegionStoreSequenceIds(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RegionStoreSequenceIds> getParserForType() {
      return PARSER;
    }
2570 
    // Has-bits: 0x1 = last_flushed_sequence_id set.
    private int bitField0_;
    // required uint64 last_flushed_sequence_id = 1;
    public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1;
    private long lastFlushedSequenceId_;
    /**
     * <code>required uint64 last_flushed_sequence_id = 1;</code>
     */
    public boolean hasLastFlushedSequenceId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint64 last_flushed_sequence_id = 1;</code>
     */
    public long getLastFlushedSequenceId() {
      return lastFlushedSequenceId_;
    }

    // repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;
    public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2;
    // Immutable after construction (sealed by the parsing constructor /
    // initFields), so it is safe to hand out directly below.
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeSequenceId_;
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreSequenceIdList() {
      return storeSequenceId_;
    }
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
        getStoreSequenceIdOrBuilderList() {
      return storeSequenceId_;
    }
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    public int getStoreSequenceIdCount() {
      return storeSequenceId_.size();
    }
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) {
      return storeSequenceId_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
        int index) {
      return storeSequenceId_.get(index);
    }
2623 
    // Resets all fields to their proto defaults.
    private void initFields() {
      lastFlushedSequenceId_ = 0L;
      storeSequenceId_ = java.util.Collections.emptyList();
    }
    // Memoized result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // Initialized iff the required last_flushed_sequence_id is set and every
    // repeated StoreSequenceId element is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasLastFlushedSequenceId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getStoreSequenceIdCount(); i++) {
        if (!getStoreSequenceId(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
2646 
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // ensure sizes are memoized before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, lastFlushedSequenceId_);
      }
      for (int i = 0; i < storeSequenceId_.size(); i++) {
        output.writeMessage(2, storeSequenceId_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, lastFlushedSequenceId_);
      }
      for (int i = 0; i < storeSequenceId_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, storeSequenceId_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
2677 
    private static final long serialVersionUID = 0L;
    // Java serialization is routed through the protobuf-aware replacement.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Value equality: has-bits must match, set fields must compare equal,
    // and the repeated list and unknown fields must be equal.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) obj;

      boolean result = true;
      result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId());
      if (hasLastFlushedSequenceId()) {
        result = result && (getLastFlushedSequenceId()
            == other.getLastFlushedSequenceId());
      }
      result = result && getStoreSequenceIdList()
          .equals(other.getStoreSequenceIdList());
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 is the "not yet computed" sentinel. Each set field
    // mixes in its field number so equal values in different fields differ.
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasLastFlushedSequenceId()) {
        hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
      }
      if (getStoreSequenceIdCount() > 0) {
        hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER;
        hash = (53 * hash) + getStoreSequenceIdList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
2728 
    // Static parsing convenience methods; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a varint length prefix before the message.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
2781 
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a fresh builder pre-populated with the prototype's fields.
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Parent-aware builder used internally for nested-builder plumbing.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
2795     /**
2796      * Protobuf type {@code hbase.pb.RegionStoreSequenceIds}
2797      *
2798      * <pre>
2799      **
2800      * contains a sequence id of a region which should be the minimum of its store sequence ids and
2801      * list of sequence ids of the region's stores
2802      * </pre>
2803      */
2804     public static final class Builder extends
2805         com.google.protobuf.GeneratedMessage.Builder<Builder>
2806        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIdsOrBuilder {
      // Descriptor for hbase.pb.RegionStoreSequenceIds (same as the message's).
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionStoreSequenceIds_descriptor;
      }

      // Maps descriptor fields to the message/builder reflective accessors.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionStoreSequenceIds_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested repeated-field builder when the runtime
      // flag alwaysUseFieldBuilders is on; otherwise builders are lazy.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getStoreSequenceIdFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
2837 
      // Resets the builder to the empty state. Bit 0x1 = last_flushed_sequence_id
      // set; bit 0x2 = repeated store_sequence_id list is builder-owned/mutable.
      public Builder clear() {
        super.clear();
        lastFlushedSequenceId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        if (storeSequenceIdBuilder_ == null) {
          storeSequenceId_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
        } else {
          storeSequenceIdBuilder_.clear();
        }
        return this;
      }

      // Deep copy via round-trip through a partially built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionStoreSequenceIds_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance();
      }
2863 
      // Builds the message, throwing an unchecked UninitializedMessageException
      // if required fields are unset (see isInitialized()).
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Builds WITHOUT the required-field check. The repeated list is either
      // handed over as an unmodifiable view (clearing bit 0x2 so the builder
      // copies on next write) or taken from the nested field builder.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.lastFlushedSequenceId_ = lastFlushedSequenceId_;
        if (storeSequenceIdBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002)) {
            storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.storeSequenceId_ = storeSequenceId_;
        } else {
          result.storeSequenceId_ = storeSequenceIdBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
2893 
      // Generic merge entry point: dispatches to the type-specific overload
      // when possible, otherwise falls back to the reflective superclass merge.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Merges 'other' into this builder. The repeated field appends; two
      // paths depending on whether the nested field builder is in use. When
      // this builder's list is empty it adopts other's (immutable) list
      // directly and clears the "mutable" bit 0x2 to force copy-on-write.
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance()) return this;
        if (other.hasLastFlushedSequenceId()) {
          setLastFlushedSequenceId(other.getLastFlushedSequenceId());
        }
        if (storeSequenceIdBuilder_ == null) {
          if (!other.storeSequenceId_.isEmpty()) {
            if (storeSequenceId_.isEmpty()) {
              storeSequenceId_ = other.storeSequenceId_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureStoreSequenceIdIsMutable();
              storeSequenceId_.addAll(other.storeSequenceId_);
            }
            onChanged();
          }
        } else {
          if (!other.storeSequenceId_.isEmpty()) {
            if (storeSequenceIdBuilder_.isEmpty()) {
              // builder holds nothing: discard it and adopt other's list
              storeSequenceIdBuilder_.dispose();
              storeSequenceIdBuilder_ = null;
              storeSequenceId_ = other.storeSequenceId_;
              bitField0_ = (bitField0_ & ~0x00000002);
              storeSequenceIdBuilder_ = 
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getStoreSequenceIdFieldBuilder() : null;
            } else {
              storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Initialized iff the required last_flushed_sequence_id is set and
      // every repeated StoreSequenceId element is itself initialized.
      public final boolean isInitialized() {
        if (!hasLastFlushedSequenceId()) {
          // required uint64 last_flushed_sequence_id = 1 is missing
          return false;
        }
        for (int i = 0; i < getStoreSequenceIdCount(); i++) {
          if (!getStoreSequenceId(i).isInitialized()) {
            // a repeated store_sequence_id element is missing required fields
            return false;
          }
        }
        return true;
      }
2951 
      // Parses a RegionStoreSequenceIds from the stream and merges it into
      // this builder. On InvalidProtocolBufferException the partially parsed
      // message (if any) is still merged in the finally block before the
      // exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Bit 0x1 = last_flushed_sequence_id set; bit 0x2 = repeated list mutable.
      private int bitField0_;

      // required uint64 last_flushed_sequence_id = 1;
      private long lastFlushedSequenceId_ ;
      /**
       * <code>required uint64 last_flushed_sequence_id = 1;</code>
       */
      public boolean hasLastFlushedSequenceId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 last_flushed_sequence_id = 1;</code>
       */
      public long getLastFlushedSequenceId() {
        return lastFlushedSequenceId_;
      }
      /**
       * <code>required uint64 last_flushed_sequence_id = 1;</code>
       */
      public Builder setLastFlushedSequenceId(long value) {
        bitField0_ |= 0x00000001;
        lastFlushedSequenceId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 last_flushed_sequence_id = 1;</code>
       */
      public Builder clearLastFlushedSequenceId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lastFlushedSequenceId_ = 0L;
        onChanged();
        return this;
      }
3003 
3004       // repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;
3005       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeSequenceId_ =
3006         java.util.Collections.emptyList();
3007       private void ensureStoreSequenceIdIsMutable() {
3008         if (!((bitField0_ & 0x00000002) == 0x00000002)) {
3009           storeSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>(storeSequenceId_);
3010           bitField0_ |= 0x00000002;
3011          }
3012       }
3013 
3014       private com.google.protobuf.RepeatedFieldBuilder<
3015           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_;
3016 
3017       /**
3018        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3019        */
3020       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreSequenceIdList() {
3021         if (storeSequenceIdBuilder_ == null) {
3022           return java.util.Collections.unmodifiableList(storeSequenceId_);
3023         } else {
3024           return storeSequenceIdBuilder_.getMessageList();
3025         }
3026       }
3027       /**
3028        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3029        */
3030       public int getStoreSequenceIdCount() {
3031         if (storeSequenceIdBuilder_ == null) {
3032           return storeSequenceId_.size();
3033         } else {
3034           return storeSequenceIdBuilder_.getCount();
3035         }
3036       }
3037       /**
3038        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3039        */
3040       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) {
3041         if (storeSequenceIdBuilder_ == null) {
3042           return storeSequenceId_.get(index);
3043         } else {
3044           return storeSequenceIdBuilder_.getMessage(index);
3045         }
3046       }
3047       /**
3048        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3049        */
3050       public Builder setStoreSequenceId(
3051           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
3052         if (storeSequenceIdBuilder_ == null) {
3053           if (value == null) {
3054             throw new NullPointerException();
3055           }
3056           ensureStoreSequenceIdIsMutable();
3057           storeSequenceId_.set(index, value);
3058           onChanged();
3059         } else {
3060           storeSequenceIdBuilder_.setMessage(index, value);
3061         }
3062         return this;
3063       }
3064       /**
3065        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3066        */
3067       public Builder setStoreSequenceId(
3068           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
3069         if (storeSequenceIdBuilder_ == null) {
3070           ensureStoreSequenceIdIsMutable();
3071           storeSequenceId_.set(index, builderForValue.build());
3072           onChanged();
3073         } else {
3074           storeSequenceIdBuilder_.setMessage(index, builderForValue.build());
3075         }
3076         return this;
3077       }
3078       /**
3079        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3080        */
3081       public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
3082         if (storeSequenceIdBuilder_ == null) {
3083           if (value == null) {
3084             throw new NullPointerException();
3085           }
3086           ensureStoreSequenceIdIsMutable();
3087           storeSequenceId_.add(value);
3088           onChanged();
3089         } else {
3090           storeSequenceIdBuilder_.addMessage(value);
3091         }
3092         return this;
3093       }
3094       /**
3095        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3096        */
3097       public Builder addStoreSequenceId(
3098           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
3099         if (storeSequenceIdBuilder_ == null) {
3100           if (value == null) {
3101             throw new NullPointerException();
3102           }
3103           ensureStoreSequenceIdIsMutable();
3104           storeSequenceId_.add(index, value);
3105           onChanged();
3106         } else {
3107           storeSequenceIdBuilder_.addMessage(index, value);
3108         }
3109         return this;
3110       }
3111       /**
3112        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3113        */
3114       public Builder addStoreSequenceId(
3115           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
3116         if (storeSequenceIdBuilder_ == null) {
3117           ensureStoreSequenceIdIsMutable();
3118           storeSequenceId_.add(builderForValue.build());
3119           onChanged();
3120         } else {
3121           storeSequenceIdBuilder_.addMessage(builderForValue.build());
3122         }
3123         return this;
3124       }
3125       /**
3126        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3127        */
3128       public Builder addStoreSequenceId(
3129           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
3130         if (storeSequenceIdBuilder_ == null) {
3131           ensureStoreSequenceIdIsMutable();
3132           storeSequenceId_.add(index, builderForValue.build());
3133           onChanged();
3134         } else {
3135           storeSequenceIdBuilder_.addMessage(index, builderForValue.build());
3136         }
3137         return this;
3138       }
3139       /**
3140        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3141        */
3142       public Builder addAllStoreSequenceId(
3143           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> values) {
3144         if (storeSequenceIdBuilder_ == null) {
3145           ensureStoreSequenceIdIsMutable();
3146           super.addAll(values, storeSequenceId_);
3147           onChanged();
3148         } else {
3149           storeSequenceIdBuilder_.addAllMessages(values);
3150         }
3151         return this;
3152       }
3153       /**
3154        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3155        */
3156       public Builder clearStoreSequenceId() {
3157         if (storeSequenceIdBuilder_ == null) {
3158           storeSequenceId_ = java.util.Collections.emptyList();
3159           bitField0_ = (bitField0_ & ~0x00000002);
3160           onChanged();
3161         } else {
3162           storeSequenceIdBuilder_.clear();
3163         }
3164         return this;
3165       }
3166       /**
3167        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3168        */
3169       public Builder removeStoreSequenceId(int index) {
3170         if (storeSequenceIdBuilder_ == null) {
3171           ensureStoreSequenceIdIsMutable();
3172           storeSequenceId_.remove(index);
3173           onChanged();
3174         } else {
3175           storeSequenceIdBuilder_.remove(index);
3176         }
3177         return this;
3178       }
3179       /**
3180        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3181        */
3182       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder(
3183           int index) {
3184         return getStoreSequenceIdFieldBuilder().getBuilder(index);
3185       }
3186       /**
3187        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3188        */
3189       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
3190           int index) {
3191         if (storeSequenceIdBuilder_ == null) {
3192           return storeSequenceId_.get(index);  } else {
3193           return storeSequenceIdBuilder_.getMessageOrBuilder(index);
3194         }
3195       }
3196       /**
3197        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3198        */
3199       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
3200            getStoreSequenceIdOrBuilderList() {
3201         if (storeSequenceIdBuilder_ != null) {
3202           return storeSequenceIdBuilder_.getMessageOrBuilderList();
3203         } else {
3204           return java.util.Collections.unmodifiableList(storeSequenceId_);
3205         }
3206       }
3207       /**
3208        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3209        */
3210       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() {
3211         return getStoreSequenceIdFieldBuilder().addBuilder(
3212             org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
3213       }
3214       /**
3215        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3216        */
3217       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder(
3218           int index) {
3219         return getStoreSequenceIdFieldBuilder().addBuilder(
3220             index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
3221       }
3222       /**
3223        * <code>repeated .hbase.pb.StoreSequenceId store_sequence_id = 2;</code>
3224        */
3225       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder> 
3226            getStoreSequenceIdBuilderList() {
3227         return getStoreSequenceIdFieldBuilder().getBuilderList();
3228       }
3229       private com.google.protobuf.RepeatedFieldBuilder<
3230           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
3231           getStoreSequenceIdFieldBuilder() {
3232         if (storeSequenceIdBuilder_ == null) {
3233           storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
3234               org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>(
3235                   storeSequenceId_,
3236                   ((bitField0_ & 0x00000002) == 0x00000002),
3237                   getParentForChildren(),
3238                   isClean());
3239           storeSequenceId_ = null;
3240         }
3241         return storeSequenceIdBuilder_;
3242       }
3243 
3244       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionStoreSequenceIds)
3245     }
3246 
    // Eagerly builds the shared default (empty) instance; the noInit
    // constructor skips normal construction, so fields are initialized
    // explicitly via initFields().
    static {
      defaultInstance = new RegionStoreSequenceIds(true);
      defaultInstance.initFields();
    }
3251 
3252     // @@protoc_insertion_point(class_scope:hbase.pb.RegionStoreSequenceIds)
3253   }
3254 
  /**
   * Read-only accessor interface for the {@code hbase.pb.RegionLoad}
   * protobuf message: per-region load metrics (store/storefile counts and
   * sizes, request counters, compaction progress, sequence ids, locality).
   * Each optional/required field exposes a {@code hasX()} presence check
   * alongside its getter; the repeated field exposes list/count/index
   * accessors.
   */
  public interface RegionLoadOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.RegionSpecifier region_specifier = 1;
    /**
     * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
     *
     * <pre>
     ** the region specifier 
     * </pre>
     */
    boolean hasRegionSpecifier();
    /**
     * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
     *
     * <pre>
     ** the region specifier 
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier();
    /**
     * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
     *
     * <pre>
     ** the region specifier 
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder();

    // optional uint32 stores = 2;
    /**
     * <code>optional uint32 stores = 2;</code>
     *
     * <pre>
     ** the number of stores for the region 
     * </pre>
     */
    boolean hasStores();
    /**
     * <code>optional uint32 stores = 2;</code>
     *
     * <pre>
     ** the number of stores for the region 
     * </pre>
     */
    int getStores();

    // optional uint32 storefiles = 3;
    /**
     * <code>optional uint32 storefiles = 3;</code>
     *
     * <pre>
     ** the number of storefiles for the region 
     * </pre>
     */
    boolean hasStorefiles();
    /**
     * <code>optional uint32 storefiles = 3;</code>
     *
     * <pre>
     ** the number of storefiles for the region 
     * </pre>
     */
    int getStorefiles();

    // optional uint32 store_uncompressed_size_MB = 4;
    /**
     * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
     *
     * <pre>
     ** the total size of the store files for the region, uncompressed, in MB 
     * </pre>
     */
    boolean hasStoreUncompressedSizeMB();
    /**
     * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
     *
     * <pre>
     ** the total size of the store files for the region, uncompressed, in MB 
     * </pre>
     */
    int getStoreUncompressedSizeMB();

    // optional uint32 storefile_size_MB = 5;
    /**
     * <code>optional uint32 storefile_size_MB = 5;</code>
     *
     * <pre>
     ** the current total size of the store files for the region, in MB 
     * </pre>
     */
    boolean hasStorefileSizeMB();
    /**
     * <code>optional uint32 storefile_size_MB = 5;</code>
     *
     * <pre>
     ** the current total size of the store files for the region, in MB 
     * </pre>
     */
    int getStorefileSizeMB();

    // optional uint32 memstore_size_MB = 6;
    /**
     * <code>optional uint32 memstore_size_MB = 6;</code>
     *
     * <pre>
     ** the current size of the memstore for the region, in MB 
     * </pre>
     */
    boolean hasMemstoreSizeMB();
    /**
     * <code>optional uint32 memstore_size_MB = 6;</code>
     *
     * <pre>
     ** the current size of the memstore for the region, in MB 
     * </pre>
     */
    int getMemstoreSizeMB();

    // optional uint32 storefile_index_size_MB = 7;
    /**
     * <code>optional uint32 storefile_index_size_MB = 7;</code>
     *
     * <pre>
     **
     * The current total size of root-level store file indexes for the region,
     * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
     * </pre>
     */
    boolean hasStorefileIndexSizeMB();
    /**
     * <code>optional uint32 storefile_index_size_MB = 7;</code>
     *
     * <pre>
     **
     * The current total size of root-level store file indexes for the region,
     * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
     * </pre>
     */
    int getStorefileIndexSizeMB();

    // optional uint64 read_requests_count = 8;
    /**
     * <code>optional uint64 read_requests_count = 8;</code>
     *
     * <pre>
     ** the current total read requests made to region 
     * </pre>
     */
    boolean hasReadRequestsCount();
    /**
     * <code>optional uint64 read_requests_count = 8;</code>
     *
     * <pre>
     ** the current total read requests made to region 
     * </pre>
     */
    long getReadRequestsCount();

    // optional uint64 write_requests_count = 9;
    /**
     * <code>optional uint64 write_requests_count = 9;</code>
     *
     * <pre>
     ** the current total write requests made to region 
     * </pre>
     */
    boolean hasWriteRequestsCount();
    /**
     * <code>optional uint64 write_requests_count = 9;</code>
     *
     * <pre>
     ** the current total write requests made to region 
     * </pre>
     */
    long getWriteRequestsCount();

    // optional uint64 total_compacting_KVs = 10;
    /**
     * <code>optional uint64 total_compacting_KVs = 10;</code>
     *
     * <pre>
     ** the total compacting key values in currently running compaction 
     * </pre>
     */
    boolean hasTotalCompactingKVs();
    /**
     * <code>optional uint64 total_compacting_KVs = 10;</code>
     *
     * <pre>
     ** the total compacting key values in currently running compaction 
     * </pre>
     */
    long getTotalCompactingKVs();

    // optional uint64 current_compacted_KVs = 11;
    /**
     * <code>optional uint64 current_compacted_KVs = 11;</code>
     *
     * <pre>
     ** the completed count of key values in currently running compaction 
     * </pre>
     */
    boolean hasCurrentCompactedKVs();
    /**
     * <code>optional uint64 current_compacted_KVs = 11;</code>
     *
     * <pre>
     ** the completed count of key values in currently running compaction 
     * </pre>
     */
    long getCurrentCompactedKVs();

    // optional uint32 root_index_size_KB = 12;
    /**
     * <code>optional uint32 root_index_size_KB = 12;</code>
     *
     * <pre>
     ** The current total size of root-level indexes for the region, in KB. 
     * </pre>
     */
    boolean hasRootIndexSizeKB();
    /**
     * <code>optional uint32 root_index_size_KB = 12;</code>
     *
     * <pre>
     ** The current total size of root-level indexes for the region, in KB. 
     * </pre>
     */
    int getRootIndexSizeKB();

    // optional uint32 total_static_index_size_KB = 13;
    /**
     * <code>optional uint32 total_static_index_size_KB = 13;</code>
     *
     * <pre>
     ** The total size of all index blocks, not just the root level, in KB. 
     * </pre>
     */
    boolean hasTotalStaticIndexSizeKB();
    /**
     * <code>optional uint32 total_static_index_size_KB = 13;</code>
     *
     * <pre>
     ** The total size of all index blocks, not just the root level, in KB. 
     * </pre>
     */
    int getTotalStaticIndexSizeKB();

    // optional uint32 total_static_bloom_size_KB = 14;
    /**
     * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
     *
     * <pre>
     **
     * The total size of all Bloom filter blocks, not just loaded into the
     * block cache, in KB.
     * </pre>
     */
    boolean hasTotalStaticBloomSizeKB();
    /**
     * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
     *
     * <pre>
     **
     * The total size of all Bloom filter blocks, not just loaded into the
     * block cache, in KB.
     * </pre>
     */
    int getTotalStaticBloomSizeKB();

    // optional uint64 complete_sequence_id = 15;
    /**
     * <code>optional uint64 complete_sequence_id = 15;</code>
     *
     * <pre>
     ** the most recent sequence Id from cache flush 
     * </pre>
     */
    boolean hasCompleteSequenceId();
    /**
     * <code>optional uint64 complete_sequence_id = 15;</code>
     *
     * <pre>
     ** the most recent sequence Id from cache flush 
     * </pre>
     */
    long getCompleteSequenceId();

    // optional float data_locality = 16;
    /**
     * <code>optional float data_locality = 16;</code>
     *
     * <pre>
     ** The current data locality for region in the regionserver 
     * </pre>
     */
    boolean hasDataLocality();
    /**
     * <code>optional float data_locality = 16;</code>
     *
     * <pre>
     ** The current data locality for region in the regionserver 
     * </pre>
     */
    float getDataLocality();

    // optional uint64 last_major_compaction_ts = 17 [default = 0];
    /**
     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
     */
    boolean hasLastMajorCompactionTs();
    /**
     * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
     */
    long getLastMajorCompactionTs();

    // repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
     *
     * <pre>
     ** the most recent sequence Id of store from cache flush 
     * </pre>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> 
        getStoreCompleteSequenceIdList();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
     *
     * <pre>
     ** the most recent sequence Id of store from cache flush 
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index);
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
     *
     * <pre>
     ** the most recent sequence Id of store from cache flush 
     * </pre>
     */
    int getStoreCompleteSequenceIdCount();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
     *
     * <pre>
     ** the most recent sequence Id of store from cache flush 
     * </pre>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
        getStoreCompleteSequenceIdOrBuilderList();
    /**
     * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
     *
     * <pre>
     ** the most recent sequence Id of store from cache flush 
     * </pre>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
        int index);
  }
3617   /**
3618    * Protobuf type {@code hbase.pb.RegionLoad}
3619    */
3620   public static final class RegionLoad extends
3621       com.google.protobuf.GeneratedMessage
3622       implements RegionLoadOrBuilder {
    // Use RegionLoad.newBuilder() to construct.
    private RegionLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Used only by the static initializer to create the shared default
    // instance without running normal construction.
    private RegionLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default (all-fields-unset) instance, assigned in the
    // class's static initializer.
    private static final RegionLoad defaultInstance;
    public static RegionLoad getDefaultInstance() {
      return defaultInstance;
    }

    public RegionLoad getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not known to this schema
    // version; preserved so they survive a parse/serialize round trip.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: reads tag/value pairs from the input
    // stream until tag 0 (end of stream/group) or EOF, dispatching on the
    // full encoded tag (field number << 3 | wire type). Unrecognized tags
    // fall to the default branch and are preserved in unknownFields.
    // NOTE: the default label appearing before the field cases is harmless —
    // Java switch dispatch does not depend on case order.
    private RegionLoad(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1: region_specifier. If the field appears more than
              // once on the wire, merge the new message into the old one.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = regionSpecifier_.toBuilder();
              }
              regionSpecifier_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(regionSpecifier_);
                regionSpecifier_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              // Field 2: stores.
              bitField0_ |= 0x00000002;
              stores_ = input.readUInt32();
              break;
            }
            case 24: {
              // Field 3: storefiles.
              bitField0_ |= 0x00000004;
              storefiles_ = input.readUInt32();
              break;
            }
            case 32: {
              // Field 4: store_uncompressed_size_MB.
              bitField0_ |= 0x00000008;
              storeUncompressedSizeMB_ = input.readUInt32();
              break;
            }
            case 40: {
              // Field 5: storefile_size_MB.
              bitField0_ |= 0x00000010;
              storefileSizeMB_ = input.readUInt32();
              break;
            }
            case 48: {
              // Field 6: memstore_size_MB.
              bitField0_ |= 0x00000020;
              memstoreSizeMB_ = input.readUInt32();
              break;
            }
            case 56: {
              // Field 7: storefile_index_size_MB.
              bitField0_ |= 0x00000040;
              storefileIndexSizeMB_ = input.readUInt32();
              break;
            }
            case 64: {
              // Field 8: read_requests_count.
              bitField0_ |= 0x00000080;
              readRequestsCount_ = input.readUInt64();
              break;
            }
            case 72: {
              // Field 9: write_requests_count.
              bitField0_ |= 0x00000100;
              writeRequestsCount_ = input.readUInt64();
              break;
            }
            case 80: {
              // Field 10: total_compacting_KVs.
              bitField0_ |= 0x00000200;
              totalCompactingKVs_ = input.readUInt64();
              break;
            }
            case 88: {
              // Field 11: current_compacted_KVs.
              bitField0_ |= 0x00000400;
              currentCompactedKVs_ = input.readUInt64();
              break;
            }
            case 96: {
              // Field 12: root_index_size_KB.
              bitField0_ |= 0x00000800;
              rootIndexSizeKB_ = input.readUInt32();
              break;
            }
            case 104: {
              // Field 13: total_static_index_size_KB.
              bitField0_ |= 0x00001000;
              totalStaticIndexSizeKB_ = input.readUInt32();
              break;
            }
            case 112: {
              // Field 14: total_static_bloom_size_KB.
              bitField0_ |= 0x00002000;
              totalStaticBloomSizeKB_ = input.readUInt32();
              break;
            }
            case 120: {
              // Field 15: complete_sequence_id.
              bitField0_ |= 0x00004000;
              completeSequenceId_ = input.readUInt64();
              break;
            }
            case 133: {
              // Field 16: data_locality (fixed 32-bit float wire type).
              bitField0_ |= 0x00008000;
              dataLocality_ = input.readFloat();
              break;
            }
            case 136: {
              // Field 17: last_major_compaction_ts.
              bitField0_ |= 0x00010000;
              lastMajorCompactionTs_ = input.readUInt64();
              break;
            }
            case 146: {
              // Field 18: repeated store_complete_sequence_id; the list is
              // allocated lazily on first occurrence (tracked via the
              // local mutable_bitField0_, not the instance bit field).
              if (!((mutable_bitField0_ & 0x00020000) == 0x00020000)) {
                storeCompleteSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>();
                mutable_bitField0_ |= 0x00020000;
              }
              storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even on parse failure: seal the repeated field and the
        // collected unknown fields so the (possibly partial) message is
        // immutable.
        if (((mutable_bitField0_ & 0x00020000) == 0x00020000)) {
          storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection support: descriptor and field accessor table generated for
    // this message type, used by the protobuf runtime.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionLoad_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionLoad_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder.class);
    }

    // Shared parser instance; delegates to the wire-format parsing
    // constructor above.
    public static com.google.protobuf.Parser<RegionLoad> PARSER =
        new com.google.protobuf.AbstractParser<RegionLoad>() {
      public RegionLoad parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RegionLoad(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RegionLoad> getParserForType() {
      return PARSER;
    }
3810 
3811     private int bitField0_;
     // bitField0_: one presence ("has") bit per field, assigned in declaration
     // order — 0x1 = region_specifier (field 1) through 0x10000 =
     // last_major_compaction_ts (field 17). Checked by every hasXxx() below.
3812     // required .hbase.pb.RegionSpecifier region_specifier = 1;
3813     public static final int REGION_SPECIFIER_FIELD_NUMBER = 1;
3814     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_;
3815     /**
3816      * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
3817      *
3818      * <pre>
3819      ** the region specifier 
3820      * </pre>
3821      */
3822     public boolean hasRegionSpecifier() {
3823       return ((bitField0_ & 0x00000001) == 0x00000001);
3824     }
3825     /**
3826      * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
3827      *
3828      * <pre>
3829      ** the region specifier 
3830      * </pre>
3831      */
3832     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() {
3833       return regionSpecifier_;
3834     }
3835     /**
3836      * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
3837      *
3838      * <pre>
3839      ** the region specifier 
3840      * </pre>
3841      */
3842     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() {
3843       return regionSpecifier_;
3844     }
3845 
3846     // optional uint32 stores = 2;
     // Fields 2-17 below follow one mechanical pattern: a FIELD_NUMBER
     // constant, the backing field, hasXxx() testing the matching bit in
     // bitField0_, and getXxx() returning the raw value (the proto default —
     // set in initFields() — when unset).
3847     public static final int STORES_FIELD_NUMBER = 2;
3848     private int stores_;
3849     /**
3850      * <code>optional uint32 stores = 2;</code>
3851      *
3852      * <pre>
3853      ** the number of stores for the region 
3854      * </pre>
3855      */
3856     public boolean hasStores() {
3857       return ((bitField0_ & 0x00000002) == 0x00000002);
3858     }
3859     /**
3860      * <code>optional uint32 stores = 2;</code>
3861      *
3862      * <pre>
3863      ** the number of stores for the region 
3864      * </pre>
3865      */
3866     public int getStores() {
3867       return stores_;
3868     }
3869 
3870     // optional uint32 storefiles = 3;
3871     public static final int STOREFILES_FIELD_NUMBER = 3;
3872     private int storefiles_;
3873     /**
3874      * <code>optional uint32 storefiles = 3;</code>
3875      *
3876      * <pre>
3877      ** the number of storefiles for the region 
3878      * </pre>
3879      */
3880     public boolean hasStorefiles() {
3881       return ((bitField0_ & 0x00000004) == 0x00000004);
3882     }
3883     /**
3884      * <code>optional uint32 storefiles = 3;</code>
3885      *
3886      * <pre>
3887      ** the number of storefiles for the region 
3888      * </pre>
3889      */
3890     public int getStorefiles() {
3891       return storefiles_;
3892     }
3893 
3894     // optional uint32 store_uncompressed_size_MB = 4;
3895     public static final int STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER = 4;
3896     private int storeUncompressedSizeMB_;
3897     /**
3898      * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
3899      *
3900      * <pre>
3901      ** the total size of the store files for the region, uncompressed, in MB 
3902      * </pre>
3903      */
3904     public boolean hasStoreUncompressedSizeMB() {
3905       return ((bitField0_ & 0x00000008) == 0x00000008);
3906     }
3907     /**
3908      * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
3909      *
3910      * <pre>
3911      ** the total size of the store files for the region, uncompressed, in MB 
3912      * </pre>
3913      */
3914     public int getStoreUncompressedSizeMB() {
3915       return storeUncompressedSizeMB_;
3916     }
3917 
3918     // optional uint32 storefile_size_MB = 5;
3919     public static final int STOREFILE_SIZE_MB_FIELD_NUMBER = 5;
3920     private int storefileSizeMB_;
3921     /**
3922      * <code>optional uint32 storefile_size_MB = 5;</code>
3923      *
3924      * <pre>
3925      ** the current total size of the store files for the region, in MB 
3926      * </pre>
3927      */
3928     public boolean hasStorefileSizeMB() {
3929       return ((bitField0_ & 0x00000010) == 0x00000010);
3930     }
3931     /**
3932      * <code>optional uint32 storefile_size_MB = 5;</code>
3933      *
3934      * <pre>
3935      ** the current total size of the store files for the region, in MB 
3936      * </pre>
3937      */
3938     public int getStorefileSizeMB() {
3939       return storefileSizeMB_;
3940     }
3941 
3942     // optional uint32 memstore_size_MB = 6;
3943     public static final int MEMSTORE_SIZE_MB_FIELD_NUMBER = 6;
3944     private int memstoreSizeMB_;
3945     /**
3946      * <code>optional uint32 memstore_size_MB = 6;</code>
3947      *
3948      * <pre>
3949      ** the current size of the memstore for the region, in MB 
3950      * </pre>
3951      */
3952     public boolean hasMemstoreSizeMB() {
3953       return ((bitField0_ & 0x00000020) == 0x00000020);
3954     }
3955     /**
3956      * <code>optional uint32 memstore_size_MB = 6;</code>
3957      *
3958      * <pre>
3959      ** the current size of the memstore for the region, in MB 
3960      * </pre>
3961      */
3962     public int getMemstoreSizeMB() {
3963       return memstoreSizeMB_;
3964     }
3965 
3966     // optional uint32 storefile_index_size_MB = 7;
3967     public static final int STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER = 7;
3968     private int storefileIndexSizeMB_;
3969     /**
3970      * <code>optional uint32 storefile_index_size_MB = 7;</code>
3971      *
3972      * <pre>
3973      **
3974      * The current total size of root-level store file indexes for the region,
3975      * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
3976      * </pre>
3977      */
3978     public boolean hasStorefileIndexSizeMB() {
3979       return ((bitField0_ & 0x00000040) == 0x00000040);
3980     }
3981     /**
3982      * <code>optional uint32 storefile_index_size_MB = 7;</code>
3983      *
3984      * <pre>
3985      **
3986      * The current total size of root-level store file indexes for the region,
3987      * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
3988      * </pre>
3989      */
3990     public int getStorefileIndexSizeMB() {
3991       return storefileIndexSizeMB_;
3992     }
3993 
3994     // optional uint64 read_requests_count = 8;
3995     public static final int READ_REQUESTS_COUNT_FIELD_NUMBER = 8;
3996     private long readRequestsCount_;
3997     /**
3998      * <code>optional uint64 read_requests_count = 8;</code>
3999      *
4000      * <pre>
4001      ** the current total read requests made to region 
4002      * </pre>
4003      */
4004     public boolean hasReadRequestsCount() {
4005       return ((bitField0_ & 0x00000080) == 0x00000080);
4006     }
4007     /**
4008      * <code>optional uint64 read_requests_count = 8;</code>
4009      *
4010      * <pre>
4011      ** the current total read requests made to region 
4012      * </pre>
4013      */
4014     public long getReadRequestsCount() {
4015       return readRequestsCount_;
4016     }
4017 
4018     // optional uint64 write_requests_count = 9;
4019     public static final int WRITE_REQUESTS_COUNT_FIELD_NUMBER = 9;
4020     private long writeRequestsCount_;
4021     /**
4022      * <code>optional uint64 write_requests_count = 9;</code>
4023      *
4024      * <pre>
4025      ** the current total write requests made to region 
4026      * </pre>
4027      */
4028     public boolean hasWriteRequestsCount() {
4029       return ((bitField0_ & 0x00000100) == 0x00000100);
4030     }
4031     /**
4032      * <code>optional uint64 write_requests_count = 9;</code>
4033      *
4034      * <pre>
4035      ** the current total write requests made to region 
4036      * </pre>
4037      */
4038     public long getWriteRequestsCount() {
4039       return writeRequestsCount_;
4040     }
4041 
4042     // optional uint64 total_compacting_KVs = 10;
4043     public static final int TOTAL_COMPACTING_KVS_FIELD_NUMBER = 10;
4044     private long totalCompactingKVs_;
4045     /**
4046      * <code>optional uint64 total_compacting_KVs = 10;</code>
4047      *
4048      * <pre>
4049      ** the total compacting key values in currently running compaction 
4050      * </pre>
4051      */
4052     public boolean hasTotalCompactingKVs() {
4053       return ((bitField0_ & 0x00000200) == 0x00000200);
4054     }
4055     /**
4056      * <code>optional uint64 total_compacting_KVs = 10;</code>
4057      *
4058      * <pre>
4059      ** the total compacting key values in currently running compaction 
4060      * </pre>
4061      */
4062     public long getTotalCompactingKVs() {
4063       return totalCompactingKVs_;
4064     }
4065 
4066     // optional uint64 current_compacted_KVs = 11;
4067     public static final int CURRENT_COMPACTED_KVS_FIELD_NUMBER = 11;
4068     private long currentCompactedKVs_;
4069     /**
4070      * <code>optional uint64 current_compacted_KVs = 11;</code>
4071      *
4072      * <pre>
4073      ** the completed count of key values in currently running compaction 
4074      * </pre>
4075      */
4076     public boolean hasCurrentCompactedKVs() {
4077       return ((bitField0_ & 0x00000400) == 0x00000400);
4078     }
4079     /**
4080      * <code>optional uint64 current_compacted_KVs = 11;</code>
4081      *
4082      * <pre>
4083      ** the completed count of key values in currently running compaction 
4084      * </pre>
4085      */
4086     public long getCurrentCompactedKVs() {
4087       return currentCompactedKVs_;
4088     }
4089 
4090     // optional uint32 root_index_size_KB = 12;
4091     public static final int ROOT_INDEX_SIZE_KB_FIELD_NUMBER = 12;
4092     private int rootIndexSizeKB_;
4093     /**
4094      * <code>optional uint32 root_index_size_KB = 12;</code>
4095      *
4096      * <pre>
4097      ** The current total size of root-level indexes for the region, in KB. 
4098      * </pre>
4099      */
4100     public boolean hasRootIndexSizeKB() {
4101       return ((bitField0_ & 0x00000800) == 0x00000800);
4102     }
4103     /**
4104      * <code>optional uint32 root_index_size_KB = 12;</code>
4105      *
4106      * <pre>
4107      ** The current total size of root-level indexes for the region, in KB. 
4108      * </pre>
4109      */
4110     public int getRootIndexSizeKB() {
4111       return rootIndexSizeKB_;
4112     }
4113 
4114     // optional uint32 total_static_index_size_KB = 13;
4115     public static final int TOTAL_STATIC_INDEX_SIZE_KB_FIELD_NUMBER = 13;
4116     private int totalStaticIndexSizeKB_;
4117     /**
4118      * <code>optional uint32 total_static_index_size_KB = 13;</code>
4119      *
4120      * <pre>
4121      ** The total size of all index blocks, not just the root level, in KB. 
4122      * </pre>
4123      */
4124     public boolean hasTotalStaticIndexSizeKB() {
4125       return ((bitField0_ & 0x00001000) == 0x00001000);
4126     }
4127     /**
4128      * <code>optional uint32 total_static_index_size_KB = 13;</code>
4129      *
4130      * <pre>
4131      ** The total size of all index blocks, not just the root level, in KB. 
4132      * </pre>
4133      */
4134     public int getTotalStaticIndexSizeKB() {
4135       return totalStaticIndexSizeKB_;
4136     }
4137 
4138     // optional uint32 total_static_bloom_size_KB = 14;
4139     public static final int TOTAL_STATIC_BLOOM_SIZE_KB_FIELD_NUMBER = 14;
4140     private int totalStaticBloomSizeKB_;
4141     /**
4142      * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
4143      *
4144      * <pre>
4145      **
4146      * The total size of all Bloom filter blocks, not just loaded into the
4147      * block cache, in KB.
4148      * </pre>
4149      */
4150     public boolean hasTotalStaticBloomSizeKB() {
4151       return ((bitField0_ & 0x00002000) == 0x00002000);
4152     }
4153     /**
4154      * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
4155      *
4156      * <pre>
4157      **
4158      * The total size of all Bloom filter blocks, not just loaded into the
4159      * block cache, in KB.
4160      * </pre>
4161      */
4162     public int getTotalStaticBloomSizeKB() {
4163       return totalStaticBloomSizeKB_;
4164     }
4165 
4166     // optional uint64 complete_sequence_id = 15;
4167     public static final int COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 15;
4168     private long completeSequenceId_;
4169     /**
4170      * <code>optional uint64 complete_sequence_id = 15;</code>
4171      *
4172      * <pre>
4173      ** the most recent sequence Id from cache flush 
4174      * </pre>
4175      */
4176     public boolean hasCompleteSequenceId() {
4177       return ((bitField0_ & 0x00004000) == 0x00004000);
4178     }
4179     /**
4180      * <code>optional uint64 complete_sequence_id = 15;</code>
4181      *
4182      * <pre>
4183      ** the most recent sequence Id from cache flush 
4184      * </pre>
4185      */
4186     public long getCompleteSequenceId() {
4187       return completeSequenceId_;
4188     }
4189 
4190     // optional float data_locality = 16;
4191     public static final int DATA_LOCALITY_FIELD_NUMBER = 16;
4192     private float dataLocality_;
4193     /**
4194      * <code>optional float data_locality = 16;</code>
4195      *
4196      * <pre>
4197      ** The current data locality for region in the regionserver 
4198      * </pre>
4199      */
4200     public boolean hasDataLocality() {
4201       return ((bitField0_ & 0x00008000) == 0x00008000);
4202     }
4203     /**
4204      * <code>optional float data_locality = 16;</code>
4205      *
4206      * <pre>
4207      ** The current data locality for region in the regionserver 
4208      * </pre>
4209      */
4210     public float getDataLocality() {
4211       return dataLocality_;
4212     }
4213 
4214     // optional uint64 last_major_compaction_ts = 17 [default = 0];
4215     public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
4216     private long lastMajorCompactionTs_;
4217     /**
4218      * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
4219      */
4220     public boolean hasLastMajorCompactionTs() {
4221       return ((bitField0_ & 0x00010000) == 0x00010000);
4222     }
4223     /**
4224      * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
4225      */
4226     public long getLastMajorCompactionTs() {
4227       return lastMajorCompactionTs_;
4228     }
4229 
4230     // repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;
     // Repeated field 18: no presence bit — emptiness is the "unset" state.
     // The list is immutable once the message is built (no mutators here).
4231     public static final int STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 18;
4232     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeCompleteSequenceId_;
4233     /**
4234      * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
4235      *
4236      * <pre>
4237      ** the most recent sequence Id of store from cache flush 
4238      * </pre>
4239      */
4240     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceIdList() {
4241       return storeCompleteSequenceId_;
4242     }
4243     /**
4244      * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
4245      *
4246      * <pre>
4247      ** the most recent sequence Id of store from cache flush 
4248      * </pre>
4249      */
4250     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
4251         getStoreCompleteSequenceIdOrBuilderList() {
4252       return storeCompleteSequenceId_;
4253     }
4254     /**
4255      * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
4256      *
4257      * <pre>
4258      ** the most recent sequence Id of store from cache flush 
4259      * </pre>
4260      */
4261     public int getStoreCompleteSequenceIdCount() {
4262       return storeCompleteSequenceId_.size();
4263     }
4264     /**
4265      * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
4266      *
4267      * <pre>
4268      ** the most recent sequence Id of store from cache flush 
4269      * </pre>
4270      */
4271     public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) {
4272       return storeCompleteSequenceId_.get(index);
4273     }
4274     /**
4275      * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
4276      *
4277      * <pre>
4278      ** the most recent sequence Id of store from cache flush 
4279      * </pre>
4280      */
4281     public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
4282         int index) {
4283       return storeCompleteSequenceId_.get(index);
4284     }
4285 
4286     private void initFields() {
       // Resets every field to its proto2 default (0 / 0L / 0F, empty list,
       // default RegionSpecifier instance); called before parsing populates
       // the message so getters return defaults for unset fields.
4287       regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
4288       stores_ = 0;
4289       storefiles_ = 0;
4290       storeUncompressedSizeMB_ = 0;
4291       storefileSizeMB_ = 0;
4292       memstoreSizeMB_ = 0;
4293       storefileIndexSizeMB_ = 0;
4294       readRequestsCount_ = 0L;
4295       writeRequestsCount_ = 0L;
4296       totalCompactingKVs_ = 0L;
4297       currentCompactedKVs_ = 0L;
4298       rootIndexSizeKB_ = 0;
4299       totalStaticIndexSizeKB_ = 0;
4300       totalStaticBloomSizeKB_ = 0;
4301       completeSequenceId_ = 0L;
4302       dataLocality_ = 0F;
4303       lastMajorCompactionTs_ = 0L;
4304       storeCompleteSequenceId_ = java.util.Collections.emptyList();
4305     }
4306     private byte memoizedIsInitialized = -1;
     // memoizedIsInitialized: -1 = not yet computed, 0 = invalid, 1 = valid.
4307     public final boolean isInitialized() {
       // A RegionLoad is initialized when the required region_specifier is
       // present and initialized, and every repeated StoreSequenceId entry
       // is initialized. The result is cached in memoizedIsInitialized.
4308       byte isInitialized = memoizedIsInitialized;
4309       if (isInitialized != -1) return isInitialized == 1;
4310 
4311       if (!hasRegionSpecifier()) {
4312         memoizedIsInitialized = 0;
4313         return false;
4314       }
4315       if (!getRegionSpecifier().isInitialized()) {
4316         memoizedIsInitialized = 0;
4317         return false;
4318       }
4319       for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) {
4320         if (!getStoreCompleteSequenceId(i).isInitialized()) {
4321           memoizedIsInitialized = 0;
4322           return false;
4323         }
4324       }
4325       memoizedIsInitialized = 1;
4326       return true;
4327     }
4328 
4329     public void writeTo(com.google.protobuf.CodedOutputStream output)
4330                         throws java.io.IOException {
       // Serializes set fields in ascending field-number order (1..18).
       // getSerializedSize() is called first for its side effect of memoizing
       // nested message sizes, which writeMessage() relies on.
4331       getSerializedSize();
4332       if (((bitField0_ & 0x00000001) == 0x00000001)) {
4333         output.writeMessage(1, regionSpecifier_);
4334       }
4335       if (((bitField0_ & 0x00000002) == 0x00000002)) {
4336         output.writeUInt32(2, stores_);
4337       }
4338       if (((bitField0_ & 0x00000004) == 0x00000004)) {
4339         output.writeUInt32(3, storefiles_);
4340       }
4341       if (((bitField0_ & 0x00000008) == 0x00000008)) {
4342         output.writeUInt32(4, storeUncompressedSizeMB_);
4343       }
4344       if (((bitField0_ & 0x00000010) == 0x00000010)) {
4345         output.writeUInt32(5, storefileSizeMB_);
4346       }
4347       if (((bitField0_ & 0x00000020) == 0x00000020)) {
4348         output.writeUInt32(6, memstoreSizeMB_);
4349       }
4350       if (((bitField0_ & 0x00000040) == 0x00000040)) {
4351         output.writeUInt32(7, storefileIndexSizeMB_);
4352       }
4353       if (((bitField0_ & 0x00000080) == 0x00000080)) {
4354         output.writeUInt64(8, readRequestsCount_);
4355       }
4356       if (((bitField0_ & 0x00000100) == 0x00000100)) {
4357         output.writeUInt64(9, writeRequestsCount_);
4358       }
4359       if (((bitField0_ & 0x00000200) == 0x00000200)) {
4360         output.writeUInt64(10, totalCompactingKVs_);
4361       }
4362       if (((bitField0_ & 0x00000400) == 0x00000400)) {
4363         output.writeUInt64(11, currentCompactedKVs_);
4364       }
4365       if (((bitField0_ & 0x00000800) == 0x00000800)) {
4366         output.writeUInt32(12, rootIndexSizeKB_);
4367       }
4368       if (((bitField0_ & 0x00001000) == 0x00001000)) {
4369         output.writeUInt32(13, totalStaticIndexSizeKB_);
4370       }
4371       if (((bitField0_ & 0x00002000) == 0x00002000)) {
4372         output.writeUInt32(14, totalStaticBloomSizeKB_);
4373       }
4374       if (((bitField0_ & 0x00004000) == 0x00004000)) {
4375         output.writeUInt64(15, completeSequenceId_);
4376       }
4377       if (((bitField0_ & 0x00008000) == 0x00008000)) {
4378         output.writeFloat(16, dataLocality_);
4379       }
4380       if (((bitField0_ & 0x00010000) == 0x00010000)) {
4381         output.writeUInt64(17, lastMajorCompactionTs_);
4382       }
4383       for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
4384         output.writeMessage(18, storeCompleteSequenceId_.get(i));
4385       }
       // Unknown fields captured at parse time are round-tripped verbatim.
4386       getUnknownFields().writeTo(output);
4387     }
4388 
4389     private int memoizedSerializedSize = -1;
     // memoizedSerializedSize: -1 = not yet computed; safe to cache because
     // the message is immutable once built.
4390     public int getSerializedSize() {
       // Sums the wire size of every set field (mirroring writeTo's masks and
       // field numbers exactly) plus unknown fields, then memoizes the total.
4391       int size = memoizedSerializedSize;
4392       if (size != -1) return size;
4393 
4394       size = 0;
4395       if (((bitField0_ & 0x00000001) == 0x00000001)) {
4396         size += com.google.protobuf.CodedOutputStream
4397           .computeMessageSize(1, regionSpecifier_);
4398       }
4399       if (((bitField0_ & 0x00000002) == 0x00000002)) {
4400         size += com.google.protobuf.CodedOutputStream
4401           .computeUInt32Size(2, stores_);
4402       }
4403       if (((bitField0_ & 0x00000004) == 0x00000004)) {
4404         size += com.google.protobuf.CodedOutputStream
4405           .computeUInt32Size(3, storefiles_);
4406       }
4407       if (((bitField0_ & 0x00000008) == 0x00000008)) {
4408         size += com.google.protobuf.CodedOutputStream
4409           .computeUInt32Size(4, storeUncompressedSizeMB_);
4410       }
4411       if (((bitField0_ & 0x00000010) == 0x00000010)) {
4412         size += com.google.protobuf.CodedOutputStream
4413           .computeUInt32Size(5, storefileSizeMB_);
4414       }
4415       if (((bitField0_ & 0x00000020) == 0x00000020)) {
4416         size += com.google.protobuf.CodedOutputStream
4417           .computeUInt32Size(6, memstoreSizeMB_);
4418       }
4419       if (((bitField0_ & 0x00000040) == 0x00000040)) {
4420         size += com.google.protobuf.CodedOutputStream
4421           .computeUInt32Size(7, storefileIndexSizeMB_);
4422       }
4423       if (((bitField0_ & 0x00000080) == 0x00000080)) {
4424         size += com.google.protobuf.CodedOutputStream
4425           .computeUInt64Size(8, readRequestsCount_);
4426       }
4427       if (((bitField0_ & 0x00000100) == 0x00000100)) {
4428         size += com.google.protobuf.CodedOutputStream
4429           .computeUInt64Size(9, writeRequestsCount_);
4430       }
4431       if (((bitField0_ & 0x00000200) == 0x00000200)) {
4432         size += com.google.protobuf.CodedOutputStream
4433           .computeUInt64Size(10, totalCompactingKVs_);
4434       }
4435       if (((bitField0_ & 0x00000400) == 0x00000400)) {
4436         size += com.google.protobuf.CodedOutputStream
4437           .computeUInt64Size(11, currentCompactedKVs_);
4438       }
4439       if (((bitField0_ & 0x00000800) == 0x00000800)) {
4440         size += com.google.protobuf.CodedOutputStream
4441           .computeUInt32Size(12, rootIndexSizeKB_);
4442       }
4443       if (((bitField0_ & 0x00001000) == 0x00001000)) {
4444         size += com.google.protobuf.CodedOutputStream
4445           .computeUInt32Size(13, totalStaticIndexSizeKB_);
4446       }
4447       if (((bitField0_ & 0x00002000) == 0x00002000)) {
4448         size += com.google.protobuf.CodedOutputStream
4449           .computeUInt32Size(14, totalStaticBloomSizeKB_);
4450       }
4451       if (((bitField0_ & 0x00004000) == 0x00004000)) {
4452         size += com.google.protobuf.CodedOutputStream
4453           .computeUInt64Size(15, completeSequenceId_);
4454       }
4455       if (((bitField0_ & 0x00008000) == 0x00008000)) {
4456         size += com.google.protobuf.CodedOutputStream
4457           .computeFloatSize(16, dataLocality_);
4458       }
4459       if (((bitField0_ & 0x00010000) == 0x00010000)) {
4460         size += com.google.protobuf.CodedOutputStream
4461           .computeUInt64Size(17, lastMajorCompactionTs_);
4462       }
4463       for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
4464         size += com.google.protobuf.CodedOutputStream
4465           .computeMessageSize(18, storeCompleteSequenceId_.get(i));
4466       }
4467       size += getUnknownFields().getSerializedSize();
4468       memoizedSerializedSize = size;
4469       return size;
4470     }
4471 
4472     private static final long serialVersionUID = 0L;
4473     @java.lang.Override
4474     protected java.lang.Object writeReplace()
4475         throws java.io.ObjectStreamException {
       // Java serialization hook: delegates to GeneratedMessage, which
       // substitutes a proxy so the message is serialized via its wire form.
4476       return super.writeReplace();
4477     }
4478 
4479     @java.lang.Override
4480     public boolean equals(final java.lang.Object obj) {
       // Field-by-field equality: two messages are equal when each field has
       // the same presence and, where present, the same value; float
       // data_locality is compared bitwise via floatToIntBits. Unknown fields
       // must match too. Kept in sync with hashCode() below.
4481       if (obj == this) {
4482        return true;
4483       }
4484       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad)) {
4485         return super.equals(obj);
4486       }
4487       org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) obj;
4488 
4489       boolean result = true;
4490       result = result && (hasRegionSpecifier() == other.hasRegionSpecifier());
4491       if (hasRegionSpecifier()) {
4492         result = result && getRegionSpecifier()
4493             .equals(other.getRegionSpecifier());
4494       }
4495       result = result && (hasStores() == other.hasStores());
4496       if (hasStores()) {
4497         result = result && (getStores()
4498             == other.getStores());
4499       }
4500       result = result && (hasStorefiles() == other.hasStorefiles());
4501       if (hasStorefiles()) {
4502         result = result && (getStorefiles()
4503             == other.getStorefiles());
4504       }
4505       result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB());
4506       if (hasStoreUncompressedSizeMB()) {
4507         result = result && (getStoreUncompressedSizeMB()
4508             == other.getStoreUncompressedSizeMB());
4509       }
4510       result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB());
4511       if (hasStorefileSizeMB()) {
4512         result = result && (getStorefileSizeMB()
4513             == other.getStorefileSizeMB());
4514       }
4515       result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB());
4516       if (hasMemstoreSizeMB()) {
4517         result = result && (getMemstoreSizeMB()
4518             == other.getMemstoreSizeMB());
4519       }
4520       result = result && (hasStorefileIndexSizeMB() == other.hasStorefileIndexSizeMB());
4521       if (hasStorefileIndexSizeMB()) {
4522         result = result && (getStorefileIndexSizeMB()
4523             == other.getStorefileIndexSizeMB());
4524       }
4525       result = result && (hasReadRequestsCount() == other.hasReadRequestsCount());
4526       if (hasReadRequestsCount()) {
4527         result = result && (getReadRequestsCount()
4528             == other.getReadRequestsCount());
4529       }
4530       result = result && (hasWriteRequestsCount() == other.hasWriteRequestsCount());
4531       if (hasWriteRequestsCount()) {
4532         result = result && (getWriteRequestsCount()
4533             == other.getWriteRequestsCount());
4534       }
4535       result = result && (hasTotalCompactingKVs() == other.hasTotalCompactingKVs());
4536       if (hasTotalCompactingKVs()) {
4537         result = result && (getTotalCompactingKVs()
4538             == other.getTotalCompactingKVs());
4539       }
4540       result = result && (hasCurrentCompactedKVs() == other.hasCurrentCompactedKVs());
4541       if (hasCurrentCompactedKVs()) {
4542         result = result && (getCurrentCompactedKVs()
4543             == other.getCurrentCompactedKVs());
4544       }
4545       result = result && (hasRootIndexSizeKB() == other.hasRootIndexSizeKB());
4546       if (hasRootIndexSizeKB()) {
4547         result = result && (getRootIndexSizeKB()
4548             == other.getRootIndexSizeKB());
4549       }
4550       result = result && (hasTotalStaticIndexSizeKB() == other.hasTotalStaticIndexSizeKB());
4551       if (hasTotalStaticIndexSizeKB()) {
4552         result = result && (getTotalStaticIndexSizeKB()
4553             == other.getTotalStaticIndexSizeKB());
4554       }
4555       result = result && (hasTotalStaticBloomSizeKB() == other.hasTotalStaticBloomSizeKB());
4556       if (hasTotalStaticBloomSizeKB()) {
4557         result = result && (getTotalStaticBloomSizeKB()
4558             == other.getTotalStaticBloomSizeKB());
4559       }
4560       result = result && (hasCompleteSequenceId() == other.hasCompleteSequenceId());
4561       if (hasCompleteSequenceId()) {
4562         result = result && (getCompleteSequenceId()
4563             == other.getCompleteSequenceId());
4564       }
4565       result = result && (hasDataLocality() == other.hasDataLocality());
4566       if (hasDataLocality()) {
4567         result = result && (Float.floatToIntBits(getDataLocality())    == Float.floatToIntBits(other.getDataLocality()));
4568       }
4569       result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
4570       if (hasLastMajorCompactionTs()) {
4571         result = result && (getLastMajorCompactionTs()
4572             == other.getLastMajorCompactionTs());
4573       }
4574       result = result && getStoreCompleteSequenceIdList()
4575           .equals(other.getStoreCompleteSequenceIdList());
4576       result = result &&
4577           getUnknownFields().equals(other.getUnknownFields());
4578       return result;
4579     }
4580 
4581     private int memoizedHashCode = 0;
     // memoizedHashCode: 0 = not yet computed (valid because a real hash of 0
     // merely recomputes); safe to cache since the message is immutable.
4582     @java.lang.Override
4583     public int hashCode() {
       // Mixes the descriptor type, then (field number, value) for each set
       // field using the 37/53 multiplier scheme, matching the fields
       // compared in equals(): longs via hashLong, data_locality via
       // floatToIntBits, repeated field 18 via its list hashCode.
4584       if (memoizedHashCode != 0) {
4585         return memoizedHashCode;
4586       }
4587       int hash = 41;
4588       hash = (19 * hash) + getDescriptorForType().hashCode();
4589       if (hasRegionSpecifier()) {
4590         hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER;
4591         hash = (53 * hash) + getRegionSpecifier().hashCode();
4592       }
4593       if (hasStores()) {
4594         hash = (37 * hash) + STORES_FIELD_NUMBER;
4595         hash = (53 * hash) + getStores();
4596       }
4597       if (hasStorefiles()) {
4598         hash = (37 * hash) + STOREFILES_FIELD_NUMBER;
4599         hash = (53 * hash) + getStorefiles();
4600       }
4601       if (hasStoreUncompressedSizeMB()) {
4602         hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER;
4603         hash = (53 * hash) + getStoreUncompressedSizeMB();
4604       }
4605       if (hasStorefileSizeMB()) {
4606         hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER;
4607         hash = (53 * hash) + getStorefileSizeMB();
4608       }
4609       if (hasMemstoreSizeMB()) {
4610         hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER;
4611         hash = (53 * hash) + getMemstoreSizeMB();
4612       }
4613       if (hasStorefileIndexSizeMB()) {
4614         hash = (37 * hash) + STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER;
4615         hash = (53 * hash) + getStorefileIndexSizeMB();
4616       }
4617       if (hasReadRequestsCount()) {
4618         hash = (37 * hash) + READ_REQUESTS_COUNT_FIELD_NUMBER;
4619         hash = (53 * hash) + hashLong(getReadRequestsCount());
4620       }
4621       if (hasWriteRequestsCount()) {
4622         hash = (37 * hash) + WRITE_REQUESTS_COUNT_FIELD_NUMBER;
4623         hash = (53 * hash) + hashLong(getWriteRequestsCount());
4624       }
4625       if (hasTotalCompactingKVs()) {
4626         hash = (37 * hash) + TOTAL_COMPACTING_KVS_FIELD_NUMBER;
4627         hash = (53 * hash) + hashLong(getTotalCompactingKVs());
4628       }
4629       if (hasCurrentCompactedKVs()) {
4630         hash = (37 * hash) + CURRENT_COMPACTED_KVS_FIELD_NUMBER;
4631         hash = (53 * hash) + hashLong(getCurrentCompactedKVs());
4632       }
4633       if (hasRootIndexSizeKB()) {
4634         hash = (37 * hash) + ROOT_INDEX_SIZE_KB_FIELD_NUMBER;
4635         hash = (53 * hash) + getRootIndexSizeKB();
4636       }
4637       if (hasTotalStaticIndexSizeKB()) {
4638         hash = (37 * hash) + TOTAL_STATIC_INDEX_SIZE_KB_FIELD_NUMBER;
4639         hash = (53 * hash) + getTotalStaticIndexSizeKB();
4640       }
4641       if (hasTotalStaticBloomSizeKB()) {
4642         hash = (37 * hash) + TOTAL_STATIC_BLOOM_SIZE_KB_FIELD_NUMBER;
4643         hash = (53 * hash) + getTotalStaticBloomSizeKB();
4644       }
4645       if (hasCompleteSequenceId()) {
4646         hash = (37 * hash) + COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
4647         hash = (53 * hash) + hashLong(getCompleteSequenceId());
4648       }
4649       if (hasDataLocality()) {
4650         hash = (37 * hash) + DATA_LOCALITY_FIELD_NUMBER;
4651         hash = (53 * hash) + Float.floatToIntBits(
4652             getDataLocality());
4653       }
4654       if (hasLastMajorCompactionTs()) {
4655         hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
4656         hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
4657       }
4658       if (getStoreCompleteSequenceIdCount() > 0) {
4659         hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
4660         hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode();
4661       }
4662       hash = (29 * hash) + getUnknownFields().hashCode();
4663       memoizedHashCode = hash;
4664       return hash;
4665     }
4666 
// Static parse entry points for RegionLoad. Every overload delegates to the
// message's shared PARSER instance; the ExtensionRegistryLite overloads allow
// extension fields to be recognized while parsing.
4667     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4668         com.google.protobuf.ByteString data)
4669         throws com.google.protobuf.InvalidProtocolBufferException {
4670       return PARSER.parseFrom(data);
4671     }
4672     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4673         com.google.protobuf.ByteString data,
4674         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4675         throws com.google.protobuf.InvalidProtocolBufferException {
4676       return PARSER.parseFrom(data, extensionRegistry);
4677     }
4678     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(byte[] data)
4679         throws com.google.protobuf.InvalidProtocolBufferException {
4680       return PARSER.parseFrom(data);
4681     }
4682     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4683         byte[] data,
4684         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4685         throws com.google.protobuf.InvalidProtocolBufferException {
4686       return PARSER.parseFrom(data, extensionRegistry);
4687     }
4688     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(java.io.InputStream input)
4689         throws java.io.IOException {
4690       return PARSER.parseFrom(input);
4691     }
4692     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4693         java.io.InputStream input,
4694         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4695         throws java.io.IOException {
4696       return PARSER.parseFrom(input, extensionRegistry);
4697     }
// Delimited variants delegate to PARSER.parseDelimitedFrom (length-prefixed
// wire format, per the protobuf MessageLite javadoc).
4698     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseDelimitedFrom(java.io.InputStream input)
4699         throws java.io.IOException {
4700       return PARSER.parseDelimitedFrom(input);
4701     }
4702     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseDelimitedFrom(
4703         java.io.InputStream input,
4704         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4705         throws java.io.IOException {
4706       return PARSER.parseDelimitedFrom(input, extensionRegistry);
4707     }
4708     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4709         com.google.protobuf.CodedInputStream input)
4710         throws java.io.IOException {
4711       return PARSER.parseFrom(input);
4712     }
4713     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
4714         com.google.protobuf.CodedInputStream input,
4715         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4716         throws java.io.IOException {
4717       return PARSER.parseFrom(input, extensionRegistry);
4718     }
4719 
// Builder factories: newBuilder() yields an empty builder; the prototype
// overload pre-populates it by merging an existing RegionLoad, and
// toBuilder() does the same for this instance.
4720     public static Builder newBuilder() { return Builder.create(); }
4721     public Builder newBuilderForType() { return newBuilder(); }
4722     public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad prototype) {
4723       return newBuilder().mergeFrom(prototype);
4724     }
4725     public Builder toBuilder() { return newBuilder(this); }
4726 
// Runtime hook: creates a child builder attached to the given parent so the
// protobuf runtime can propagate change notifications.
4727     @java.lang.Override
4728     protected Builder newBuilderForType(
4729         com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4730       Builder builder = new Builder(parent);
4731       return builder;
4732     }
4733     /**
4734      * Protobuf type {@code hbase.pb.RegionLoad}
4735      */
4736     public static final class Builder extends
4737         com.google.protobuf.GeneratedMessage.Builder<Builder>
4738        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder {
// Descriptor plumbing: exposes the RegionLoad message descriptor and binds
// the reflective field-accessor table to the message/builder classes.
4739       public static final com.google.protobuf.Descriptors.Descriptor
4740           getDescriptor() {
4741         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionLoad_descriptor;
4742       }
4743 
4744       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4745           internalGetFieldAccessorTable() {
4746         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionLoad_fieldAccessorTable
4747             .ensureFieldAccessorsInitialized(
4748                 org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder.class);
4749       }
4750 
4751       // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.newBuilder()
4752       private Builder() {
4753         maybeForceBuilderInitialization();
4754       }
4755 
// Parent-attached constructor used by newBuilderForType(parent).
4756       private Builder(
4757           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4758         super(parent);
4759         maybeForceBuilderInitialization();
4760       }
// Eagerly creates the nested-field builders when the protobuf runtime's
// alwaysUseFieldBuilders flag is set; otherwise they are created lazily.
4761       private void maybeForceBuilderInitialization() {
4762         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4763           getRegionSpecifierFieldBuilder();
4764           getStoreCompleteSequenceIdFieldBuilder();
4765         }
4766       }
4767       private static Builder create() {
4768         return new Builder();
4769       }
4770 
// Resets every field to its proto default (0 / 0L / 0F / default message /
// empty list) and clears the corresponding has-bit in bitField0_.
4771       public Builder clear() {
4772         super.clear();
4773         if (regionSpecifierBuilder_ == null) {
4774           regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
4775         } else {
4776           regionSpecifierBuilder_.clear();
4777         }
4778         bitField0_ = (bitField0_ & ~0x00000001);
4779         stores_ = 0;
4780         bitField0_ = (bitField0_ & ~0x00000002);
4781         storefiles_ = 0;
4782         bitField0_ = (bitField0_ & ~0x00000004);
4783         storeUncompressedSizeMB_ = 0;
4784         bitField0_ = (bitField0_ & ~0x00000008);
4785         storefileSizeMB_ = 0;
4786         bitField0_ = (bitField0_ & ~0x00000010);
4787         memstoreSizeMB_ = 0;
4788         bitField0_ = (bitField0_ & ~0x00000020);
4789         storefileIndexSizeMB_ = 0;
4790         bitField0_ = (bitField0_ & ~0x00000040);
4791         readRequestsCount_ = 0L;
4792         bitField0_ = (bitField0_ & ~0x00000080);
4793         writeRequestsCount_ = 0L;
4794         bitField0_ = (bitField0_ & ~0x00000100);
4795         totalCompactingKVs_ = 0L;
4796         bitField0_ = (bitField0_ & ~0x00000200);
4797         currentCompactedKVs_ = 0L;
4798         bitField0_ = (bitField0_ & ~0x00000400);
4799         rootIndexSizeKB_ = 0;
4800         bitField0_ = (bitField0_ & ~0x00000800);
4801         totalStaticIndexSizeKB_ = 0;
4802         bitField0_ = (bitField0_ & ~0x00001000);
4803         totalStaticBloomSizeKB_ = 0;
4804         bitField0_ = (bitField0_ & ~0x00002000);
4805         completeSequenceId_ = 0L;
4806         bitField0_ = (bitField0_ & ~0x00004000);
4807         dataLocality_ = 0F;
4808         bitField0_ = (bitField0_ & ~0x00008000);
4809         lastMajorCompactionTs_ = 0L;
4810         bitField0_ = (bitField0_ & ~0x00010000);
4811         if (storeCompleteSequenceIdBuilder_ == null) {
4812           storeCompleteSequenceId_ = java.util.Collections.emptyList();
4813           bitField0_ = (bitField0_ & ~0x00020000);
4814         } else {
4815           storeCompleteSequenceIdBuilder_.clear();
4816         }
4817         return this;
4818       }
4819 
// Deep copy: a fresh builder seeded from a partial build of this one.
4820       public Builder clone() {
4821         return create().mergeFrom(buildPartial());
4822       }
4823 
// Reflection accessors: the message descriptor for this builder's type and
// the shared immutable default RegionLoad instance.
4824       public com.google.protobuf.Descriptors.Descriptor
4825           getDescriptorForType() {
4826         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_RegionLoad_descriptor;
4827       }
4828 
4829       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getDefaultInstanceForType() {
4830         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance();
4831       }
4832 
// Builds the message and throws (via newUninitializedMessageException) if
// isInitialized() fails — e.g. the required region_specifier is unset.
4833       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad build() {
4834         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = buildPartial();
4835         if (!result.isInitialized()) {
4836           throw newUninitializedMessageException(result);
4837         }
4838         return result;
4839       }
4840 
// Copies the builder's state into a new message WITHOUT checking required
// fields. Each builder has-bit in from_bitField0_ is translated one-for-one
// into the message's to_bitField0_; the repeated store_complete_sequence_id
// list is frozen (unmodifiable) the first time it is handed to a message.
4841       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() {
4842         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this);
4843         int from_bitField0_ = bitField0_;
4844         int to_bitField0_ = 0;
4845         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4846           to_bitField0_ |= 0x00000001;
4847         }
4848         if (regionSpecifierBuilder_ == null) {
4849           result.regionSpecifier_ = regionSpecifier_;
4850         } else {
4851           result.regionSpecifier_ = regionSpecifierBuilder_.build();
4852         }
4853         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
4854           to_bitField0_ |= 0x00000002;
4855         }
4856         result.stores_ = stores_;
4857         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
4858           to_bitField0_ |= 0x00000004;
4859         }
4860         result.storefiles_ = storefiles_;
4861         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
4862           to_bitField0_ |= 0x00000008;
4863         }
4864         result.storeUncompressedSizeMB_ = storeUncompressedSizeMB_;
4865         if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
4866           to_bitField0_ |= 0x00000010;
4867         }
4868         result.storefileSizeMB_ = storefileSizeMB_;
4869         if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
4870           to_bitField0_ |= 0x00000020;
4871         }
4872         result.memstoreSizeMB_ = memstoreSizeMB_;
4873         if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
4874           to_bitField0_ |= 0x00000040;
4875         }
4876         result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
4877         if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
4878           to_bitField0_ |= 0x00000080;
4879         }
4880         result.readRequestsCount_ = readRequestsCount_;
4881         if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
4882           to_bitField0_ |= 0x00000100;
4883         }
4884         result.writeRequestsCount_ = writeRequestsCount_;
4885         if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
4886           to_bitField0_ |= 0x00000200;
4887         }
4888         result.totalCompactingKVs_ = totalCompactingKVs_;
4889         if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
4890           to_bitField0_ |= 0x00000400;
4891         }
4892         result.currentCompactedKVs_ = currentCompactedKVs_;
4893         if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
4894           to_bitField0_ |= 0x00000800;
4895         }
4896         result.rootIndexSizeKB_ = rootIndexSizeKB_;
4897         if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
4898           to_bitField0_ |= 0x00001000;
4899         }
4900         result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
4901         if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
4902           to_bitField0_ |= 0x00002000;
4903         }
4904         result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
4905         if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
4906           to_bitField0_ |= 0x00004000;
4907         }
4908         result.completeSequenceId_ = completeSequenceId_;
4909         if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
4910           to_bitField0_ |= 0x00008000;
4911         }
4912         result.dataLocality_ = dataLocality_;
4913         if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
4914           to_bitField0_ |= 0x00010000;
4915         }
4916         result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
// Repeated field: hand the message an unmodifiable view and drop the
// builder's ownership bit so later mutations re-copy the list.
4917         if (storeCompleteSequenceIdBuilder_ == null) {
4918           if (((bitField0_ & 0x00020000) == 0x00020000)) {
4919             storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
4920             bitField0_ = (bitField0_ & ~0x00020000);
4921           }
4922           result.storeCompleteSequenceId_ = storeCompleteSequenceId_;
4923         } else {
4924           result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build();
4925         }
4926         result.bitField0_ = to_bitField0_;
4927         onBuilt();
4928         return result;
4929       }
4930 
// Type-dispatching merge: uses the fast typed path for RegionLoad and the
// generic reflective path for any other Message.
4931       public Builder mergeFrom(com.google.protobuf.Message other) {
4932         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) {
4933           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad)other);
4934         } else {
4935           super.mergeFrom(other);
4936           return this;
4937         }
4938       }
4939 
// Field-by-field merge: every set field of `other` overwrites (scalars) or
// merges into (region_specifier) this builder; repeated
// store_complete_sequence_id entries are appended. Default instance is a
// no-op fast path.
4940       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
4941         if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
4942         if (other.hasRegionSpecifier()) {
4943           mergeRegionSpecifier(other.getRegionSpecifier());
4944         }
4945         if (other.hasStores()) {
4946           setStores(other.getStores());
4947         }
4948         if (other.hasStorefiles()) {
4949           setStorefiles(other.getStorefiles());
4950         }
4951         if (other.hasStoreUncompressedSizeMB()) {
4952           setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
4953         }
4954         if (other.hasStorefileSizeMB()) {
4955           setStorefileSizeMB(other.getStorefileSizeMB());
4956         }
4957         if (other.hasMemstoreSizeMB()) {
4958           setMemstoreSizeMB(other.getMemstoreSizeMB());
4959         }
4960         if (other.hasStorefileIndexSizeMB()) {
4961           setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
4962         }
4963         if (other.hasReadRequestsCount()) {
4964           setReadRequestsCount(other.getReadRequestsCount());
4965         }
4966         if (other.hasWriteRequestsCount()) {
4967           setWriteRequestsCount(other.getWriteRequestsCount());
4968         }
4969         if (other.hasTotalCompactingKVs()) {
4970           setTotalCompactingKVs(other.getTotalCompactingKVs());
4971         }
4972         if (other.hasCurrentCompactedKVs()) {
4973           setCurrentCompactedKVs(other.getCurrentCompactedKVs());
4974         }
4975         if (other.hasRootIndexSizeKB()) {
4976           setRootIndexSizeKB(other.getRootIndexSizeKB());
4977         }
4978         if (other.hasTotalStaticIndexSizeKB()) {
4979           setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
4980         }
4981         if (other.hasTotalStaticBloomSizeKB()) {
4982           setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
4983         }
4984         if (other.hasCompleteSequenceId()) {
4985           setCompleteSequenceId(other.getCompleteSequenceId());
4986         }
4987         if (other.hasDataLocality()) {
4988           setDataLocality(other.getDataLocality());
4989         }
4990         if (other.hasLastMajorCompactionTs()) {
4991           setLastMajorCompactionTs(other.getLastMajorCompactionTs());
4992         }
// Repeated field: when this builder's list is empty, share other's (already
// immutable) list; otherwise copy-on-write and append.
4993         if (storeCompleteSequenceIdBuilder_ == null) {
4994           if (!other.storeCompleteSequenceId_.isEmpty()) {
4995             if (storeCompleteSequenceId_.isEmpty()) {
4996               storeCompleteSequenceId_ = other.storeCompleteSequenceId_;
4997               bitField0_ = (bitField0_ & ~0x00020000);
4998             } else {
4999               ensureStoreCompleteSequenceIdIsMutable();
5000               storeCompleteSequenceId_.addAll(other.storeCompleteSequenceId_);
5001             }
5002             onChanged();
5003           }
5004         } else {
5005           if (!other.storeCompleteSequenceId_.isEmpty()) {
5006             if (storeCompleteSequenceIdBuilder_.isEmpty()) {
5007               storeCompleteSequenceIdBuilder_.dispose();
5008               storeCompleteSequenceIdBuilder_ = null;
5009               storeCompleteSequenceId_ = other.storeCompleteSequenceId_;
5010               bitField0_ = (bitField0_ & ~0x00020000);
5011               storeCompleteSequenceIdBuilder_ = 
5012                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
5013                    getStoreCompleteSequenceIdFieldBuilder() : null;
5014             } else {
5015               storeCompleteSequenceIdBuilder_.addAllMessages(other.storeCompleteSequenceId_);
5016             }
5017           }
5018         }
5019         this.mergeUnknownFields(other.getUnknownFields());
5020         return this;
5021       }
5022 
// True only when the required region_specifier is set and it, plus every
// repeated store_complete_sequence_id entry, is itself initialized.
5023       public final boolean isInitialized() {
5024         if (!hasRegionSpecifier()) {
5025           
5026           return false;
5027         }
5028         if (!getRegionSpecifier().isInitialized()) {
5029           
5030           return false;
5031         }
5032         for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) {
5033           if (!getStoreCompleteSequenceId(i).isInitialized()) {
5034             
5035             return false;
5036           }
5037         }
5038         return true;
5039       }
5040 
// Parses a RegionLoad from the stream and merges it into this builder.
// On InvalidProtocolBufferException the partially-parsed message (from
// e.getUnfinishedMessage()) is still merged in the finally block before
// the exception is rethrown.
5041       public Builder mergeFrom(
5042           com.google.protobuf.CodedInputStream input,
5043           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5044           throws java.io.IOException {
5045         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parsedMessage = null;
5046         try {
5047           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5048         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5049           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) e.getUnfinishedMessage();
5050           throw e;
5051         } finally {
5052           if (parsedMessage != null) {
5053             mergeFrom(parsedMessage);
5054           }
5055         }
5056         return this;
5057       }
// bitField0_ holds one "has" bit per field (0x00000001 = region_specifier,
// 0x00000002 = stores, ... as seen in the accessors below).
5058       private int bitField0_;
5059 
5060       // required .hbase.pb.RegionSpecifier region_specifier = 1;
// Dual storage: regionSpecifier_ is used until a SingleFieldBuilder is
// created; afterwards regionSpecifierBuilder_ is authoritative.
5061       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
5062       private com.google.protobuf.SingleFieldBuilder<
5063           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionSpecifierBuilder_;
5064       /**
5065        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5066        *
5067        * <pre>
5068        ** the region specifier 
5069        * </pre>
5070        */
5071       public boolean hasRegionSpecifier() {
5072         return ((bitField0_ & 0x00000001) == 0x00000001);
5073       }
5074       /**
5075        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5076        *
5077        * <pre>
5078        ** the region specifier 
5079        * </pre>
5080        */
5081       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() {
5082         if (regionSpecifierBuilder_ == null) {
5083           return regionSpecifier_;
5084         } else {
5085           return regionSpecifierBuilder_.getMessage();
5086         }
5087       }
5088       /**
5089        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5090        *
5091        * <pre>
5092        ** the region specifier 
5093        * </pre>
5094        */
5095       public Builder setRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
5096         if (regionSpecifierBuilder_ == null) {
5097           if (value == null) {
5098             throw new NullPointerException();
5099           }
5100           regionSpecifier_ = value;
5101           onChanged();
5102         } else {
5103           regionSpecifierBuilder_.setMessage(value);
5104         }
5105         bitField0_ |= 0x00000001;
5106         return this;
5107       }
5108       /**
5109        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5110        *
5111        * <pre>
5112        ** the region specifier 
5113        * </pre>
5114        */
5115       public Builder setRegionSpecifier(
5116           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
5117         if (regionSpecifierBuilder_ == null) {
5118           regionSpecifier_ = builderForValue.build();
5119           onChanged();
5120         } else {
5121           regionSpecifierBuilder_.setMessage(builderForValue.build());
5122         }
5123         bitField0_ |= 0x00000001;
5124         return this;
5125       }
5126       /**
5127        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5128        *
5129        * <pre>
5130        ** the region specifier 
5131        * </pre>
5132        */
// Merge semantics: if a non-default value is already set, field-merge the
// two messages; otherwise simply adopt `value`.
5133       public Builder mergeRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
5134         if (regionSpecifierBuilder_ == null) {
5135           if (((bitField0_ & 0x00000001) == 0x00000001) &&
5136               regionSpecifier_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
5137             regionSpecifier_ =
5138               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionSpecifier_).mergeFrom(value).buildPartial();
5139           } else {
5140             regionSpecifier_ = value;
5141           }
5142           onChanged();
5143         } else {
5144           regionSpecifierBuilder_.mergeFrom(value);
5145         }
5146         bitField0_ |= 0x00000001;
5147         return this;
5148       }
5149       /**
5150        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5151        *
5152        * <pre>
5153        ** the region specifier 
5154        * </pre>
5155        */
5156       public Builder clearRegionSpecifier() {
5157         if (regionSpecifierBuilder_ == null) {
5158           regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
5159           onChanged();
5160         } else {
5161           regionSpecifierBuilder_.clear();
5162         }
5163         bitField0_ = (bitField0_ & ~0x00000001);
5164         return this;
5165       }
5166       /**
5167        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5168        *
5169        * <pre>
5170        ** the region specifier 
5171        * </pre>
5172        */
// Marks the field set and hands out a mutable nested builder.
5173       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionSpecifierBuilder() {
5174         bitField0_ |= 0x00000001;
5175         onChanged();
5176         return getRegionSpecifierFieldBuilder().getBuilder();
5177       }
5178       /**
5179        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5180        *
5181        * <pre>
5182        ** the region specifier 
5183        * </pre>
5184        */
5185       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() {
5186         if (regionSpecifierBuilder_ != null) {
5187           return regionSpecifierBuilder_.getMessageOrBuilder();
5188         } else {
5189           return regionSpecifier_;
5190         }
5191       }
5192       /**
5193        * <code>required .hbase.pb.RegionSpecifier region_specifier = 1;</code>
5194        *
5195        * <pre>
5196        ** the region specifier 
5197        * </pre>
5198        */
// Lazily creates the SingleFieldBuilder; once created, regionSpecifier_ is
// nulled and the builder owns the field's state.
5199       private com.google.protobuf.SingleFieldBuilder<
5200           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
5201           getRegionSpecifierFieldBuilder() {
5202         if (regionSpecifierBuilder_ == null) {
5203           regionSpecifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5204               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
5205                   regionSpecifier_,
5206                   getParentForChildren(),
5207                   isClean());
5208           regionSpecifier_ = null;
5209         }
5210         return regionSpecifierBuilder_;
5211       }
5212 
// Scalar accessor quartet (has/get/set/clear) for `stores`; has-bit 0x00000002.
5213       // optional uint32 stores = 2;
5214       private int stores_ ;
5215       /**
5216        * <code>optional uint32 stores = 2;</code>
5217        *
5218        * <pre>
5219        ** the number of stores for the region 
5220        * </pre>
5221        */
5222       public boolean hasStores() {
5223         return ((bitField0_ & 0x00000002) == 0x00000002);
5224       }
5225       /**
5226        * <code>optional uint32 stores = 2;</code>
5227        *
5228        * <pre>
5229        ** the number of stores for the region 
5230        * </pre>
5231        */
5232       public int getStores() {
5233         return stores_;
5234       }
5235       /**
5236        * <code>optional uint32 stores = 2;</code>
5237        *
5238        * <pre>
5239        ** the number of stores for the region 
5240        * </pre>
5241        */
5242       public Builder setStores(int value) {
5243         bitField0_ |= 0x00000002;
5244         stores_ = value;
5245         onChanged();
5246         return this;
5247       }
5248       /**
5249        * <code>optional uint32 stores = 2;</code>
5250        *
5251        * <pre>
5252        ** the number of stores for the region 
5253        * </pre>
5254        */
5255       public Builder clearStores() {
5256         bitField0_ = (bitField0_ & ~0x00000002);
5257         stores_ = 0;
5258         onChanged();
5259         return this;
5260       }
5261 
// Scalar accessor quartet for `storefiles`; has-bit 0x00000004.
5262       // optional uint32 storefiles = 3;
5263       private int storefiles_ ;
5264       /**
5265        * <code>optional uint32 storefiles = 3;</code>
5266        *
5267        * <pre>
5268        ** the number of storefiles for the region 
5269        * </pre>
5270        */
5271       public boolean hasStorefiles() {
5272         return ((bitField0_ & 0x00000004) == 0x00000004);
5273       }
5274       /**
5275        * <code>optional uint32 storefiles = 3;</code>
5276        *
5277        * <pre>
5278        ** the number of storefiles for the region 
5279        * </pre>
5280        */
5281       public int getStorefiles() {
5282         return storefiles_;
5283       }
5284       /**
5285        * <code>optional uint32 storefiles = 3;</code>
5286        *
5287        * <pre>
5288        ** the number of storefiles for the region 
5289        * </pre>
5290        */
5291       public Builder setStorefiles(int value) {
5292         bitField0_ |= 0x00000004;
5293         storefiles_ = value;
5294         onChanged();
5295         return this;
5296       }
5297       /**
5298        * <code>optional uint32 storefiles = 3;</code>
5299        *
5300        * <pre>
5301        ** the number of storefiles for the region 
5302        * </pre>
5303        */
5304       public Builder clearStorefiles() {
5305         bitField0_ = (bitField0_ & ~0x00000004);
5306         storefiles_ = 0;
5307         onChanged();
5308         return this;
5309       }
5310 
// Scalar accessor quartet for `store_uncompressed_size_MB`; has-bit 0x00000008.
5311       // optional uint32 store_uncompressed_size_MB = 4;
5312       private int storeUncompressedSizeMB_ ;
5313       /**
5314        * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
5315        *
5316        * <pre>
5317        ** the total size of the store files for the region, uncompressed, in MB 
5318        * </pre>
5319        */
5320       public boolean hasStoreUncompressedSizeMB() {
5321         return ((bitField0_ & 0x00000008) == 0x00000008);
5322       }
5323       /**
5324        * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
5325        *
5326        * <pre>
5327        ** the total size of the store files for the region, uncompressed, in MB 
5328        * </pre>
5329        */
5330       public int getStoreUncompressedSizeMB() {
5331         return storeUncompressedSizeMB_;
5332       }
5333       /**
5334        * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
5335        *
5336        * <pre>
5337        ** the total size of the store files for the region, uncompressed, in MB 
5338        * </pre>
5339        */
5340       public Builder setStoreUncompressedSizeMB(int value) {
5341         bitField0_ |= 0x00000008;
5342         storeUncompressedSizeMB_ = value;
5343         onChanged();
5344         return this;
5345       }
5346       /**
5347        * <code>optional uint32 store_uncompressed_size_MB = 4;</code>
5348        *
5349        * <pre>
5350        ** the total size of the store files for the region, uncompressed, in MB 
5351        * </pre>
5352        */
5353       public Builder clearStoreUncompressedSizeMB() {
5354         bitField0_ = (bitField0_ & ~0x00000008);
5355         storeUncompressedSizeMB_ = 0;
5356         onChanged();
5357         return this;
5358       }
5359 
// Scalar accessor quartet for `storefile_size_MB`; has-bit 0x00000010.
5360       // optional uint32 storefile_size_MB = 5;
5361       private int storefileSizeMB_ ;
5362       /**
5363        * <code>optional uint32 storefile_size_MB = 5;</code>
5364        *
5365        * <pre>
5366        ** the current total size of the store files for the region, in MB 
5367        * </pre>
5368        */
5369       public boolean hasStorefileSizeMB() {
5370         return ((bitField0_ & 0x00000010) == 0x00000010);
5371       }
5372       /**
5373        * <code>optional uint32 storefile_size_MB = 5;</code>
5374        *
5375        * <pre>
5376        ** the current total size of the store files for the region, in MB 
5377        * </pre>
5378        */
5379       public int getStorefileSizeMB() {
5380         return storefileSizeMB_;
5381       }
5382       /**
5383        * <code>optional uint32 storefile_size_MB = 5;</code>
5384        *
5385        * <pre>
5386        ** the current total size of the store files for the region, in MB 
5387        * </pre>
5388        */
5389       public Builder setStorefileSizeMB(int value) {
5390         bitField0_ |= 0x00000010;
5391         storefileSizeMB_ = value;
5392         onChanged();
5393         return this;
5394       }
5395       /**
5396        * <code>optional uint32 storefile_size_MB = 5;</code>
5397        *
5398        * <pre>
5399        ** the current total size of the store files for the region, in MB 
5400        * </pre>
5401        */
5402       public Builder clearStorefileSizeMB() {
5403         bitField0_ = (bitField0_ & ~0x00000010);
5404         storefileSizeMB_ = 0;
5405         onChanged();
5406         return this;
5407       }
5408 
      // optional uint32 memstore_size_MB = 6;
      private int memstoreSizeMB_ ;
      /**
       * <code>optional uint32 memstore_size_MB = 6;</code>
       *
       * <pre>
       ** the current size of the memstore for the region, in MB 
       * </pre>
       */
      public boolean hasMemstoreSizeMB() {
        // Presence of optional field 6 is tracked by bit 0x00000020 of bitField0_.
        return ((bitField0_ & 0x00000020) == 0x00000020);
      }
      /**
       * <code>optional uint32 memstore_size_MB = 6;</code>
       *
       * <pre>
       ** the current size of the memstore for the region, in MB 
       * </pre>
       */
      public int getMemstoreSizeMB() {
        return memstoreSizeMB_;
      }
      /**
       * <code>optional uint32 memstore_size_MB = 6;</code>
       *
       * <pre>
       ** the current size of the memstore for the region, in MB 
       * </pre>
       */
      public Builder setMemstoreSizeMB(int value) {
        bitField0_ |= 0x00000020;  // mark field 6 as present
        memstoreSizeMB_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint32 memstore_size_MB = 6;</code>
       *
       * <pre>
       ** the current size of the memstore for the region, in MB 
       * </pre>
       */
      public Builder clearMemstoreSizeMB() {
        bitField0_ = (bitField0_ & ~0x00000020);  // drop the presence bit for field 6
        memstoreSizeMB_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
5457 
      // optional uint32 storefile_index_size_MB = 7;
      private int storefileIndexSizeMB_ ;
      /**
       * <code>optional uint32 storefile_index_size_MB = 7;</code>
       *
       * <pre>
       **
       * The current total size of root-level store file indexes for the region,
       * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
       * </pre>
       */
      public boolean hasStorefileIndexSizeMB() {
        // Presence of optional field 7 is tracked by bit 0x00000040 of bitField0_.
        return ((bitField0_ & 0x00000040) == 0x00000040);
      }
      /**
       * <code>optional uint32 storefile_index_size_MB = 7;</code>
       *
       * <pre>
       **
       * The current total size of root-level store file indexes for the region,
       * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
       * </pre>
       */
      public int getStorefileIndexSizeMB() {
        return storefileIndexSizeMB_;
      }
      /**
       * <code>optional uint32 storefile_index_size_MB = 7;</code>
       *
       * <pre>
       **
       * The current total size of root-level store file indexes for the region,
       * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
       * </pre>
       */
      public Builder setStorefileIndexSizeMB(int value) {
        bitField0_ |= 0x00000040;  // mark field 7 as present
        storefileIndexSizeMB_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint32 storefile_index_size_MB = 7;</code>
       *
       * <pre>
       **
       * The current total size of root-level store file indexes for the region,
       * in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
       * </pre>
       */
      public Builder clearStorefileIndexSizeMB() {
        bitField0_ = (bitField0_ & ~0x00000040);  // drop the presence bit for field 7
        storefileIndexSizeMB_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
5514 
      // optional uint64 read_requests_count = 8;
      private long readRequestsCount_ ;
      /**
       * <code>optional uint64 read_requests_count = 8;</code>
       *
       * <pre>
       ** the current total read requests made to region 
       * </pre>
       */
      public boolean hasReadRequestsCount() {
        // Presence of optional field 8 is tracked by bit 0x00000080 of bitField0_.
        return ((bitField0_ & 0x00000080) == 0x00000080);
      }
      /**
       * <code>optional uint64 read_requests_count = 8;</code>
       *
       * <pre>
       ** the current total read requests made to region 
       * </pre>
       */
      public long getReadRequestsCount() {
        return readRequestsCount_;
      }
      /**
       * <code>optional uint64 read_requests_count = 8;</code>
       *
       * <pre>
       ** the current total read requests made to region 
       * </pre>
       */
      public Builder setReadRequestsCount(long value) {
        bitField0_ |= 0x00000080;  // mark field 8 as present
        readRequestsCount_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 read_requests_count = 8;</code>
       *
       * <pre>
       ** the current total read requests made to region 
       * </pre>
       */
      public Builder clearReadRequestsCount() {
        bitField0_ = (bitField0_ & ~0x00000080);  // drop the presence bit for field 8
        readRequestsCount_ = 0L;  // proto2 default for uint64
        onChanged();
        return this;
      }
5563 
      // optional uint64 write_requests_count = 9;
      private long writeRequestsCount_ ;
      /**
       * <code>optional uint64 write_requests_count = 9;</code>
       *
       * <pre>
       ** the current total write requests made to region 
       * </pre>
       */
      public boolean hasWriteRequestsCount() {
        // Presence of optional field 9 is tracked by bit 0x00000100 of bitField0_.
        return ((bitField0_ & 0x00000100) == 0x00000100);
      }
      /**
       * <code>optional uint64 write_requests_count = 9;</code>
       *
       * <pre>
       ** the current total write requests made to region 
       * </pre>
       */
      public long getWriteRequestsCount() {
        return writeRequestsCount_;
      }
      /**
       * <code>optional uint64 write_requests_count = 9;</code>
       *
       * <pre>
       ** the current total write requests made to region 
       * </pre>
       */
      public Builder setWriteRequestsCount(long value) {
        bitField0_ |= 0x00000100;  // mark field 9 as present
        writeRequestsCount_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 write_requests_count = 9;</code>
       *
       * <pre>
       ** the current total write requests made to region 
       * </pre>
       */
      public Builder clearWriteRequestsCount() {
        bitField0_ = (bitField0_ & ~0x00000100);  // drop the presence bit for field 9
        writeRequestsCount_ = 0L;  // proto2 default for uint64
        onChanged();
        return this;
      }
5612 
      // optional uint64 total_compacting_KVs = 10;
      private long totalCompactingKVs_ ;
      /**
       * <code>optional uint64 total_compacting_KVs = 10;</code>
       *
       * <pre>
       ** the total compacting key values in currently running compaction 
       * </pre>
       */
      public boolean hasTotalCompactingKVs() {
        // Presence of optional field 10 is tracked by bit 0x00000200 of bitField0_.
        return ((bitField0_ & 0x00000200) == 0x00000200);
      }
      /**
       * <code>optional uint64 total_compacting_KVs = 10;</code>
       *
       * <pre>
       ** the total compacting key values in currently running compaction 
       * </pre>
       */
      public long getTotalCompactingKVs() {
        return totalCompactingKVs_;
      }
      /**
       * <code>optional uint64 total_compacting_KVs = 10;</code>
       *
       * <pre>
       ** the total compacting key values in currently running compaction 
       * </pre>
       */
      public Builder setTotalCompactingKVs(long value) {
        bitField0_ |= 0x00000200;  // mark field 10 as present
        totalCompactingKVs_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 total_compacting_KVs = 10;</code>
       *
       * <pre>
       ** the total compacting key values in currently running compaction 
       * </pre>
       */
      public Builder clearTotalCompactingKVs() {
        bitField0_ = (bitField0_ & ~0x00000200);  // drop the presence bit for field 10
        totalCompactingKVs_ = 0L;  // proto2 default for uint64
        onChanged();
        return this;
      }
5661 
      // optional uint64 current_compacted_KVs = 11;
      private long currentCompactedKVs_ ;
      /**
       * <code>optional uint64 current_compacted_KVs = 11;</code>
       *
       * <pre>
       ** the completed count of key values in currently running compaction 
       * </pre>
       */
      public boolean hasCurrentCompactedKVs() {
        // Presence of optional field 11 is tracked by bit 0x00000400 of bitField0_.
        return ((bitField0_ & 0x00000400) == 0x00000400);
      }
      /**
       * <code>optional uint64 current_compacted_KVs = 11;</code>
       *
       * <pre>
       ** the completed count of key values in currently running compaction 
       * </pre>
       */
      public long getCurrentCompactedKVs() {
        return currentCompactedKVs_;
      }
      /**
       * <code>optional uint64 current_compacted_KVs = 11;</code>
       *
       * <pre>
       ** the completed count of key values in currently running compaction 
       * </pre>
       */
      public Builder setCurrentCompactedKVs(long value) {
        bitField0_ |= 0x00000400;  // mark field 11 as present
        currentCompactedKVs_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 current_compacted_KVs = 11;</code>
       *
       * <pre>
       ** the completed count of key values in currently running compaction 
       * </pre>
       */
      public Builder clearCurrentCompactedKVs() {
        bitField0_ = (bitField0_ & ~0x00000400);  // drop the presence bit for field 11
        currentCompactedKVs_ = 0L;  // proto2 default for uint64
        onChanged();
        return this;
      }
5710 
      // optional uint32 root_index_size_KB = 12;
      private int rootIndexSizeKB_ ;
      /**
       * <code>optional uint32 root_index_size_KB = 12;</code>
       *
       * <pre>
       ** The current total size of root-level indexes for the region, in KB. 
       * </pre>
       */
      public boolean hasRootIndexSizeKB() {
        // Presence of optional field 12 is tracked by bit 0x00000800 of bitField0_.
        return ((bitField0_ & 0x00000800) == 0x00000800);
      }
      /**
       * <code>optional uint32 root_index_size_KB = 12;</code>
       *
       * <pre>
       ** The current total size of root-level indexes for the region, in KB. 
       * </pre>
       */
      public int getRootIndexSizeKB() {
        return rootIndexSizeKB_;
      }
      /**
       * <code>optional uint32 root_index_size_KB = 12;</code>
       *
       * <pre>
       ** The current total size of root-level indexes for the region, in KB. 
       * </pre>
       */
      public Builder setRootIndexSizeKB(int value) {
        bitField0_ |= 0x00000800;  // mark field 12 as present
        rootIndexSizeKB_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint32 root_index_size_KB = 12;</code>
       *
       * <pre>
       ** The current total size of root-level indexes for the region, in KB. 
       * </pre>
       */
      public Builder clearRootIndexSizeKB() {
        bitField0_ = (bitField0_ & ~0x00000800);  // drop the presence bit for field 12
        rootIndexSizeKB_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
5759 
      // optional uint32 total_static_index_size_KB = 13;
      private int totalStaticIndexSizeKB_ ;
      /**
       * <code>optional uint32 total_static_index_size_KB = 13;</code>
       *
       * <pre>
       ** The total size of all index blocks, not just the root level, in KB. 
       * </pre>
       */
      public boolean hasTotalStaticIndexSizeKB() {
        // Presence of optional field 13 is tracked by bit 0x00001000 of bitField0_.
        return ((bitField0_ & 0x00001000) == 0x00001000);
      }
      /**
       * <code>optional uint32 total_static_index_size_KB = 13;</code>
       *
       * <pre>
       ** The total size of all index blocks, not just the root level, in KB. 
       * </pre>
       */
      public int getTotalStaticIndexSizeKB() {
        return totalStaticIndexSizeKB_;
      }
      /**
       * <code>optional uint32 total_static_index_size_KB = 13;</code>
       *
       * <pre>
       ** The total size of all index blocks, not just the root level, in KB. 
       * </pre>
       */
      public Builder setTotalStaticIndexSizeKB(int value) {
        bitField0_ |= 0x00001000;  // mark field 13 as present
        totalStaticIndexSizeKB_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint32 total_static_index_size_KB = 13;</code>
       *
       * <pre>
       ** The total size of all index blocks, not just the root level, in KB. 
       * </pre>
       */
      public Builder clearTotalStaticIndexSizeKB() {
        bitField0_ = (bitField0_ & ~0x00001000);  // drop the presence bit for field 13
        totalStaticIndexSizeKB_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
5808 
      // optional uint32 total_static_bloom_size_KB = 14;
      private int totalStaticBloomSizeKB_ ;
      /**
       * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
       *
       * <pre>
       **
       * The total size of all Bloom filter blocks, not just loaded into the
       * block cache, in KB.
       * </pre>
       */
      public boolean hasTotalStaticBloomSizeKB() {
        // Presence of optional field 14 is tracked by bit 0x00002000 of bitField0_.
        return ((bitField0_ & 0x00002000) == 0x00002000);
      }
      /**
       * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
       *
       * <pre>
       **
       * The total size of all Bloom filter blocks, not just loaded into the
       * block cache, in KB.
       * </pre>
       */
      public int getTotalStaticBloomSizeKB() {
        return totalStaticBloomSizeKB_;
      }
      /**
       * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
       *
       * <pre>
       **
       * The total size of all Bloom filter blocks, not just loaded into the
       * block cache, in KB.
       * </pre>
       */
      public Builder setTotalStaticBloomSizeKB(int value) {
        bitField0_ |= 0x00002000;  // mark field 14 as present
        totalStaticBloomSizeKB_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint32 total_static_bloom_size_KB = 14;</code>
       *
       * <pre>
       **
       * The total size of all Bloom filter blocks, not just loaded into the
       * block cache, in KB.
       * </pre>
       */
      public Builder clearTotalStaticBloomSizeKB() {
        bitField0_ = (bitField0_ & ~0x00002000);  // drop the presence bit for field 14
        totalStaticBloomSizeKB_ = 0;  // proto2 default for uint32
        onChanged();
        return this;
      }
5865 
      // optional uint64 complete_sequence_id = 15;
      private long completeSequenceId_ ;
      /**
       * <code>optional uint64 complete_sequence_id = 15;</code>
       *
       * <pre>
       ** the most recent sequence Id from cache flush 
       * </pre>
       */
      public boolean hasCompleteSequenceId() {
        // Presence of optional field 15 is tracked by bit 0x00004000 of bitField0_.
        return ((bitField0_ & 0x00004000) == 0x00004000);
      }
      /**
       * <code>optional uint64 complete_sequence_id = 15;</code>
       *
       * <pre>
       ** the most recent sequence Id from cache flush 
       * </pre>
       */
      public long getCompleteSequenceId() {
        return completeSequenceId_;
      }
      /**
       * <code>optional uint64 complete_sequence_id = 15;</code>
       *
       * <pre>
       ** the most recent sequence Id from cache flush 
       * </pre>
       */
      public Builder setCompleteSequenceId(long value) {
        bitField0_ |= 0x00004000;  // mark field 15 as present
        completeSequenceId_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 complete_sequence_id = 15;</code>
       *
       * <pre>
       ** the most recent sequence Id from cache flush 
       * </pre>
       */
      public Builder clearCompleteSequenceId() {
        bitField0_ = (bitField0_ & ~0x00004000);  // drop the presence bit for field 15
        completeSequenceId_ = 0L;  // proto2 default for uint64
        onChanged();
        return this;
      }
5914 
      // optional float data_locality = 16;
      private float dataLocality_ ;
      /**
       * <code>optional float data_locality = 16;</code>
       *
       * <pre>
       ** The current data locality for region in the regionserver 
       * </pre>
       */
      public boolean hasDataLocality() {
        // Presence of optional field 16 is tracked by bit 0x00008000 of bitField0_.
        return ((bitField0_ & 0x00008000) == 0x00008000);
      }
      /**
       * <code>optional float data_locality = 16;</code>
       *
       * <pre>
       ** The current data locality for region in the regionserver 
       * </pre>
       */
      public float getDataLocality() {
        return dataLocality_;
      }
      /**
       * <code>optional float data_locality = 16;</code>
       *
       * <pre>
       ** The current data locality for region in the regionserver 
       * </pre>
       */
      public Builder setDataLocality(float value) {
        bitField0_ |= 0x00008000;  // mark field 16 as present
        dataLocality_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional float data_locality = 16;</code>
       *
       * <pre>
       ** The current data locality for region in the regionserver 
       * </pre>
       */
      public Builder clearDataLocality() {
        bitField0_ = (bitField0_ & ~0x00008000);  // drop the presence bit for field 16
        dataLocality_ = 0F;  // proto2 default for float
        onChanged();
        return this;
      }
5963 
      // optional uint64 last_major_compaction_ts = 17 [default = 0];
      private long lastMajorCompactionTs_ ;
      /**
       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
       */
      public boolean hasLastMajorCompactionTs() {
        // Presence of optional field 17 is tracked by bit 0x00010000 of bitField0_.
        return ((bitField0_ & 0x00010000) == 0x00010000);
      }
      /**
       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
       */
      public long getLastMajorCompactionTs() {
        return lastMajorCompactionTs_;
      }
      /**
       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
       */
      public Builder setLastMajorCompactionTs(long value) {
        bitField0_ |= 0x00010000;  // mark field 17 as present
        lastMajorCompactionTs_ = value;
        onChanged();
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
       */
      public Builder clearLastMajorCompactionTs() {
        bitField0_ = (bitField0_ & ~0x00010000);  // drop the presence bit for field 17
        lastMajorCompactionTs_ = 0L;  // declared [default = 0] in the .proto
        onChanged();
        return this;
      }
5996 
      // repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;
      // Inline storage for repeated field 18. Starts as the shared immutable
      // empty list and is promoted to a private ArrayList copy on first
      // mutation (copy-on-write; see ensureStoreCompleteSequenceIdIsMutable).
      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeCompleteSequenceId_ =
        java.util.Collections.emptyList();
      // Promotes storeCompleteSequenceId_ to a mutable copy owned by this
      // builder; bit 0x00020000 of bitField0_ records that the promotion has
      // already happened so the copy is made at most once.
      private void ensureStoreCompleteSequenceIdIsMutable() {
        if (!((bitField0_ & 0x00020000) == 0x00020000)) {
          storeCompleteSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>(storeCompleteSequenceId_);
          bitField0_ |= 0x00020000;
         }
      }

      // Lazily-created delegate for repeated field 18. While null, the field's
      // contents live in storeCompleteSequenceId_ above; once created (by the
      // accessors' FieldBuilder path), the delegate owns the contents and the
      // inline list is ignored.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeCompleteSequenceIdBuilder_;
6009 
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceIdList() {
        // A null delegate means the field still lives in the inline list; wrap
        // it so callers cannot mutate the builder's state through the view.
        if (storeCompleteSequenceIdBuilder_ == null) {
          return java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
        } else {
          return storeCompleteSequenceIdBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public int getStoreCompleteSequenceIdCount() {
        // Consult whichever storage currently owns the repeated field.
        if (storeCompleteSequenceIdBuilder_ == null) {
          return storeCompleteSequenceId_.size();
        } else {
          return storeCompleteSequenceIdBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) {
        if (storeCompleteSequenceIdBuilder_ == null) {
          return storeCompleteSequenceId_.get(index);
        } else {
          return storeCompleteSequenceIdBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder setStoreCompleteSequenceId(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
        // Inline-list path: reject null explicitly, then ensure the list is a
        // private mutable copy before writing. Delegate path: the
        // RepeatedFieldBuilder performs its own null check and change tracking.
        if (storeCompleteSequenceIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.set(index, value);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.setMessage(index, value);
        }
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder setStoreCompleteSequenceId(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
        // Builder overload: the sub-builder is built eagerly, so later changes
        // to builderForValue do not affect the stored element.
        if (storeCompleteSequenceIdBuilder_ == null) {
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.set(index, builderForValue.build());
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder addStoreCompleteSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
        // Append to whichever storage currently owns the repeated field; the
        // inline-list path must reject null and promote the list to a mutable
        // copy first.
        if (storeCompleteSequenceIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.add(value);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.addMessage(value);
        }
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder addStoreCompleteSequenceId(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
        // Positional insert; shifts subsequent elements right.
        if (storeCompleteSequenceIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.add(index, value);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder addStoreCompleteSequenceId(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
        // Builder overload: builds the sub-message eagerly before storing it.
        if (storeCompleteSequenceIdBuilder_ == null) {
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.add(builderForValue.build());
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder addStoreCompleteSequenceId(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
        // Positional builder overload: builds the sub-message eagerly, then
        // inserts it at index, shifting subsequent elements right.
        if (storeCompleteSequenceIdBuilder_ == null) {
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.add(index, builderForValue.build());
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.addMessage(index, builderForValue.build());
        }
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder addAllStoreCompleteSequenceId(
          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> values) {
        // super.addAll is GeneratedMessage.Builder's bulk-append helper; it
        // copies values into the (now mutable) inline list.
        if (storeCompleteSequenceIdBuilder_ == null) {
          ensureStoreCompleteSequenceIdIsMutable();
          super.addAll(values, storeCompleteSequenceId_);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder clearStoreCompleteSequenceId() {
        // Inline path: revert to the shared empty list and drop the
        // "mutable copy exists" bit (0x00020000) so a later mutation re-copies.
        if (storeCompleteSequenceIdBuilder_ == null) {
          storeCompleteSequenceId_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00020000);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.clear();
        }
        return this;  // builder pattern: return this for call chaining
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public Builder removeStoreCompleteSequenceId(int index) {
        // Removes the element at index, shifting subsequent elements left.
        if (storeCompleteSequenceIdBuilder_ == null) {
          ensureStoreCompleteSequenceIdIsMutable();
          storeCompleteSequenceId_.remove(index);
          onChanged();
        } else {
          storeCompleteSequenceIdBuilder_.remove(index);
        }
        return this;
      }
6220       /**
6221        * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
6222        *
6223        * <pre>
6224        ** the most recent sequence Id of store from cache flush 
6225        * </pre>
6226        */
6227       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreCompleteSequenceIdBuilder(
6228           int index) {
6229         return getStoreCompleteSequenceIdFieldBuilder().getBuilder(index);
6230       }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
          int index) {
        // Read-only access: uses the plain list directly when no field builder
        // exists, so this does not switch the field into builder mode.
        if (storeCompleteSequenceIdBuilder_ == null) {
          return storeCompleteSequenceId_.get(index);  } else {
          return storeCompleteSequenceIdBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
           getStoreCompleteSequenceIdOrBuilderList() {
        if (storeCompleteSequenceIdBuilder_ != null) {
          return storeCompleteSequenceIdBuilder_.getMessageOrBuilderList();
        } else {
          // Wrap the internal list so callers cannot mutate builder state.
          return java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
        }
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder() {
        // Appends a new element (seeded from the default instance) and returns
        // its builder for in-place population.
        return getStoreCompleteSequenceIdFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder(
          int index) {
        // Inserts a default-initialized element at the given index and returns
        // its builder.
        return getStoreCompleteSequenceIdFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.StoreSequenceId store_complete_sequence_id = 18;</code>
       *
       * <pre>
       ** the most recent sequence Id of store from cache flush 
       * </pre>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder> 
           getStoreCompleteSequenceIdBuilderList() {
        // Switches the field into builder mode and exposes one builder per element.
        return getStoreCompleteSequenceIdFieldBuilder().getBuilderList();
      }
      // Lazily-created helper that manages the repeated field once any
      // builder-returning accessor is used; null while the plain list is in use.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> 
          getStoreCompleteSequenceIdFieldBuilder() {
        if (storeCompleteSequenceIdBuilder_ == null) {
          // Hand ownership of the current list to the builder; the "is mutable"
          // flag is derived from bit 0x00020000 of bitField0_.
          storeCompleteSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>(
                  storeCompleteSequenceId_,
                  ((bitField0_ & 0x00020000) == 0x00020000),
                  getParentForChildren(),
                  isClean());
          // The builder is now the single source of truth for this field.
          storeCompleteSequenceId_ = null;
        }
        return storeCompleteSequenceIdBuilder_;
      }
6308 
6309       // @@protoc_insertion_point(builder_scope:hbase.pb.RegionLoad)
6310     }
6311 
    static {
      // Eagerly create the shared RegionLoad default instance via the noInit
      // constructor, then populate its field defaults.
      defaultInstance = new RegionLoad(true);
      defaultInstance.initFields();
    }
6316 
6317     // @@protoc_insertion_point(class_scope:hbase.pb.RegionLoad)
6318   }
6319 
  /**
   * Read-only accessor contract for {@code hbase.pb.ReplicationLoadSink},
   * implemented by both the immutable message and its Builder.
   */
  public interface ReplicationLoadSinkOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 ageOfLastAppliedOp = 1;
    /**
     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
    boolean hasAgeOfLastAppliedOp();
    /**
     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
    long getAgeOfLastAppliedOp();

    // required uint64 timeStampsOfLastAppliedOp = 2;
    /**
     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
    boolean hasTimeStampsOfLastAppliedOp();
    /**
     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
    long getTimeStampsOfLastAppliedOp();
  }
  /**
   * Protobuf type {@code hbase.pb.ReplicationLoadSink}
   *
   * <p>Generated by protoc from ClusterStatus.proto — do not edit by hand.
   * Carries two required uint64 fields describing a replication sink:
   * field 1 {@code ageOfLastAppliedOp} and field 2 {@code timeStampsOfLastAppliedOp}.
   */
  public static final class ReplicationLoadSink extends
      com.google.protobuf.GeneratedMessage
      implements ReplicationLoadSinkOrBuilder {
    // Use ReplicationLoadSink.newBuilder() to construct.
    private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the shared default instance; fields are filled in
    // by initFields() from the static initializer at the bottom of the class.
    private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton immutable default instance, created in the static initializer.
    private static final ReplicationLoadSink defaultInstance;
    public static ReplicationLoadSink getDefaultInstance() {
      return defaultInstance;
    }

    public ReplicationLoadSink getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not known to this schema version;
    // preserved so serialization round-trips losslessly.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: reads tag/value pairs until end of
    // stream (tag 0), stashing unrecognized fields in unknownFields.
    private ReplicationLoadSink(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: { // field 1, wire type 0 (varint)
              bitField0_ |= 0x00000001;
              ageOfLastAppliedOp_ = input.readUInt64();
              break;
            }
            case 16: { // field 2, wire type 0 (varint)
              bitField0_ |= 0x00000002;
              timeStampsOfLastAppliedOp_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was read, even on error, so the partial message
        // attached to the exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSink_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
    }

    public static com.google.protobuf.Parser<ReplicationLoadSink> PARSER =
        new com.google.protobuf.AbstractParser<ReplicationLoadSink>() {
      public ReplicationLoadSink parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ReplicationLoadSink(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<ReplicationLoadSink> getParserForType() {
      return PARSER;
    }

    // Bit i set => field i+1 was explicitly set (presence tracking for
    // required/optional fields).
    private int bitField0_;
    // required uint64 ageOfLastAppliedOp = 1;
    public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1;
    private long ageOfLastAppliedOp_;
    /**
     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
    public boolean hasAgeOfLastAppliedOp() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
    public long getAgeOfLastAppliedOp() {
      return ageOfLastAppliedOp_;
    }

    // required uint64 timeStampsOfLastAppliedOp = 2;
    public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2;
    private long timeStampsOfLastAppliedOp_;
    /**
     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
    public boolean hasTimeStampsOfLastAppliedOp() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
    public long getTimeStampsOfLastAppliedOp() {
      return timeStampsOfLastAppliedOp_;
    }

    private void initFields() {
      ageOfLastAppliedOp_ = 0L;
      timeStampsOfLastAppliedOp_ = 0L;
    }
    // Memoized tri-state: -1 unknown, 0 not initialized, 1 initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are "required" in the proto, so presence is mandatory.
      if (!hasAgeOfLastAppliedOp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTimeStampsOfLastAppliedOp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Computing the size first also memoizes it, as required by the
      // CodedOutputStream contract.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, ageOfLastAppliedOp_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, timeStampsOfLastAppliedOp_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size; -1 until first computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, ageOfLastAppliedOp_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, timeStampsOfLastAppliedOp_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Field-by-field equality: presence flags and values must both match.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj;

      boolean result = true;
      result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp());
      if (hasAgeOfLastAppliedOp()) {
        result = result && (getAgeOfLastAppliedOp()
            == other.getAgeOfLastAppliedOp());
      }
      result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp());
      if (hasTimeStampsOfLastAppliedOp()) {
        result = result && (getTimeStampsOfLastAppliedOp()
            == other.getTimeStampsOfLastAppliedOp());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash; 0 means "not yet computed" (recomputed if hash is 0).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasAgeOfLastAppliedOp()) {
        hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp());
      }
      if (hasTimeStampsOfLastAppliedOp()) {
        hash = (37 * hash) + TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // Static parse entry points; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hbase.pb.ReplicationLoadSink}
     *
     * <p>Mutable builder for {@code ReplicationLoadSink}; not thread-safe.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSink_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // No message-typed fields, so nothing to eagerly initialize.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        ageOfLastAppliedOp_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        timeStampsOfLastAppliedOp_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSink_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
      }

      // build() enforces required-field presence; buildPartial() does not.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) {
        // Only fields explicitly set on "other" overwrite this builder's values.
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this;
        if (other.hasAgeOfLastAppliedOp()) {
          setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp());
        }
        if (other.hasTimeStampsOfLastAppliedOp()) {
          setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasAgeOfLastAppliedOp()) {
          
          return false;
        }
        if (!hasTimeStampsOfLastAppliedOp()) {
          
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Merge whatever was parsed before the failure, then rethrow.
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required uint64 ageOfLastAppliedOp = 1;
      private long ageOfLastAppliedOp_ ;
      /**
       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
       */
      public boolean hasAgeOfLastAppliedOp() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
       */
      public long getAgeOfLastAppliedOp() {
        return ageOfLastAppliedOp_;
      }
      /**
       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
       */
      public Builder setAgeOfLastAppliedOp(long value) {
        bitField0_ |= 0x00000001;
        ageOfLastAppliedOp_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
       */
      public Builder clearAgeOfLastAppliedOp() {
        bitField0_ = (bitField0_ & ~0x00000001);
        ageOfLastAppliedOp_ = 0L;
        onChanged();
        return this;
      }

      // required uint64 timeStampsOfLastAppliedOp = 2;
      private long timeStampsOfLastAppliedOp_ ;
      /**
       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
       */
      public boolean hasTimeStampsOfLastAppliedOp() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
       */
      public long getTimeStampsOfLastAppliedOp() {
        return timeStampsOfLastAppliedOp_;
      }
      /**
       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
       */
      public Builder setTimeStampsOfLastAppliedOp(long value) {
        bitField0_ |= 0x00000002;
        timeStampsOfLastAppliedOp_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
       */
      public Builder clearTimeStampsOfLastAppliedOp() {
        bitField0_ = (bitField0_ & ~0x00000002);
        timeStampsOfLastAppliedOp_ = 0L;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationLoadSink)
    }

    static {
      // Eagerly create and initialize the shared default instance.
      defaultInstance = new ReplicationLoadSink(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationLoadSink)
  }
6859 
  /**
   * Read-only accessor contract for {@code hbase.pb.ReplicationLoadSource},
   * implemented by both the immutable message and its Builder.
   */
  public interface ReplicationLoadSourceOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string peerID = 1;
    /**
     * <code>required string peerID = 1;</code>
     */
    boolean hasPeerID();
    /**
     * <code>required string peerID = 1;</code>
     */
    java.lang.String getPeerID();
    /**
     * <code>required string peerID = 1;</code>
     */
    com.google.protobuf.ByteString
        getPeerIDBytes();

    // required uint64 ageOfLastShippedOp = 2;
    /**
     * <code>required uint64 ageOfLastShippedOp = 2;</code>
     */
    boolean hasAgeOfLastShippedOp();
    /**
     * <code>required uint64 ageOfLastShippedOp = 2;</code>
     */
    long getAgeOfLastShippedOp();

    // required uint32 sizeOfLogQueue = 3;
    /**
     * <code>required uint32 sizeOfLogQueue = 3;</code>
     */
    boolean hasSizeOfLogQueue();
    /**
     * <code>required uint32 sizeOfLogQueue = 3;</code>
     */
    int getSizeOfLogQueue();

    // required uint64 timeStampOfLastShippedOp = 4;
    /**
     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
     */
    boolean hasTimeStampOfLastShippedOp();
    /**
     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
     */
    long getTimeStampOfLastShippedOp();

    // required uint64 replicationLag = 5;
    /**
     * <code>required uint64 replicationLag = 5;</code>
     */
    boolean hasReplicationLag();
    /**
     * <code>required uint64 replicationLag = 5;</code>
     */
    long getReplicationLag();
  }
6918   /**
6919    * Protobuf type {@code hbase.pb.ReplicationLoadSource}
6920    */
6921   public static final class ReplicationLoadSource extends
6922       com.google.protobuf.GeneratedMessage
6923       implements ReplicationLoadSourceOrBuilder {
    // Use ReplicationLoadSource.newBuilder() to construct.
    // Copies the builder's unknown fields so round-tripped data is preserved.
    private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Constructor used only for the shared default instance; fields are filled
    // in later via initFields() from the static initializer.
    private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6930 
    // Singleton immutable default instance, created in the static initializer.
    private static final ReplicationLoadSource defaultInstance;
    public static ReplicationLoadSource getDefaultInstance() {
      return defaultInstance;
    }

    public ReplicationLoadSource getDefaultInstanceForType() {
      return defaultInstance;
    }
6939 
    // Fields that arrived on the wire but are not in this schema version;
    // preserved for lossless serialization round-trips.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: reads tag/value pairs until end of
    // stream (tag 0), stashing unrecognized fields in unknownFields.
    private ReplicationLoadSource(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: { // field 1, wire type 2 (length-delimited): peerID bytes
              bitField0_ |= 0x00000001;
              peerID_ = input.readBytes();
              break;
            }
            case 16: { // field 2, wire type 0 (varint)
              bitField0_ |= 0x00000002;
              ageOfLastShippedOp_ = input.readUInt64();
              break;
            }
            case 24: { // field 3, wire type 0 (varint)
              bitField0_ |= 0x00000004;
              sizeOfLogQueue_ = input.readUInt32();
              break;
            }
            case 32: { // field 4, wire type 0 (varint)
              bitField0_ |= 0x00000008;
              timeStampOfLastShippedOp_ = input.readUInt64();
              break;
            }
            case 40: { // field 5, wire type 0 (varint)
              bitField0_ |= 0x00000010;
              replicationLag_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was read, even on error, so the partial message
        // attached to the exception is usable.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
7006     public static final com.google.protobuf.Descriptors.Descriptor
7007         getDescriptor() {
7008       return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSource_descriptor;
7009     }
7010 
7011     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7012         internalGetFieldAccessorTable() {
7013       return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSource_fieldAccessorTable
7014           .ensureFieldAccessorsInitialized(
7015               org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
7016     }
7017 
7018     public static com.google.protobuf.Parser<ReplicationLoadSource> PARSER =
7019         new com.google.protobuf.AbstractParser<ReplicationLoadSource>() {
7020       public ReplicationLoadSource parsePartialFrom(
7021           com.google.protobuf.CodedInputStream input,
7022           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7023           throws com.google.protobuf.InvalidProtocolBufferException {
7024         return new ReplicationLoadSource(input, extensionRegistry);
7025       }
7026     };
7027 
7028     @java.lang.Override
7029     public com.google.protobuf.Parser<ReplicationLoadSource> getParserForType() {
7030       return PARSER;
7031     }
7032 
7033     private int bitField0_;
7034     // required string peerID = 1;
7035     public static final int PEERID_FIELD_NUMBER = 1;
7036     private java.lang.Object peerID_;
7037     /**
7038      * <code>required string peerID = 1;</code>
7039      */
7040     public boolean hasPeerID() {
7041       return ((bitField0_ & 0x00000001) == 0x00000001);
7042     }
7043     /**
7044      * <code>required string peerID = 1;</code>
7045      */
7046     public java.lang.String getPeerID() {
7047       java.lang.Object ref = peerID_;
7048       if (ref instanceof java.lang.String) {
7049         return (java.lang.String) ref;
7050       } else {
7051         com.google.protobuf.ByteString bs = 
7052             (com.google.protobuf.ByteString) ref;
7053         java.lang.String s = bs.toStringUtf8();
7054         if (bs.isValidUtf8()) {
7055           peerID_ = s;
7056         }
7057         return s;
7058       }
7059     }
7060     /**
7061      * <code>required string peerID = 1;</code>
7062      */
7063     public com.google.protobuf.ByteString
7064         getPeerIDBytes() {
7065       java.lang.Object ref = peerID_;
7066       if (ref instanceof java.lang.String) {
7067         com.google.protobuf.ByteString b = 
7068             com.google.protobuf.ByteString.copyFromUtf8(
7069                 (java.lang.String) ref);
7070         peerID_ = b;
7071         return b;
7072       } else {
7073         return (com.google.protobuf.ByteString) ref;
7074       }
7075     }
7076 
7077     // required uint64 ageOfLastShippedOp = 2;
7078     public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2;
7079     private long ageOfLastShippedOp_;
7080     /**
7081      * <code>required uint64 ageOfLastShippedOp = 2;</code>
7082      */
7083     public boolean hasAgeOfLastShippedOp() {
7084       return ((bitField0_ & 0x00000002) == 0x00000002);
7085     }
7086     /**
7087      * <code>required uint64 ageOfLastShippedOp = 2;</code>
7088      */
7089     public long getAgeOfLastShippedOp() {
7090       return ageOfLastShippedOp_;
7091     }
7092 
7093     // required uint32 sizeOfLogQueue = 3;
7094     public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3;
7095     private int sizeOfLogQueue_;
7096     /**
7097      * <code>required uint32 sizeOfLogQueue = 3;</code>
7098      */
7099     public boolean hasSizeOfLogQueue() {
7100       return ((bitField0_ & 0x00000004) == 0x00000004);
7101     }
7102     /**
7103      * <code>required uint32 sizeOfLogQueue = 3;</code>
7104      */
7105     public int getSizeOfLogQueue() {
7106       return sizeOfLogQueue_;
7107     }
7108 
7109     // required uint64 timeStampOfLastShippedOp = 4;
7110     public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4;
7111     private long timeStampOfLastShippedOp_;
7112     /**
7113      * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7114      */
7115     public boolean hasTimeStampOfLastShippedOp() {
7116       return ((bitField0_ & 0x00000008) == 0x00000008);
7117     }
7118     /**
7119      * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7120      */
7121     public long getTimeStampOfLastShippedOp() {
7122       return timeStampOfLastShippedOp_;
7123     }
7124 
7125     // required uint64 replicationLag = 5;
7126     public static final int REPLICATIONLAG_FIELD_NUMBER = 5;
7127     private long replicationLag_;
7128     /**
7129      * <code>required uint64 replicationLag = 5;</code>
7130      */
7131     public boolean hasReplicationLag() {
7132       return ((bitField0_ & 0x00000010) == 0x00000010);
7133     }
7134     /**
7135      * <code>required uint64 replicationLag = 5;</code>
7136      */
7137     public long getReplicationLag() {
7138       return replicationLag_;
7139     }
7140 
7141     private void initFields() {
7142       peerID_ = "";
7143       ageOfLastShippedOp_ = 0L;
7144       sizeOfLogQueue_ = 0;
7145       timeStampOfLastShippedOp_ = 0L;
7146       replicationLag_ = 0L;
7147     }
7148     private byte memoizedIsInitialized = -1;
7149     public final boolean isInitialized() {
7150       byte isInitialized = memoizedIsInitialized;
7151       if (isInitialized != -1) return isInitialized == 1;
7152 
7153       if (!hasPeerID()) {
7154         memoizedIsInitialized = 0;
7155         return false;
7156       }
7157       if (!hasAgeOfLastShippedOp()) {
7158         memoizedIsInitialized = 0;
7159         return false;
7160       }
7161       if (!hasSizeOfLogQueue()) {
7162         memoizedIsInitialized = 0;
7163         return false;
7164       }
7165       if (!hasTimeStampOfLastShippedOp()) {
7166         memoizedIsInitialized = 0;
7167         return false;
7168       }
7169       if (!hasReplicationLag()) {
7170         memoizedIsInitialized = 0;
7171         return false;
7172       }
7173       memoizedIsInitialized = 1;
7174       return true;
7175     }
7176 
7177     public void writeTo(com.google.protobuf.CodedOutputStream output)
7178                         throws java.io.IOException {
7179       getSerializedSize();
7180       if (((bitField0_ & 0x00000001) == 0x00000001)) {
7181         output.writeBytes(1, getPeerIDBytes());
7182       }
7183       if (((bitField0_ & 0x00000002) == 0x00000002)) {
7184         output.writeUInt64(2, ageOfLastShippedOp_);
7185       }
7186       if (((bitField0_ & 0x00000004) == 0x00000004)) {
7187         output.writeUInt32(3, sizeOfLogQueue_);
7188       }
7189       if (((bitField0_ & 0x00000008) == 0x00000008)) {
7190         output.writeUInt64(4, timeStampOfLastShippedOp_);
7191       }
7192       if (((bitField0_ & 0x00000010) == 0x00000010)) {
7193         output.writeUInt64(5, replicationLag_);
7194       }
7195       getUnknownFields().writeTo(output);
7196     }
7197 
7198     private int memoizedSerializedSize = -1;
7199     public int getSerializedSize() {
7200       int size = memoizedSerializedSize;
7201       if (size != -1) return size;
7202 
7203       size = 0;
7204       if (((bitField0_ & 0x00000001) == 0x00000001)) {
7205         size += com.google.protobuf.CodedOutputStream
7206           .computeBytesSize(1, getPeerIDBytes());
7207       }
7208       if (((bitField0_ & 0x00000002) == 0x00000002)) {
7209         size += com.google.protobuf.CodedOutputStream
7210           .computeUInt64Size(2, ageOfLastShippedOp_);
7211       }
7212       if (((bitField0_ & 0x00000004) == 0x00000004)) {
7213         size += com.google.protobuf.CodedOutputStream
7214           .computeUInt32Size(3, sizeOfLogQueue_);
7215       }
7216       if (((bitField0_ & 0x00000008) == 0x00000008)) {
7217         size += com.google.protobuf.CodedOutputStream
7218           .computeUInt64Size(4, timeStampOfLastShippedOp_);
7219       }
7220       if (((bitField0_ & 0x00000010) == 0x00000010)) {
7221         size += com.google.protobuf.CodedOutputStream
7222           .computeUInt64Size(5, replicationLag_);
7223       }
7224       size += getUnknownFields().getSerializedSize();
7225       memoizedSerializedSize = size;
7226       return size;
7227     }
7228 
7229     private static final long serialVersionUID = 0L;
7230     @java.lang.Override
7231     protected java.lang.Object writeReplace()
7232         throws java.io.ObjectStreamException {
7233       return super.writeReplace();
7234     }
7235 
7236     @java.lang.Override
7237     public boolean equals(final java.lang.Object obj) {
7238       if (obj == this) {
7239        return true;
7240       }
7241       if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) {
7242         return super.equals(obj);
7243       }
7244       org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj;
7245 
7246       boolean result = true;
7247       result = result && (hasPeerID() == other.hasPeerID());
7248       if (hasPeerID()) {
7249         result = result && getPeerID()
7250             .equals(other.getPeerID());
7251       }
7252       result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp());
7253       if (hasAgeOfLastShippedOp()) {
7254         result = result && (getAgeOfLastShippedOp()
7255             == other.getAgeOfLastShippedOp());
7256       }
7257       result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue());
7258       if (hasSizeOfLogQueue()) {
7259         result = result && (getSizeOfLogQueue()
7260             == other.getSizeOfLogQueue());
7261       }
7262       result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp());
7263       if (hasTimeStampOfLastShippedOp()) {
7264         result = result && (getTimeStampOfLastShippedOp()
7265             == other.getTimeStampOfLastShippedOp());
7266       }
7267       result = result && (hasReplicationLag() == other.hasReplicationLag());
7268       if (hasReplicationLag()) {
7269         result = result && (getReplicationLag()
7270             == other.getReplicationLag());
7271       }
7272       result = result &&
7273           getUnknownFields().equals(other.getUnknownFields());
7274       return result;
7275     }
7276 
7277     private int memoizedHashCode = 0;
7278     @java.lang.Override
7279     public int hashCode() {
7280       if (memoizedHashCode != 0) {
7281         return memoizedHashCode;
7282       }
7283       int hash = 41;
7284       hash = (19 * hash) + getDescriptorForType().hashCode();
7285       if (hasPeerID()) {
7286         hash = (37 * hash) + PEERID_FIELD_NUMBER;
7287         hash = (53 * hash) + getPeerID().hashCode();
7288       }
7289       if (hasAgeOfLastShippedOp()) {
7290         hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER;
7291         hash = (53 * hash) + hashLong(getAgeOfLastShippedOp());
7292       }
7293       if (hasSizeOfLogQueue()) {
7294         hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER;
7295         hash = (53 * hash) + getSizeOfLogQueue();
7296       }
7297       if (hasTimeStampOfLastShippedOp()) {
7298         hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER;
7299         hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp());
7300       }
7301       if (hasReplicationLag()) {
7302         hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER;
7303         hash = (53 * hash) + hashLong(getReplicationLag());
7304       }
7305       hash = (29 * hash) + getUnknownFields().hashCode();
7306       memoizedHashCode = hash;
7307       return hash;
7308     }
7309 
7310     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7311         com.google.protobuf.ByteString data)
7312         throws com.google.protobuf.InvalidProtocolBufferException {
7313       return PARSER.parseFrom(data);
7314     }
7315     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7316         com.google.protobuf.ByteString data,
7317         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7318         throws com.google.protobuf.InvalidProtocolBufferException {
7319       return PARSER.parseFrom(data, extensionRegistry);
7320     }
7321     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data)
7322         throws com.google.protobuf.InvalidProtocolBufferException {
7323       return PARSER.parseFrom(data);
7324     }
7325     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7326         byte[] data,
7327         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7328         throws com.google.protobuf.InvalidProtocolBufferException {
7329       return PARSER.parseFrom(data, extensionRegistry);
7330     }
7331     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input)
7332         throws java.io.IOException {
7333       return PARSER.parseFrom(input);
7334     }
7335     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7336         java.io.InputStream input,
7337         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7338         throws java.io.IOException {
7339       return PARSER.parseFrom(input, extensionRegistry);
7340     }
7341     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input)
7342         throws java.io.IOException {
7343       return PARSER.parseDelimitedFrom(input);
7344     }
7345     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(
7346         java.io.InputStream input,
7347         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7348         throws java.io.IOException {
7349       return PARSER.parseDelimitedFrom(input, extensionRegistry);
7350     }
7351     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7352         com.google.protobuf.CodedInputStream input)
7353         throws java.io.IOException {
7354       return PARSER.parseFrom(input);
7355     }
7356     public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
7357         com.google.protobuf.CodedInputStream input,
7358         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7359         throws java.io.IOException {
7360       return PARSER.parseFrom(input, extensionRegistry);
7361     }
7362 
7363     public static Builder newBuilder() { return Builder.create(); }
7364     public Builder newBuilderForType() { return newBuilder(); }
7365     public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) {
7366       return newBuilder().mergeFrom(prototype);
7367     }
7368     public Builder toBuilder() { return newBuilder(this); }
7369 
7370     @java.lang.Override
7371     protected Builder newBuilderForType(
7372         com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7373       Builder builder = new Builder(parent);
7374       return builder;
7375     }
7376     /**
7377      * Protobuf type {@code hbase.pb.ReplicationLoadSource}
7378      */
7379     public static final class Builder extends
7380         com.google.protobuf.GeneratedMessage.Builder<Builder>
7381        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder {
7382       public static final com.google.protobuf.Descriptors.Descriptor
7383           getDescriptor() {
7384         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSource_descriptor;
7385       }
7386 
7387       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7388           internalGetFieldAccessorTable() {
7389         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSource_fieldAccessorTable
7390             .ensureFieldAccessorsInitialized(
7391                 org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
7392       }
7393 
7394       // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder()
7395       private Builder() {
7396         maybeForceBuilderInitialization();
7397       }
7398 
7399       private Builder(
7400           com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7401         super(parent);
7402         maybeForceBuilderInitialization();
7403       }
7404       private void maybeForceBuilderInitialization() {
7405         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7406         }
7407       }
7408       private static Builder create() {
7409         return new Builder();
7410       }
7411 
7412       public Builder clear() {
7413         super.clear();
7414         peerID_ = "";
7415         bitField0_ = (bitField0_ & ~0x00000001);
7416         ageOfLastShippedOp_ = 0L;
7417         bitField0_ = (bitField0_ & ~0x00000002);
7418         sizeOfLogQueue_ = 0;
7419         bitField0_ = (bitField0_ & ~0x00000004);
7420         timeStampOfLastShippedOp_ = 0L;
7421         bitField0_ = (bitField0_ & ~0x00000008);
7422         replicationLag_ = 0L;
7423         bitField0_ = (bitField0_ & ~0x00000010);
7424         return this;
7425       }
7426 
7427       public Builder clone() {
7428         return create().mergeFrom(buildPartial());
7429       }
7430 
7431       public com.google.protobuf.Descriptors.Descriptor
7432           getDescriptorForType() {
7433         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ReplicationLoadSource_descriptor;
7434       }
7435 
7436       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() {
7437         return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance();
7438       }
7439 
7440       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() {
7441         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial();
7442         if (!result.isInitialized()) {
7443           throw newUninitializedMessageException(result);
7444         }
7445         return result;
7446       }
7447 
7448       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() {
7449         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this);
7450         int from_bitField0_ = bitField0_;
7451         int to_bitField0_ = 0;
7452         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7453           to_bitField0_ |= 0x00000001;
7454         }
7455         result.peerID_ = peerID_;
7456         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7457           to_bitField0_ |= 0x00000002;
7458         }
7459         result.ageOfLastShippedOp_ = ageOfLastShippedOp_;
7460         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
7461           to_bitField0_ |= 0x00000004;
7462         }
7463         result.sizeOfLogQueue_ = sizeOfLogQueue_;
7464         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
7465           to_bitField0_ |= 0x00000008;
7466         }
7467         result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_;
7468         if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
7469           to_bitField0_ |= 0x00000010;
7470         }
7471         result.replicationLag_ = replicationLag_;
7472         result.bitField0_ = to_bitField0_;
7473         onBuilt();
7474         return result;
7475       }
7476 
7477       public Builder mergeFrom(com.google.protobuf.Message other) {
7478         if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) {
7479           return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other);
7480         } else {
7481           super.mergeFrom(other);
7482           return this;
7483         }
7484       }
7485 
7486       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) {
7487         if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this;
7488         if (other.hasPeerID()) {
7489           bitField0_ |= 0x00000001;
7490           peerID_ = other.peerID_;
7491           onChanged();
7492         }
7493         if (other.hasAgeOfLastShippedOp()) {
7494           setAgeOfLastShippedOp(other.getAgeOfLastShippedOp());
7495         }
7496         if (other.hasSizeOfLogQueue()) {
7497           setSizeOfLogQueue(other.getSizeOfLogQueue());
7498         }
7499         if (other.hasTimeStampOfLastShippedOp()) {
7500           setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp());
7501         }
7502         if (other.hasReplicationLag()) {
7503           setReplicationLag(other.getReplicationLag());
7504         }
7505         this.mergeUnknownFields(other.getUnknownFields());
7506         return this;
7507       }
7508 
7509       public final boolean isInitialized() {
7510         if (!hasPeerID()) {
7511           
7512           return false;
7513         }
7514         if (!hasAgeOfLastShippedOp()) {
7515           
7516           return false;
7517         }
7518         if (!hasSizeOfLogQueue()) {
7519           
7520           return false;
7521         }
7522         if (!hasTimeStampOfLastShippedOp()) {
7523           
7524           return false;
7525         }
7526         if (!hasReplicationLag()) {
7527           
7528           return false;
7529         }
7530         return true;
7531       }
7532 
7533       public Builder mergeFrom(
7534           com.google.protobuf.CodedInputStream input,
7535           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7536           throws java.io.IOException {
7537         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null;
7538         try {
7539           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7540         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7541           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage();
7542           throw e;
7543         } finally {
7544           if (parsedMessage != null) {
7545             mergeFrom(parsedMessage);
7546           }
7547         }
7548         return this;
7549       }
7550       private int bitField0_;
7551 
7552       // required string peerID = 1;
7553       private java.lang.Object peerID_ = "";
7554       /**
7555        * <code>required string peerID = 1;</code>
7556        */
7557       public boolean hasPeerID() {
7558         return ((bitField0_ & 0x00000001) == 0x00000001);
7559       }
7560       /**
7561        * <code>required string peerID = 1;</code>
7562        */
7563       public java.lang.String getPeerID() {
7564         java.lang.Object ref = peerID_;
7565         if (!(ref instanceof java.lang.String)) {
7566           java.lang.String s = ((com.google.protobuf.ByteString) ref)
7567               .toStringUtf8();
7568           peerID_ = s;
7569           return s;
7570         } else {
7571           return (java.lang.String) ref;
7572         }
7573       }
7574       /**
7575        * <code>required string peerID = 1;</code>
7576        */
7577       public com.google.protobuf.ByteString
7578           getPeerIDBytes() {
7579         java.lang.Object ref = peerID_;
7580         if (ref instanceof String) {
7581           com.google.protobuf.ByteString b = 
7582               com.google.protobuf.ByteString.copyFromUtf8(
7583                   (java.lang.String) ref);
7584           peerID_ = b;
7585           return b;
7586         } else {
7587           return (com.google.protobuf.ByteString) ref;
7588         }
7589       }
7590       /**
7591        * <code>required string peerID = 1;</code>
7592        */
7593       public Builder setPeerID(
7594           java.lang.String value) {
7595         if (value == null) {
7596     throw new NullPointerException();
7597   }
7598   bitField0_ |= 0x00000001;
7599         peerID_ = value;
7600         onChanged();
7601         return this;
7602       }
7603       /**
7604        * <code>required string peerID = 1;</code>
7605        */
7606       public Builder clearPeerID() {
7607         bitField0_ = (bitField0_ & ~0x00000001);
7608         peerID_ = getDefaultInstance().getPeerID();
7609         onChanged();
7610         return this;
7611       }
7612       /**
7613        * <code>required string peerID = 1;</code>
7614        */
7615       public Builder setPeerIDBytes(
7616           com.google.protobuf.ByteString value) {
7617         if (value == null) {
7618     throw new NullPointerException();
7619   }
7620   bitField0_ |= 0x00000001;
7621         peerID_ = value;
7622         onChanged();
7623         return this;
7624       }
7625 
7626       // required uint64 ageOfLastShippedOp = 2;
7627       private long ageOfLastShippedOp_ ;
7628       /**
7629        * <code>required uint64 ageOfLastShippedOp = 2;</code>
7630        */
7631       public boolean hasAgeOfLastShippedOp() {
7632         return ((bitField0_ & 0x00000002) == 0x00000002);
7633       }
7634       /**
7635        * <code>required uint64 ageOfLastShippedOp = 2;</code>
7636        */
7637       public long getAgeOfLastShippedOp() {
7638         return ageOfLastShippedOp_;
7639       }
7640       /**
7641        * <code>required uint64 ageOfLastShippedOp = 2;</code>
7642        */
7643       public Builder setAgeOfLastShippedOp(long value) {
7644         bitField0_ |= 0x00000002;
7645         ageOfLastShippedOp_ = value;
7646         onChanged();
7647         return this;
7648       }
7649       /**
7650        * <code>required uint64 ageOfLastShippedOp = 2;</code>
7651        */
7652       public Builder clearAgeOfLastShippedOp() {
7653         bitField0_ = (bitField0_ & ~0x00000002);
7654         ageOfLastShippedOp_ = 0L;
7655         onChanged();
7656         return this;
7657       }
7658 
7659       // required uint32 sizeOfLogQueue = 3;
7660       private int sizeOfLogQueue_ ;
7661       /**
7662        * <code>required uint32 sizeOfLogQueue = 3;</code>
7663        */
7664       public boolean hasSizeOfLogQueue() {
7665         return ((bitField0_ & 0x00000004) == 0x00000004);
7666       }
7667       /**
7668        * <code>required uint32 sizeOfLogQueue = 3;</code>
7669        */
7670       public int getSizeOfLogQueue() {
7671         return sizeOfLogQueue_;
7672       }
7673       /**
7674        * <code>required uint32 sizeOfLogQueue = 3;</code>
7675        */
7676       public Builder setSizeOfLogQueue(int value) {
7677         bitField0_ |= 0x00000004;
7678         sizeOfLogQueue_ = value;
7679         onChanged();
7680         return this;
7681       }
7682       /**
7683        * <code>required uint32 sizeOfLogQueue = 3;</code>
7684        */
7685       public Builder clearSizeOfLogQueue() {
7686         bitField0_ = (bitField0_ & ~0x00000004);
7687         sizeOfLogQueue_ = 0;
7688         onChanged();
7689         return this;
7690       }
7691 
7692       // required uint64 timeStampOfLastShippedOp = 4;
7693       private long timeStampOfLastShippedOp_ ;
7694       /**
7695        * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7696        */
7697       public boolean hasTimeStampOfLastShippedOp() {
7698         return ((bitField0_ & 0x00000008) == 0x00000008);
7699       }
7700       /**
7701        * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7702        */
7703       public long getTimeStampOfLastShippedOp() {
7704         return timeStampOfLastShippedOp_;
7705       }
7706       /**
7707        * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7708        */
7709       public Builder setTimeStampOfLastShippedOp(long value) {
7710         bitField0_ |= 0x00000008;
7711         timeStampOfLastShippedOp_ = value;
7712         onChanged();
7713         return this;
7714       }
7715       /**
7716        * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
7717        */
7718       public Builder clearTimeStampOfLastShippedOp() {
7719         bitField0_ = (bitField0_ & ~0x00000008);
7720         timeStampOfLastShippedOp_ = 0L;
7721         onChanged();
7722         return this;
7723       }
7724 
      // required uint64 replicationLag = 5;
      // Builder state for proto field 5; presence is tracked by bit 0x00000010
      // of bitField0_.
      private long replicationLag_ ;
      /**
       * <code>required uint64 replicationLag = 5;</code>
       */
      public boolean hasReplicationLag() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      /**
       * <code>required uint64 replicationLag = 5;</code>
       */
      public long getReplicationLag() {
        return replicationLag_;
      }
      /**
       * <code>required uint64 replicationLag = 5;</code>
       */
      public Builder setReplicationLag(long value) {
        // Record presence, store the value, and notify parent builders of the change.
        bitField0_ |= 0x00000010;
        replicationLag_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 replicationLag = 5;</code>
       */
      public Builder clearReplicationLag() {
        // Drop the has-bit and restore the proto default (0) for this field.
        bitField0_ = (bitField0_ & ~0x00000010);
        replicationLag_ = 0L;
        onChanged();
        return this;
      }
7757 
7758       // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationLoadSource)
7759     }
7760 
    // Builds the shared default ReplicationLoadSource eagerly at class load;
    // initFields() applies the proto-declared field defaults.
    static {
      defaultInstance = new ReplicationLoadSource(true);
      defaultInstance.initFields();
    }
7765 
7766     // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationLoadSource)
7767   }
7768 
7769   public interface ServerLoadOrBuilder
7770       extends com.google.protobuf.MessageOrBuilder {
7771 
7772     // optional uint64 number_of_requests = 1;
7773     /**
7774      * <code>optional uint64 number_of_requests = 1;</code>
7775      *
7776      * <pre>
7777      ** Number of requests since last report. 
7778      * </pre>
7779      */
7780     boolean hasNumberOfRequests();
7781     /**
7782      * <code>optional uint64 number_of_requests = 1;</code>
7783      *
7784      * <pre>
7785      ** Number of requests since last report. 
7786      * </pre>
7787      */
7788     long getNumberOfRequests();
7789 
7790     // optional uint64 total_number_of_requests = 2;
7791     /**
7792      * <code>optional uint64 total_number_of_requests = 2;</code>
7793      *
7794      * <pre>
7795      ** Total Number of requests from the start of the region server. 
7796      * </pre>
7797      */
7798     boolean hasTotalNumberOfRequests();
7799     /**
7800      * <code>optional uint64 total_number_of_requests = 2;</code>
7801      *
7802      * <pre>
7803      ** Total Number of requests from the start of the region server. 
7804      * </pre>
7805      */
7806     long getTotalNumberOfRequests();
7807 
7808     // optional uint32 used_heap_MB = 3;
7809     /**
7810      * <code>optional uint32 used_heap_MB = 3;</code>
7811      *
7812      * <pre>
7813      ** the amount of used heap, in MB. 
7814      * </pre>
7815      */
7816     boolean hasUsedHeapMB();
7817     /**
7818      * <code>optional uint32 used_heap_MB = 3;</code>
7819      *
7820      * <pre>
7821      ** the amount of used heap, in MB. 
7822      * </pre>
7823      */
7824     int getUsedHeapMB();
7825 
7826     // optional uint32 max_heap_MB = 4;
7827     /**
7828      * <code>optional uint32 max_heap_MB = 4;</code>
7829      *
7830      * <pre>
7831      ** the maximum allowable size of the heap, in MB. 
7832      * </pre>
7833      */
7834     boolean hasMaxHeapMB();
7835     /**
7836      * <code>optional uint32 max_heap_MB = 4;</code>
7837      *
7838      * <pre>
7839      ** the maximum allowable size of the heap, in MB. 
7840      * </pre>
7841      */
7842     int getMaxHeapMB();
7843 
7844     // repeated .hbase.pb.RegionLoad region_loads = 5;
7845     /**
7846      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
7847      *
7848      * <pre>
7849      ** Information on the load of individual regions. 
7850      * </pre>
7851      */
7852     java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> 
7853         getRegionLoadsList();
7854     /**
7855      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
7856      *
7857      * <pre>
7858      ** Information on the load of individual regions. 
7859      * </pre>
7860      */
7861     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index);
7862     /**
7863      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
7864      *
7865      * <pre>
7866      ** Information on the load of individual regions. 
7867      * </pre>
7868      */
7869     int getRegionLoadsCount();
7870     /**
7871      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
7872      *
7873      * <pre>
7874      ** Information on the load of individual regions. 
7875      * </pre>
7876      */
7877     java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
7878         getRegionLoadsOrBuilderList();
7879     /**
7880      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
7881      *
7882      * <pre>
7883      ** Information on the load of individual regions. 
7884      * </pre>
7885      */
7886     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
7887         int index);
7888 
7889     // repeated .hbase.pb.Coprocessor coprocessors = 6;
7890     /**
7891      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
7892      *
7893      * <pre>
7894      **
7895      * Regionserver-level coprocessors, e.g., WALObserver implementations.
7896      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
7897      * objects.
7898      * </pre>
7899      */
7900     java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> 
7901         getCoprocessorsList();
7902     /**
7903      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
7904      *
7905      * <pre>
7906      **
7907      * Regionserver-level coprocessors, e.g., WALObserver implementations.
7908      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
7909      * objects.
7910      * </pre>
7911      */
7912     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index);
7913     /**
7914      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
7915      *
7916      * <pre>
7917      **
7918      * Regionserver-level coprocessors, e.g., WALObserver implementations.
7919      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
7920      * objects.
7921      * </pre>
7922      */
7923     int getCoprocessorsCount();
7924     /**
7925      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
7926      *
7927      * <pre>
7928      **
7929      * Regionserver-level coprocessors, e.g., WALObserver implementations.
7930      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
7931      * objects.
7932      * </pre>
7933      */
7934     java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
7935         getCoprocessorsOrBuilderList();
7936     /**
7937      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
7938      *
7939      * <pre>
7940      **
7941      * Regionserver-level coprocessors, e.g., WALObserver implementations.
7942      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
7943      * objects.
7944      * </pre>
7945      */
7946     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
7947         int index);
7948 
7949     // optional uint64 report_start_time = 7;
7950     /**
7951      * <code>optional uint64 report_start_time = 7;</code>
7952      *
7953      * <pre>
7954      **
7955      * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
7956      * time is measured as the difference, measured in milliseconds, between the current time
7957      * and midnight, January 1, 1970 UTC.
7958      * </pre>
7959      */
7960     boolean hasReportStartTime();
7961     /**
7962      * <code>optional uint64 report_start_time = 7;</code>
7963      *
7964      * <pre>
7965      **
7966      * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
7967      * time is measured as the difference, measured in milliseconds, between the current time
7968      * and midnight, January 1, 1970 UTC.
7969      * </pre>
7970      */
7971     long getReportStartTime();
7972 
7973     // optional uint64 report_end_time = 8;
7974     /**
7975      * <code>optional uint64 report_end_time = 8;</code>
7976      *
7977      * <pre>
7978      **
7979      * Time when report was generated.
7980      * time is measured as the difference, measured in milliseconds, between the current time
7981      * and midnight, January 1, 1970 UTC.
7982      * </pre>
7983      */
7984     boolean hasReportEndTime();
7985     /**
7986      * <code>optional uint64 report_end_time = 8;</code>
7987      *
7988      * <pre>
7989      **
7990      * Time when report was generated.
7991      * time is measured as the difference, measured in milliseconds, between the current time
7992      * and midnight, January 1, 1970 UTC.
7993      * </pre>
7994      */
7995     long getReportEndTime();
7996 
7997     // optional uint32 info_server_port = 9;
7998     /**
7999      * <code>optional uint32 info_server_port = 9;</code>
8000      *
8001      * <pre>
8002      **
     * The port number that this region server is hosting an info server on.
8004      * </pre>
8005      */
8006     boolean hasInfoServerPort();
8007     /**
8008      * <code>optional uint32 info_server_port = 9;</code>
8009      *
8010      * <pre>
8011      **
     * The port number that this region server is hosting an info server on.
8013      * </pre>
8014      */
8015     int getInfoServerPort();
8016 
8017     // repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;
8018     /**
8019      * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
8020      *
8021      * <pre>
8022      **
8023      * The replicationLoadSource for the replication Source status of this region server.
8024      * </pre>
8025      */
8026     java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> 
8027         getReplLoadSourceList();
8028     /**
8029      * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
8030      *
8031      * <pre>
8032      **
8033      * The replicationLoadSource for the replication Source status of this region server.
8034      * </pre>
8035      */
8036     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index);
8037     /**
8038      * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
8039      *
8040      * <pre>
8041      **
8042      * The replicationLoadSource for the replication Source status of this region server.
8043      * </pre>
8044      */
8045     int getReplLoadSourceCount();
8046     /**
8047      * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
8048      *
8049      * <pre>
8050      **
8051      * The replicationLoadSource for the replication Source status of this region server.
8052      * </pre>
8053      */
8054     java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> 
8055         getReplLoadSourceOrBuilderList();
8056     /**
8057      * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
8058      *
8059      * <pre>
8060      **
8061      * The replicationLoadSource for the replication Source status of this region server.
8062      * </pre>
8063      */
8064     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
8065         int index);
8066 
8067     // optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;
8068     /**
8069      * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
8070      *
8071      * <pre>
8072      **
8073      * The replicationLoadSink for the replication Sink status of this region server.
8074      * </pre>
8075      */
8076     boolean hasReplLoadSink();
8077     /**
8078      * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
8079      *
8080      * <pre>
8081      **
8082      * The replicationLoadSink for the replication Sink status of this region server.
8083      * </pre>
8084      */
8085     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink();
8086     /**
8087      * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
8088      *
8089      * <pre>
8090      **
8091      * The replicationLoadSink for the replication Sink status of this region server.
8092      * </pre>
8093      */
8094     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder();
8095   }
8096   /**
8097    * Protobuf type {@code hbase.pb.ServerLoad}
8098    */
8099   public static final class ServerLoad extends
8100       com.google.protobuf.GeneratedMessage
8101       implements ServerLoadOrBuilder {
    // Use ServerLoad.newBuilder() to construct.
    private ServerLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // No-init constructor used only for the shared default instance; it installs
    // the empty UnknownFieldSet and performs no other initialization.
    private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Shared default instance; declared final with no initializer here, so it is
    // assigned exactly once in a static initializer of this class.
    private static final ServerLoad defaultInstance;
    public static ServerLoad getDefaultInstance() {
      return defaultInstance;
    }

    public ServerLoad getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields that arrived on the wire but are not defined in this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-format parsing constructor: reads tag/value pairs from {@code input}
     * until end of stream, storing unrecognized fields in {@code unknownFields}.
     * Invoked only through {@code PARSER}.
     *
     * @throws com.google.protobuf.InvalidProtocolBufferException on malformed input
     */
    private ServerLoad(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // Tracks which repeated-field lists were lazily allocated during this
      // parse (so they can be sealed in the finally block); distinct from
      // bitField0_, which records presence of the singular fields.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          // Each tag encodes (field_number << 3) | wire_type; tag 0 means end of stream.
          int tag = input.readTag();
          // NOTE: the generator emits the default clause before the numbered
          // cases; clause order in a Java switch has no effect on dispatch.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {  // field 1: number_of_requests (varint)
              bitField0_ |= 0x00000001;
              numberOfRequests_ = input.readUInt64();
              break;
            }
            case 16: {  // field 2: total_number_of_requests (varint)
              bitField0_ |= 0x00000002;
              totalNumberOfRequests_ = input.readUInt64();
              break;
            }
            case 24: {  // field 3: used_heap_MB (varint)
              bitField0_ |= 0x00000004;
              usedHeapMB_ = input.readUInt32();
              break;
            }
            case 32: {  // field 4: max_heap_MB (varint)
              bitField0_ |= 0x00000008;
              maxHeapMB_ = input.readUInt32();
              break;
            }
            case 42: {  // field 5: region_loads (length-delimited message)
              if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
                regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>();
                mutable_bitField0_ |= 0x00000010;
              }
              regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry));
              break;
            }
            case 50: {  // field 6: coprocessors (length-delimited message)
              if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
                coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
                mutable_bitField0_ |= 0x00000020;
              }
              coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
              break;
            }
            case 56: {  // field 7: report_start_time (varint)
              bitField0_ |= 0x00000010;
              reportStartTime_ = input.readUInt64();
              break;
            }
            case 64: {  // field 8: report_end_time (varint)
              bitField0_ |= 0x00000020;
              reportEndTime_ = input.readUInt64();
              break;
            }
            case 72: {  // field 9: info_server_port (varint)
              bitField0_ |= 0x00000040;
              infoServerPort_ = input.readUInt32();
              break;
            }
            case 82: {  // field 10: replLoadSource (length-delimited message)
              if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
                replLoadSource_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>();
                mutable_bitField0_ |= 0x00000200;
              }
              replLoadSource_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.PARSER, extensionRegistry));
              break;
            }
            case 90: {  // field 11: replLoadSink (length-delimited message)
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder subBuilder = null;
              if (((bitField0_ & 0x00000080) == 0x00000080)) {
                // Field already seen on the wire: merge the new message into the
                // previous value, per protobuf last-merge-wins semantics.
                subBuilder = replLoadSink_.toBuilder();
              }
              replLoadSink_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(replLoadSink_);
                replLoadSink_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000080;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Seal any repeated-field lists allocated above so the constructed
        // message is immutable, even when parsing ended with an exception.
        if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
          regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
        }
        if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
          coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
        }
        if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
          replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    /** Returns the message descriptor for {@code hbase.pb.ServerLoad}. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ServerLoad_descriptor;
    }

    // Supplies the reflection table that maps descriptor fields onto the
    // generated accessors of ServerLoad and ServerLoad.Builder.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ServerLoad_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
    }
8251 
    // Parser singleton that delegates to the wire-format parsing constructor.
    // Exposed as public static (non-final) by the generator's convention;
    // callers must not reassign it.
    public static com.google.protobuf.Parser<ServerLoad> PARSER =
        new com.google.protobuf.AbstractParser<ServerLoad>() {
      public ServerLoad parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ServerLoad(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<ServerLoad> getParserForType() {
      return PARSER;
    }
8266 
8267     private int bitField0_;
8268     // optional uint64 number_of_requests = 1;
8269     public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1;
8270     private long numberOfRequests_;
8271     /**
8272      * <code>optional uint64 number_of_requests = 1;</code>
8273      *
8274      * <pre>
8275      ** Number of requests since last report. 
8276      * </pre>
8277      */
8278     public boolean hasNumberOfRequests() {
8279       return ((bitField0_ & 0x00000001) == 0x00000001);
8280     }
8281     /**
8282      * <code>optional uint64 number_of_requests = 1;</code>
8283      *
8284      * <pre>
8285      ** Number of requests since last report. 
8286      * </pre>
8287      */
8288     public long getNumberOfRequests() {
8289       return numberOfRequests_;
8290     }
8291 
8292     // optional uint64 total_number_of_requests = 2;
8293     public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2;
8294     private long totalNumberOfRequests_;
8295     /**
8296      * <code>optional uint64 total_number_of_requests = 2;</code>
8297      *
8298      * <pre>
8299      ** Total Number of requests from the start of the region server. 
8300      * </pre>
8301      */
8302     public boolean hasTotalNumberOfRequests() {
8303       return ((bitField0_ & 0x00000002) == 0x00000002);
8304     }
8305     /**
8306      * <code>optional uint64 total_number_of_requests = 2;</code>
8307      *
8308      * <pre>
8309      ** Total Number of requests from the start of the region server. 
8310      * </pre>
8311      */
8312     public long getTotalNumberOfRequests() {
8313       return totalNumberOfRequests_;
8314     }
8315 
8316     // optional uint32 used_heap_MB = 3;
8317     public static final int USED_HEAP_MB_FIELD_NUMBER = 3;
8318     private int usedHeapMB_;
8319     /**
8320      * <code>optional uint32 used_heap_MB = 3;</code>
8321      *
8322      * <pre>
8323      ** the amount of used heap, in MB. 
8324      * </pre>
8325      */
8326     public boolean hasUsedHeapMB() {
8327       return ((bitField0_ & 0x00000004) == 0x00000004);
8328     }
8329     /**
8330      * <code>optional uint32 used_heap_MB = 3;</code>
8331      *
8332      * <pre>
8333      ** the amount of used heap, in MB. 
8334      * </pre>
8335      */
8336     public int getUsedHeapMB() {
8337       return usedHeapMB_;
8338     }
8339 
8340     // optional uint32 max_heap_MB = 4;
8341     public static final int MAX_HEAP_MB_FIELD_NUMBER = 4;
8342     private int maxHeapMB_;
8343     /**
8344      * <code>optional uint32 max_heap_MB = 4;</code>
8345      *
8346      * <pre>
8347      ** the maximum allowable size of the heap, in MB. 
8348      * </pre>
8349      */
8350     public boolean hasMaxHeapMB() {
8351       return ((bitField0_ & 0x00000008) == 0x00000008);
8352     }
8353     /**
8354      * <code>optional uint32 max_heap_MB = 4;</code>
8355      *
8356      * <pre>
8357      ** the maximum allowable size of the heap, in MB. 
8358      * </pre>
8359      */
8360     public int getMaxHeapMB() {
8361       return maxHeapMB_;
8362     }
8363 
8364     // repeated .hbase.pb.RegionLoad region_loads = 5;
8365     public static final int REGION_LOADS_FIELD_NUMBER = 5;
8366     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_;
8367     /**
8368      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
8369      *
8370      * <pre>
8371      ** Information on the load of individual regions. 
8372      * </pre>
8373      */
8374     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
8375       return regionLoads_;
8376     }
8377     /**
8378      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
8379      *
8380      * <pre>
8381      ** Information on the load of individual regions. 
8382      * </pre>
8383      */
8384     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
8385         getRegionLoadsOrBuilderList() {
8386       return regionLoads_;
8387     }
8388     /**
8389      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
8390      *
8391      * <pre>
8392      ** Information on the load of individual regions. 
8393      * </pre>
8394      */
8395     public int getRegionLoadsCount() {
8396       return regionLoads_.size();
8397     }
8398     /**
8399      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
8400      *
8401      * <pre>
8402      ** Information on the load of individual regions. 
8403      * </pre>
8404      */
8405     public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
8406       return regionLoads_.get(index);
8407     }
8408     /**
8409      * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
8410      *
8411      * <pre>
8412      ** Information on the load of individual regions. 
8413      * </pre>
8414      */
8415     public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
8416         int index) {
8417       return regionLoads_.get(index);
8418     }
8419 
8420     // repeated .hbase.pb.Coprocessor coprocessors = 6;
8421     public static final int COPROCESSORS_FIELD_NUMBER = 6;
8422     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> coprocessors_;
8423     /**
8424      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
8425      *
8426      * <pre>
8427      **
8428      * Regionserver-level coprocessors, e.g., WALObserver implementations.
8429      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
8430      * objects.
8431      * </pre>
8432      */
8433     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getCoprocessorsList() {
8434       return coprocessors_;
8435     }
8436     /**
8437      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
8438      *
8439      * <pre>
8440      **
8441      * Regionserver-level coprocessors, e.g., WALObserver implementations.
8442      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
8443      * objects.
8444      * </pre>
8445      */
8446     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
8447         getCoprocessorsOrBuilderList() {
8448       return coprocessors_;
8449     }
8450     /**
8451      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
8452      *
8453      * <pre>
8454      **
8455      * Regionserver-level coprocessors, e.g., WALObserver implementations.
8456      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
8457      * objects.
8458      * </pre>
8459      */
8460     public int getCoprocessorsCount() {
8461       return coprocessors_.size();
8462     }
8463     /**
8464      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
8465      *
8466      * <pre>
8467      **
8468      * Regionserver-level coprocessors, e.g., WALObserver implementations.
8469      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
8470      * objects.
8471      * </pre>
8472      */
8473     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) {
8474       return coprocessors_.get(index);
8475     }
8476     /**
8477      * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
8478      *
8479      * <pre>
8480      **
8481      * Regionserver-level coprocessors, e.g., WALObserver implementations.
8482      * Region-level coprocessors, on the other hand, are stored inside RegionLoad
8483      * objects.
8484      * </pre>
8485      */
8486     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
8487         int index) {
8488       return coprocessors_.get(index);
8489     }
8490 
8491     // optional uint64 report_start_time = 7;
8492     public static final int REPORT_START_TIME_FIELD_NUMBER = 7;
8493     private long reportStartTime_;
8494     /**
8495      * <code>optional uint64 report_start_time = 7;</code>
8496      *
8497      * <pre>
8498      **
8499      * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
8500      * time is measured as the difference, measured in milliseconds, between the current time
8501      * and midnight, January 1, 1970 UTC.
8502      * </pre>
8503      */
8504     public boolean hasReportStartTime() {
8505       return ((bitField0_ & 0x00000010) == 0x00000010);
8506     }
8507     /**
8508      * <code>optional uint64 report_start_time = 7;</code>
8509      *
8510      * <pre>
8511      **
8512      * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
8513      * time is measured as the difference, measured in milliseconds, between the current time
8514      * and midnight, January 1, 1970 UTC.
8515      * </pre>
8516      */
8517     public long getReportStartTime() {
8518       return reportStartTime_;
8519     }
8520 
8521     // optional uint64 report_end_time = 8;
8522     public static final int REPORT_END_TIME_FIELD_NUMBER = 8;
8523     private long reportEndTime_;
8524     /**
8525      * <code>optional uint64 report_end_time = 8;</code>
8526      *
8527      * <pre>
8528      **
8529      * Time when report was generated.
8530      * time is measured as the difference, measured in milliseconds, between the current time
8531      * and midnight, January 1, 1970 UTC.
8532      * </pre>
8533      */
8534     public boolean hasReportEndTime() {
8535       return ((bitField0_ & 0x00000020) == 0x00000020);
8536     }
8537     /**
8538      * <code>optional uint64 report_end_time = 8;</code>
8539      *
8540      * <pre>
8541      **
8542      * Time when report was generated.
8543      * time is measured as the difference, measured in milliseconds, between the current time
8544      * and midnight, January 1, 1970 UTC.
8545      * </pre>
8546      */
8547     public long getReportEndTime() {
8548       return reportEndTime_;
8549     }
8550 
    // optional uint32 info_server_port = 9;
    public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9;
    // Presence tracked by bit 0x00000040 of bitField0_.
    private int infoServerPort_;
    /**
     * <code>optional uint32 info_server_port = 9;</code>
     *
     * <pre>
     **
     * The port number that this region server is hosting an info server on.
     * </pre>
     */
    public boolean hasInfoServerPort() {
      return ((bitField0_ & 0x00000040) == 0x00000040);
    }
    /**
     * <code>optional uint32 info_server_port = 9;</code>
     *
     * <pre>
     **
     * The port number that this region server is hosting an info server on.
     * </pre>
     */
    public int getInfoServerPort() {
      return infoServerPort_;
    }
8576 
    // repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;
    public static final int REPLLOADSOURCE_FIELD_NUMBER = 10;
    // Immutable once the message is built; no presence bit — emptiness means "unset".
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> replLoadSource_;
    /**
     * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
     *
     * <pre>
     **
     * The replicationLoadSource for the replication Source status of this region server.
     * </pre>
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> getReplLoadSourceList() {
      return replLoadSource_;
    }
    /**
     * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
     *
     * <pre>
     **
     * The replicationLoadSource for the replication Source status of this region server.
     * </pre>
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> 
        getReplLoadSourceOrBuilderList() {
      // Same backing list, viewed through the OrBuilder interface.
      return replLoadSource_;
    }
    /**
     * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
     *
     * <pre>
     **
     * The replicationLoadSource for the replication Source status of this region server.
     * </pre>
     */
    public int getReplLoadSourceCount() {
      return replLoadSource_.size();
    }
    /**
     * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
     *
     * <pre>
     **
     * The replicationLoadSource for the replication Source status of this region server.
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) {
      return replLoadSource_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
     *
     * <pre>
     **
     * The replicationLoadSource for the replication Source status of this region server.
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
        int index) {
      return replLoadSource_.get(index);
    }
8637 
    // optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;
    public static final int REPLLOADSINK_FIELD_NUMBER = 11;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_;
    /**
     * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
     *
     * <pre>
     **
     * The replicationLoadSink for the replication Sink status of this region server.
     * </pre>
     */
    public boolean hasReplLoadSink() {
      // Presence flag: bit 0x80 of bitField0_.
      return ((bitField0_ & 0x00000080) == 0x00000080);
    }
    /**
     * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
     *
     * <pre>
     **
     * The replicationLoadSink for the replication Sink status of this region server.
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() {
      // When unset this holds the default instance (see initFields()), never null.
      return replLoadSink_;
    }
    /**
     * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
     *
     * <pre>
     **
     * The replicationLoadSink for the replication Sink status of this region server.
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() {
      return replLoadSink_;
    }
8674 
    // Resets every field to its proto default: 0 for scalars, empty lists for
    // repeated fields, and the default instance for the message-typed replLoadSink.
    private void initFields() {
      numberOfRequests_ = 0L;
      totalNumberOfRequests_ = 0L;
      usedHeapMB_ = 0;
      maxHeapMB_ = 0;
      regionLoads_ = java.util.Collections.emptyList();
      coprocessors_ = java.util.Collections.emptyList();
      reportStartTime_ = 0L;
      reportEndTime_ = 0L;
      infoServerPort_ = 0;
      replLoadSource_ = java.util.Collections.emptyList();
      replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
    }
    // Memoized result of isInitialized(): -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    // A ServerLoad is initialized when every nested message (region loads,
    // coprocessors, replication sources, and the optional sink) is initialized;
    // ServerLoad itself has no required scalar fields to check.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      for (int i = 0; i < getRegionLoadsCount(); i++) {
        if (!getRegionLoads(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getCoprocessorsCount(); i++) {
        if (!getCoprocessors(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getReplLoadSourceCount(); i++) {
        if (!getReplLoadSource(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasReplLoadSink()) {
        if (!getReplLoadSink().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
8720 
    // Serializes set fields in field-number order (1..11) to the wire.
    // Optional fields are emitted only when their presence bit is set;
    // repeated fields are emitted once per element.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Ensures memoizedSerializedSize is populated before writing (protobuf convention).
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, numberOfRequests_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, totalNumberOfRequests_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, usedHeapMB_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeUInt32(4, maxHeapMB_);
      }
      for (int i = 0; i < regionLoads_.size(); i++) {
        output.writeMessage(5, regionLoads_.get(i));
      }
      for (int i = 0; i < coprocessors_.size(); i++) {
        output.writeMessage(6, coprocessors_.get(i));
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        output.writeUInt64(7, reportStartTime_);
      }
      if (((bitField0_ & 0x00000020) == 0x00000020)) {
        output.writeUInt64(8, reportEndTime_);
      }
      if (((bitField0_ & 0x00000040) == 0x00000040)) {
        output.writeUInt32(9, infoServerPort_);
      }
      for (int i = 0; i < replLoadSource_.size(); i++) {
        output.writeMessage(10, replLoadSource_.get(i));
      }
      if (((bitField0_ & 0x00000080) == 0x00000080)) {
        output.writeMessage(11, replLoadSink_);
      }
      // Fields not known to this schema version are preserved and re-emitted here.
      getUnknownFields().writeTo(output);
    }
8759 
    // Cached wire size; -1 means not yet computed. Safe to cache because the
    // built message is immutable.
    private int memoizedSerializedSize = -1;
    // Computes the exact byte size writeTo() will produce, summing per-field
    // sizes for set/repeated fields plus any preserved unknown fields.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, numberOfRequests_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, totalNumberOfRequests_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, usedHeapMB_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(4, maxHeapMB_);
      }
      for (int i = 0; i < regionLoads_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(5, regionLoads_.get(i));
      }
      for (int i = 0; i < coprocessors_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(6, coprocessors_.get(i));
      }
      if (((bitField0_ & 0x00000010) == 0x00000010)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(7, reportStartTime_);
      }
      if (((bitField0_ & 0x00000020) == 0x00000020)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(8, reportEndTime_);
      }
      if (((bitField0_ & 0x00000040) == 0x00000040)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(9, infoServerPort_);
      }
      for (int i = 0; i < replLoadSource_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(10, replLoadSource_.get(i));
      }
      if (((bitField0_ & 0x00000080) == 0x00000080)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(11, replLoadSink_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
8814 
    private static final long serialVersionUID = 0L;
    // Delegates Java serialization to the superclass replacement object
    // (GeneratedMessage's serialized-form proxy).
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
8821 
    // Field-by-field equality: for each optional field, both presence and value
    // must match; repeated fields compare as lists; unknown fields must match too.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj;

      boolean result = true;
      result = result && (hasNumberOfRequests() == other.hasNumberOfRequests());
      if (hasNumberOfRequests()) {
        result = result && (getNumberOfRequests()
            == other.getNumberOfRequests());
      }
      result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests());
      if (hasTotalNumberOfRequests()) {
        result = result && (getTotalNumberOfRequests()
            == other.getTotalNumberOfRequests());
      }
      result = result && (hasUsedHeapMB() == other.hasUsedHeapMB());
      if (hasUsedHeapMB()) {
        result = result && (getUsedHeapMB()
            == other.getUsedHeapMB());
      }
      result = result && (hasMaxHeapMB() == other.hasMaxHeapMB());
      if (hasMaxHeapMB()) {
        result = result && (getMaxHeapMB()
            == other.getMaxHeapMB());
      }
      result = result && getRegionLoadsList()
          .equals(other.getRegionLoadsList());
      result = result && getCoprocessorsList()
          .equals(other.getCoprocessorsList());
      result = result && (hasReportStartTime() == other.hasReportStartTime());
      if (hasReportStartTime()) {
        result = result && (getReportStartTime()
            == other.getReportStartTime());
      }
      result = result && (hasReportEndTime() == other.hasReportEndTime());
      if (hasReportEndTime()) {
        result = result && (getReportEndTime()
            == other.getReportEndTime());
      }
      result = result && (hasInfoServerPort() == other.hasInfoServerPort());
      if (hasInfoServerPort()) {
        result = result && (getInfoServerPort()
            == other.getInfoServerPort());
      }
      result = result && getReplLoadSourceList()
          .equals(other.getReplLoadSourceList());
      result = result && (hasReplLoadSink() == other.hasReplLoadSink());
      if (hasReplLoadSink()) {
        result = result && getReplLoadSink()
            .equals(other.getReplLoadSink());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
8883 
    // Cached hash; 0 means not yet computed (a real hash of 0 would simply be recomputed).
    private int memoizedHashCode = 0;
    // Hash mixes the descriptor, then (field number, value) for each set field,
    // keeping hashCode consistent with the field-wise equals() above.
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasNumberOfRequests()) {
        hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getNumberOfRequests());
      }
      if (hasTotalNumberOfRequests()) {
        hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getTotalNumberOfRequests());
      }
      if (hasUsedHeapMB()) {
        hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER;
        hash = (53 * hash) + getUsedHeapMB();
      }
      if (hasMaxHeapMB()) {
        hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER;
        hash = (53 * hash) + getMaxHeapMB();
      }
      if (getRegionLoadsCount() > 0) {
        hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER;
        hash = (53 * hash) + getRegionLoadsList().hashCode();
      }
      if (getCoprocessorsCount() > 0) {
        hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
        hash = (53 * hash) + getCoprocessorsList().hashCode();
      }
      if (hasReportStartTime()) {
        hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getReportStartTime());
      }
      if (hasReportEndTime()) {
        hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getReportEndTime());
      }
      if (hasInfoServerPort()) {
        hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER;
        hash = (53 * hash) + getInfoServerPort();
      }
      if (getReplLoadSourceCount() > 0) {
        hash = (37 * hash) + REPLLOADSOURCE_FIELD_NUMBER;
        hash = (53 * hash) + getReplLoadSourceList().hashCode();
      }
      if (hasReplLoadSink()) {
        hash = (37 * hash) + REPLLOADSINK_FIELD_NUMBER;
        hash = (53 * hash) + getReplLoadSink().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
8940 
    // Static parse entry points, all delegating to PARSER. Overloads accept
    // ByteString, byte[], InputStream, or CodedInputStream, each with an
    // optional ExtensionRegistryLite; the *Delimited variants read a
    // varint length prefix before the message.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
8993 
    // Builder factories: fresh builder, builder pre-populated from a prototype,
    // and toBuilder() for editing a copy of this message.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: creates a builder attached to a parent for change notification.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
9007     /**
9008      * Protobuf type {@code hbase.pb.ServerLoad}
9009      */
9010     public static final class Builder extends
9011         com.google.protobuf.GeneratedMessage.Builder<Builder>
9012        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder {
      // Descriptor for hbase.pb.ServerLoad, used for reflection-based access.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ServerLoad_descriptor;
      }

      // Maps descriptor fields to the generated accessor methods of
      // ServerLoad and ServerLoad.Builder.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ServerLoad_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
      }
9024 
      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      // Variant used by newBuilderForType(parent) so the builder can notify
      // its parent of changes.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested-field builders when the runtime requires it
      // (alwaysUseFieldBuilders is set in some GeneratedMessage configurations).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getRegionLoadsFieldBuilder();
          getCoprocessorsFieldBuilder();
          getReplLoadSourceFieldBuilder();
          getReplLoadSinkFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
9046 
      // Resets every field to its default and clears all presence bits.
      // Note the builder's bit layout differs from the message's: repeated
      // fields also consume bits here (0x10 regionLoads, 0x20 coprocessors,
      // 0x200 replLoadSource) to track list mutability.
      public Builder clear() {
        super.clear();
        numberOfRequests_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        totalNumberOfRequests_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        usedHeapMB_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        maxHeapMB_ = 0;
        bitField0_ = (bitField0_ & ~0x00000008);
        if (regionLoadsBuilder_ == null) {
          regionLoads_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000010);
        } else {
          regionLoadsBuilder_.clear();
        }
        if (coprocessorsBuilder_ == null) {
          coprocessors_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
        } else {
          coprocessorsBuilder_.clear();
        }
        reportStartTime_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000040);
        reportEndTime_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000080);
        infoServerPort_ = 0;
        bitField0_ = (bitField0_ & ~0x00000100);
        if (replLoadSourceBuilder_ == null) {
          replLoadSource_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000200);
        } else {
          replLoadSourceBuilder_.clear();
        }
        if (replLoadSinkBuilder_ == null) {
          replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
        } else {
          replLoadSinkBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000400);
        return this;
      }
9089 
      // Deep copy: builds a partial message from this builder and merges it
      // into a fresh builder.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ServerLoad_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
      }
9102 
      // Builds the message, throwing UninitializedMessageException if any
      // nested message is missing required fields (see isInitialized()).
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
9110 
      // Builds the message without the initialization check. Copies each field
      // into the result, translating the builder's bit layout into the message's
      // (e.g. builder bit 0x40 for reportStartTime becomes message bit 0x10,
      // because repeated fields consume bits in the builder but not the message).
      // Repeated lists are made unmodifiable so the built message is immutable.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.numberOfRequests_ = numberOfRequests_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.totalNumberOfRequests_ = totalNumberOfRequests_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.usedHeapMB_ = usedHeapMB_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.maxHeapMB_ = maxHeapMB_;
        if (regionLoadsBuilder_ == null) {
          if (((bitField0_ & 0x00000010) == 0x00000010)) {
            regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
            bitField0_ = (bitField0_ & ~0x00000010);
          }
          result.regionLoads_ = regionLoads_;
        } else {
          result.regionLoads_ = regionLoadsBuilder_.build();
        }
        if (coprocessorsBuilder_ == null) {
          if (((bitField0_ & 0x00000020) == 0x00000020)) {
            coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
            bitField0_ = (bitField0_ & ~0x00000020);
          }
          result.coprocessors_ = coprocessors_;
        } else {
          result.coprocessors_ = coprocessorsBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
          to_bitField0_ |= 0x00000010;
        }
        result.reportStartTime_ = reportStartTime_;
        if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
          to_bitField0_ |= 0x00000020;
        }
        result.reportEndTime_ = reportEndTime_;
        if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
          to_bitField0_ |= 0x00000040;
        }
        result.infoServerPort_ = infoServerPort_;
        if (replLoadSourceBuilder_ == null) {
          if (((bitField0_ & 0x00000200) == 0x00000200)) {
            replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_);
            bitField0_ = (bitField0_ & ~0x00000200);
          }
          result.replLoadSource_ = replLoadSource_;
        } else {
          result.replLoadSource_ = replLoadSourceBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
          to_bitField0_ |= 0x00000080;
        }
        if (replLoadSinkBuilder_ == null) {
          result.replLoadSink_ = replLoadSink_;
        } else {
          result.replLoadSink_ = replLoadSinkBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
9182 
      // Dispatches to the typed mergeFrom when given a ServerLoad; otherwise
      // falls back to the reflection-based merge in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
9191 
      // Merges another ServerLoad into this builder: set scalar fields in
      // 'other' overwrite ours; repeated fields are appended. For repeated
      // fields the merge shares 'other''s immutable list when ours is empty
      // (copy-on-write — the cleared presence bit marks the list as borrowed),
      // and appends otherwise. When a field builder exists, elements are routed
      // through it instead.
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this;
        if (other.hasNumberOfRequests()) {
          setNumberOfRequests(other.getNumberOfRequests());
        }
        if (other.hasTotalNumberOfRequests()) {
          setTotalNumberOfRequests(other.getTotalNumberOfRequests());
        }
        if (other.hasUsedHeapMB()) {
          setUsedHeapMB(other.getUsedHeapMB());
        }
        if (other.hasMaxHeapMB()) {
          setMaxHeapMB(other.getMaxHeapMB());
        }
        if (regionLoadsBuilder_ == null) {
          if (!other.regionLoads_.isEmpty()) {
            if (regionLoads_.isEmpty()) {
              regionLoads_ = other.regionLoads_;
              bitField0_ = (bitField0_ & ~0x00000010);
            } else {
              ensureRegionLoadsIsMutable();
              regionLoads_.addAll(other.regionLoads_);
            }
            onChanged();
          }
        } else {
          if (!other.regionLoads_.isEmpty()) {
            if (regionLoadsBuilder_.isEmpty()) {
              // Builder is empty: drop it and borrow other's list directly,
              // recreating the field builder only if the runtime demands it.
              regionLoadsBuilder_.dispose();
              regionLoadsBuilder_ = null;
              regionLoads_ = other.regionLoads_;
              bitField0_ = (bitField0_ & ~0x00000010);
              regionLoadsBuilder_ = 
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getRegionLoadsFieldBuilder() : null;
            } else {
              regionLoadsBuilder_.addAllMessages(other.regionLoads_);
            }
          }
        }
        if (coprocessorsBuilder_ == null) {
          if (!other.coprocessors_.isEmpty()) {
            if (coprocessors_.isEmpty()) {
              coprocessors_ = other.coprocessors_;
              bitField0_ = (bitField0_ & ~0x00000020);
            } else {
              ensureCoprocessorsIsMutable();
              coprocessors_.addAll(other.coprocessors_);
            }
            onChanged();
          }
        } else {
          if (!other.coprocessors_.isEmpty()) {
            if (coprocessorsBuilder_.isEmpty()) {
              coprocessorsBuilder_.dispose();
              coprocessorsBuilder_ = null;
              coprocessors_ = other.coprocessors_;
              bitField0_ = (bitField0_ & ~0x00000020);
              coprocessorsBuilder_ = 
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getCoprocessorsFieldBuilder() : null;
            } else {
              coprocessorsBuilder_.addAllMessages(other.coprocessors_);
            }
          }
        }
        if (other.hasReportStartTime()) {
          setReportStartTime(other.getReportStartTime());
        }
        if (other.hasReportEndTime()) {
          setReportEndTime(other.getReportEndTime());
        }
        if (other.hasInfoServerPort()) {
          setInfoServerPort(other.getInfoServerPort());
        }
        if (replLoadSourceBuilder_ == null) {
          if (!other.replLoadSource_.isEmpty()) {
            if (replLoadSource_.isEmpty()) {
              replLoadSource_ = other.replLoadSource_;
              bitField0_ = (bitField0_ & ~0x00000200);
            } else {
              ensureReplLoadSourceIsMutable();
              replLoadSource_.addAll(other.replLoadSource_);
            }
            onChanged();
          }
        } else {
          if (!other.replLoadSource_.isEmpty()) {
            if (replLoadSourceBuilder_.isEmpty()) {
              replLoadSourceBuilder_.dispose();
              replLoadSourceBuilder_ = null;
              replLoadSource_ = other.replLoadSource_;
              bitField0_ = (bitField0_ & ~0x00000200);
              replLoadSourceBuilder_ = 
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getReplLoadSourceFieldBuilder() : null;
            } else {
              replLoadSourceBuilder_.addAllMessages(other.replLoadSource_);
            }
          }
        }
        if (other.hasReplLoadSink()) {
          mergeReplLoadSink(other.getReplLoadSink());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
9299 
9300       public final boolean isInitialized() {
      // ServerLoad has no required scalar fields of its own, so
      // initialization hinges entirely on the nested messages: every
      // region load, coprocessor, and replication-source entry must be
      // initialized, as must the optional replication sink when present.
9301         for (int i = 0; i < getRegionLoadsCount(); i++) {
9302           if (!getRegionLoads(i).isInitialized()) {
9303             
9304             return false;
9305           }
9306         }
9307         for (int i = 0; i < getCoprocessorsCount(); i++) {
9308           if (!getCoprocessors(i).isInitialized()) {
9309             
9310             return false;
9311           }
9312         }
9313         for (int i = 0; i < getReplLoadSourceCount(); i++) {
9314           if (!getReplLoadSource(i).isInitialized()) {
9315             
9316             return false;
9317           }
9318         }
9319         if (hasReplLoadSink()) {
9320           if (!getReplLoadSink().isInitialized()) {
9321             
9322             return false;
9323           }
9324         }
9325         return true;
9326       }
9327 
9328       public Builder mergeFrom(
9329           com.google.protobuf.CodedInputStream input,
9330           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9331           throws java.io.IOException {
      // Parses a wire-format ServerLoad from the stream and merges it into
      // this builder. If parsing fails with InvalidProtocolBufferException,
      // the partially-parsed message recovered via getUnfinishedMessage()
      // is still merged in the finally block before the exception
      // propagates — standard protobuf generated-code semantics.
9332         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parsedMessage = null;
9333         try {
9334           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9335         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9336           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) e.getUnfinishedMessage();
9337           throw e;
9338         } finally {
9339           if (parsedMessage != null) {
9340             mergeFrom(parsedMessage);
9341           }
9342         }
9343         return this;
9344       }
9345       private int bitField0_;
      // bitField0_ records which fields of the builder have been explicitly
      // set; each field below owns one bit (0x01, 0x02, ...).
9346 
9347       // optional uint64 number_of_requests = 1;
9348       private long numberOfRequests_ ;
      // Standard generated has/get/set/clear quartet for field 1
      // (presence bit 0x00000001). setNumberOfRequests/clearNumberOfRequests
      // call onChanged() so parent builders see the modification.
9349       /**
9350        * <code>optional uint64 number_of_requests = 1;</code>
9351        *
9352        * <pre>
9353        ** Number of requests since last report. 
9354        * </pre>
9355        */
9356       public boolean hasNumberOfRequests() {
9357         return ((bitField0_ & 0x00000001) == 0x00000001);
9358       }
9359       /**
9360        * <code>optional uint64 number_of_requests = 1;</code>
9361        *
9362        * <pre>
9363        ** Number of requests since last report. 
9364        * </pre>
9365        */
9366       public long getNumberOfRequests() {
9367         return numberOfRequests_;
9368       }
9369       /**
9370        * <code>optional uint64 number_of_requests = 1;</code>
9371        *
9372        * <pre>
9373        ** Number of requests since last report. 
9374        * </pre>
9375        */
9376       public Builder setNumberOfRequests(long value) {
9377         bitField0_ |= 0x00000001;
9378         numberOfRequests_ = value;
9379         onChanged();
9380         return this;
9381       }
9382       /**
9383        * <code>optional uint64 number_of_requests = 1;</code>
9384        *
9385        * <pre>
9386        ** Number of requests since last report. 
9387        * </pre>
9388        */
9389       public Builder clearNumberOfRequests() {
9390         bitField0_ = (bitField0_ & ~0x00000001);
9391         numberOfRequests_ = 0L;
9392         onChanged();
9393         return this;
9394       }
9395 
9396       // optional uint64 total_number_of_requests = 2;
9397       private long totalNumberOfRequests_ ;
      // Generated has/get/set/clear quartet for field 2
      // (presence bit 0x00000002); clear resets the value to 0L.
9398       /**
9399        * <code>optional uint64 total_number_of_requests = 2;</code>
9400        *
9401        * <pre>
9402        ** Total Number of requests from the start of the region server. 
9403        * </pre>
9404        */
9405       public boolean hasTotalNumberOfRequests() {
9406         return ((bitField0_ & 0x00000002) == 0x00000002);
9407       }
9408       /**
9409        * <code>optional uint64 total_number_of_requests = 2;</code>
9410        *
9411        * <pre>
9412        ** Total Number of requests from the start of the region server. 
9413        * </pre>
9414        */
9415       public long getTotalNumberOfRequests() {
9416         return totalNumberOfRequests_;
9417       }
9418       /**
9419        * <code>optional uint64 total_number_of_requests = 2;</code>
9420        *
9421        * <pre>
9422        ** Total Number of requests from the start of the region server. 
9423        * </pre>
9424        */
9425       public Builder setTotalNumberOfRequests(long value) {
9426         bitField0_ |= 0x00000002;
9427         totalNumberOfRequests_ = value;
9428         onChanged();
9429         return this;
9430       }
9431       /**
9432        * <code>optional uint64 total_number_of_requests = 2;</code>
9433        *
9434        * <pre>
9435        ** Total Number of requests from the start of the region server. 
9436        * </pre>
9437        */
9438       public Builder clearTotalNumberOfRequests() {
9439         bitField0_ = (bitField0_ & ~0x00000002);
9440         totalNumberOfRequests_ = 0L;
9441         onChanged();
9442         return this;
9443       }
9444 
9445       // optional uint32 used_heap_MB = 3;
9446       private int usedHeapMB_ ;
      // Generated has/get/set/clear quartet for field 3
      // (presence bit 0x00000004); clear resets the value to 0.
9447       /**
9448        * <code>optional uint32 used_heap_MB = 3;</code>
9449        *
9450        * <pre>
9451        ** the amount of used heap, in MB. 
9452        * </pre>
9453        */
9454       public boolean hasUsedHeapMB() {
9455         return ((bitField0_ & 0x00000004) == 0x00000004);
9456       }
9457       /**
9458        * <code>optional uint32 used_heap_MB = 3;</code>
9459        *
9460        * <pre>
9461        ** the amount of used heap, in MB. 
9462        * </pre>
9463        */
9464       public int getUsedHeapMB() {
9465         return usedHeapMB_;
9466       }
9467       /**
9468        * <code>optional uint32 used_heap_MB = 3;</code>
9469        *
9470        * <pre>
9471        ** the amount of used heap, in MB. 
9472        * </pre>
9473        */
9474       public Builder setUsedHeapMB(int value) {
9475         bitField0_ |= 0x00000004;
9476         usedHeapMB_ = value;
9477         onChanged();
9478         return this;
9479       }
9480       /**
9481        * <code>optional uint32 used_heap_MB = 3;</code>
9482        *
9483        * <pre>
9484        ** the amount of used heap, in MB. 
9485        * </pre>
9486        */
9487       public Builder clearUsedHeapMB() {
9488         bitField0_ = (bitField0_ & ~0x00000004);
9489         usedHeapMB_ = 0;
9490         onChanged();
9491         return this;
9492       }
9493 
9494       // optional uint32 max_heap_MB = 4;
9495       private int maxHeapMB_ ;
      // Generated has/get/set/clear quartet for field 4
      // (presence bit 0x00000008); clear resets the value to 0.
9496       /**
9497        * <code>optional uint32 max_heap_MB = 4;</code>
9498        *
9499        * <pre>
9500        ** the maximum allowable size of the heap, in MB. 
9501        * </pre>
9502        */
9503       public boolean hasMaxHeapMB() {
9504         return ((bitField0_ & 0x00000008) == 0x00000008);
9505       }
9506       /**
9507        * <code>optional uint32 max_heap_MB = 4;</code>
9508        *
9509        * <pre>
9510        ** the maximum allowable size of the heap, in MB. 
9511        * </pre>
9512        */
9513       public int getMaxHeapMB() {
9514         return maxHeapMB_;
9515       }
9516       /**
9517        * <code>optional uint32 max_heap_MB = 4;</code>
9518        *
9519        * <pre>
9520        ** the maximum allowable size of the heap, in MB. 
9521        * </pre>
9522        */
9523       public Builder setMaxHeapMB(int value) {
9524         bitField0_ |= 0x00000008;
9525         maxHeapMB_ = value;
9526         onChanged();
9527         return this;
9528       }
9529       /**
9530        * <code>optional uint32 max_heap_MB = 4;</code>
9531        *
9532        * <pre>
9533        ** the maximum allowable size of the heap, in MB. 
9534        * </pre>
9535        */
9536       public Builder clearMaxHeapMB() {
9537         bitField0_ = (bitField0_ & ~0x00000008);
9538         maxHeapMB_ = 0;
9539         onChanged();
9540         return this;
9541       }
9542 
9543       // repeated .hbase.pb.RegionLoad region_loads = 5;
      // Standard generated plumbing for the repeated region_loads field:
      // elements live in the regionLoads_ list until a RepeatedFieldBuilder
      // is first requested via getRegionLoadsFieldBuilder(), after which
      // regionLoadsBuilder_ owns them and regionLoads_ is nulled. Bit
      // 0x00000010 of bitField0_ tracks whether regionLoads_ is a private
      // mutable copy (ensureRegionLoadsIsMutable) rather than the shared
      // immutable empty/parsed list. Every mutator therefore branches on
      // regionLoadsBuilder_ == null.
9544       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_ =
9545         java.util.Collections.emptyList();
9546       private void ensureRegionLoadsIsMutable() {
9547         if (!((bitField0_ & 0x00000010) == 0x00000010)) {
9548           regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>(regionLoads_);
9549           bitField0_ |= 0x00000010;
9550          }
9551       }
9552 
9553       private com.google.protobuf.RepeatedFieldBuilder<
9554           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_;
9555 
9556       /**
9557        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9558        *
9559        * <pre>
9560        ** Information on the load of individual regions. 
9561        * </pre>
9562        */
9563       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
9564         if (regionLoadsBuilder_ == null) {
9565           return java.util.Collections.unmodifiableList(regionLoads_);
9566         } else {
9567           return regionLoadsBuilder_.getMessageList();
9568         }
9569       }
9570       /**
9571        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9572        *
9573        * <pre>
9574        ** Information on the load of individual regions. 
9575        * </pre>
9576        */
9577       public int getRegionLoadsCount() {
9578         if (regionLoadsBuilder_ == null) {
9579           return regionLoads_.size();
9580         } else {
9581           return regionLoadsBuilder_.getCount();
9582         }
9583       }
9584       /**
9585        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9586        *
9587        * <pre>
9588        ** Information on the load of individual regions. 
9589        * </pre>
9590        */
9591       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
9592         if (regionLoadsBuilder_ == null) {
9593           return regionLoads_.get(index);
9594         } else {
9595           return regionLoadsBuilder_.getMessage(index);
9596         }
9597       }
9598       /**
9599        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9600        *
9601        * <pre>
9602        ** Information on the load of individual regions. 
9603        * </pre>
9604        */
9605       public Builder setRegionLoads(
9606           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
9607         if (regionLoadsBuilder_ == null) {
9608           if (value == null) {
9609             throw new NullPointerException();
9610           }
9611           ensureRegionLoadsIsMutable();
9612           regionLoads_.set(index, value);
9613           onChanged();
9614         } else {
9615           regionLoadsBuilder_.setMessage(index, value);
9616         }
9617         return this;
9618       }
9619       /**
9620        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9621        *
9622        * <pre>
9623        ** Information on the load of individual regions. 
9624        * </pre>
9625        */
9626       public Builder setRegionLoads(
9627           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
9628         if (regionLoadsBuilder_ == null) {
9629           ensureRegionLoadsIsMutable();
9630           regionLoads_.set(index, builderForValue.build());
9631           onChanged();
9632         } else {
9633           regionLoadsBuilder_.setMessage(index, builderForValue.build());
9634         }
9635         return this;
9636       }
9637       /**
9638        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9639        *
9640        * <pre>
9641        ** Information on the load of individual regions. 
9642        * </pre>
9643        */
9644       public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
9645         if (regionLoadsBuilder_ == null) {
9646           if (value == null) {
9647             throw new NullPointerException();
9648           }
9649           ensureRegionLoadsIsMutable();
9650           regionLoads_.add(value);
9651           onChanged();
9652         } else {
9653           regionLoadsBuilder_.addMessage(value);
9654         }
9655         return this;
9656       }
9657       /**
9658        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9659        *
9660        * <pre>
9661        ** Information on the load of individual regions. 
9662        * </pre>
9663        */
9664       public Builder addRegionLoads(
9665           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
9666         if (regionLoadsBuilder_ == null) {
9667           if (value == null) {
9668             throw new NullPointerException();
9669           }
9670           ensureRegionLoadsIsMutable();
9671           regionLoads_.add(index, value);
9672           onChanged();
9673         } else {
9674           regionLoadsBuilder_.addMessage(index, value);
9675         }
9676         return this;
9677       }
9678       /**
9679        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9680        *
9681        * <pre>
9682        ** Information on the load of individual regions. 
9683        * </pre>
9684        */
9685       public Builder addRegionLoads(
9686           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
9687         if (regionLoadsBuilder_ == null) {
9688           ensureRegionLoadsIsMutable();
9689           regionLoads_.add(builderForValue.build());
9690           onChanged();
9691         } else {
9692           regionLoadsBuilder_.addMessage(builderForValue.build());
9693         }
9694         return this;
9695       }
9696       /**
9697        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9698        *
9699        * <pre>
9700        ** Information on the load of individual regions. 
9701        * </pre>
9702        */
9703       public Builder addRegionLoads(
9704           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
9705         if (regionLoadsBuilder_ == null) {
9706           ensureRegionLoadsIsMutable();
9707           regionLoads_.add(index, builderForValue.build());
9708           onChanged();
9709         } else {
9710           regionLoadsBuilder_.addMessage(index, builderForValue.build());
9711         }
9712         return this;
9713       }
9714       /**
9715        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9716        *
9717        * <pre>
9718        ** Information on the load of individual regions. 
9719        * </pre>
9720        */
9721       public Builder addAllRegionLoads(
9722           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> values) {
9723         if (regionLoadsBuilder_ == null) {
9724           ensureRegionLoadsIsMutable();
9725           super.addAll(values, regionLoads_);
9726           onChanged();
9727         } else {
9728           regionLoadsBuilder_.addAllMessages(values);
9729         }
9730         return this;
9731       }
9732       /**
9733        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9734        *
9735        * <pre>
9736        ** Information on the load of individual regions. 
9737        * </pre>
9738        */
9739       public Builder clearRegionLoads() {
9740         if (regionLoadsBuilder_ == null) {
9741           regionLoads_ = java.util.Collections.emptyList();
9742           bitField0_ = (bitField0_ & ~0x00000010);
9743           onChanged();
9744         } else {
9745           regionLoadsBuilder_.clear();
9746         }
9747         return this;
9748       }
9749       /**
9750        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9751        *
9752        * <pre>
9753        ** Information on the load of individual regions. 
9754        * </pre>
9755        */
9756       public Builder removeRegionLoads(int index) {
9757         if (regionLoadsBuilder_ == null) {
9758           ensureRegionLoadsIsMutable();
9759           regionLoads_.remove(index);
9760           onChanged();
9761         } else {
9762           regionLoadsBuilder_.remove(index);
9763         }
9764         return this;
9765       }
9766       /**
9767        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9768        *
9769        * <pre>
9770        ** Information on the load of individual regions. 
9771        * </pre>
9772        */
9773       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder(
9774           int index) {
9775         return getRegionLoadsFieldBuilder().getBuilder(index);
9776       }
9777       /**
9778        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9779        *
9780        * <pre>
9781        ** Information on the load of individual regions. 
9782        * </pre>
9783        */
9784       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
9785           int index) {
9786         if (regionLoadsBuilder_ == null) {
9787           return regionLoads_.get(index);  } else {
9788           return regionLoadsBuilder_.getMessageOrBuilder(index);
9789         }
9790       }
9791       /**
9792        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9793        *
9794        * <pre>
9795        ** Information on the load of individual regions. 
9796        * </pre>
9797        */
9798       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
9799            getRegionLoadsOrBuilderList() {
9800         if (regionLoadsBuilder_ != null) {
9801           return regionLoadsBuilder_.getMessageOrBuilderList();
9802         } else {
9803           return java.util.Collections.unmodifiableList(regionLoads_);
9804         }
9805       }
9806       /**
9807        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9808        *
9809        * <pre>
9810        ** Information on the load of individual regions. 
9811        * </pre>
9812        */
9813       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() {
9814         return getRegionLoadsFieldBuilder().addBuilder(
9815             org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
9816       }
9817       /**
9818        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9819        *
9820        * <pre>
9821        ** Information on the load of individual regions. 
9822        * </pre>
9823        */
9824       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder(
9825           int index) {
9826         return getRegionLoadsFieldBuilder().addBuilder(
9827             index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
9828       }
9829       /**
9830        * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
9831        *
9832        * <pre>
9833        ** Information on the load of individual regions. 
9834        * </pre>
9835        */
9836       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder> 
9837            getRegionLoadsBuilderList() {
9838         return getRegionLoadsFieldBuilder().getBuilderList();
9839       }
      // Lazily creates the RepeatedFieldBuilder; once created it takes
      // ownership of the current list and regionLoads_ is set to null.
9840       private com.google.protobuf.RepeatedFieldBuilder<
9841           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
9842           getRegionLoadsFieldBuilder() {
9843         if (regionLoadsBuilder_ == null) {
9844           regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
9845               org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>(
9846                   regionLoads_,
9847                   ((bitField0_ & 0x00000010) == 0x00000010),
9848                   getParentForChildren(),
9849                   isClean());
9850           regionLoads_ = null;
9851         }
9852         return regionLoadsBuilder_;
9853       }
9854 
9855       // repeated .hbase.pb.Coprocessor coprocessors = 6;
9856       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> coprocessors_ =
9857         java.util.Collections.emptyList();
9858       private void ensureCoprocessorsIsMutable() {
9859         if (!((bitField0_ & 0x00000020) == 0x00000020)) {
9860           coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>(coprocessors_);
9861           bitField0_ |= 0x00000020;
9862          }
9863       }
9864 
9865       private com.google.protobuf.RepeatedFieldBuilder<
9866           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_;
9867 
9868       /**
9869        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9870        *
9871        * <pre>
9872        **
9873        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9874        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9875        * objects.
9876        * </pre>
9877        */
9878       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getCoprocessorsList() {
9879         if (coprocessorsBuilder_ == null) {
9880           return java.util.Collections.unmodifiableList(coprocessors_);
9881         } else {
9882           return coprocessorsBuilder_.getMessageList();
9883         }
9884       }
9885       /**
9886        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9887        *
9888        * <pre>
9889        **
9890        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9891        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9892        * objects.
9893        * </pre>
9894        */
9895       public int getCoprocessorsCount() {
9896         if (coprocessorsBuilder_ == null) {
9897           return coprocessors_.size();
9898         } else {
9899           return coprocessorsBuilder_.getCount();
9900         }
9901       }
9902       /**
9903        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9904        *
9905        * <pre>
9906        **
9907        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9908        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9909        * objects.
9910        * </pre>
9911        */
9912       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) {
9913         if (coprocessorsBuilder_ == null) {
9914           return coprocessors_.get(index);
9915         } else {
9916           return coprocessorsBuilder_.getMessage(index);
9917         }
9918       }
9919       /**
9920        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9921        *
9922        * <pre>
9923        **
9924        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9925        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9926        * objects.
9927        * </pre>
9928        */
9929       public Builder setCoprocessors(
9930           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
9931         if (coprocessorsBuilder_ == null) {
9932           if (value == null) {
9933             throw new NullPointerException();
9934           }
9935           ensureCoprocessorsIsMutable();
9936           coprocessors_.set(index, value);
9937           onChanged();
9938         } else {
9939           coprocessorsBuilder_.setMessage(index, value);
9940         }
9941         return this;
9942       }
9943       /**
9944        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9945        *
9946        * <pre>
9947        **
9948        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9949        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9950        * objects.
9951        * </pre>
9952        */
9953       public Builder setCoprocessors(
9954           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
9955         if (coprocessorsBuilder_ == null) {
9956           ensureCoprocessorsIsMutable();
9957           coprocessors_.set(index, builderForValue.build());
9958           onChanged();
9959         } else {
9960           coprocessorsBuilder_.setMessage(index, builderForValue.build());
9961         }
9962         return this;
9963       }
9964       /**
9965        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9966        *
9967        * <pre>
9968        **
9969        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9970        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9971        * objects.
9972        * </pre>
9973        */
9974       public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
9975         if (coprocessorsBuilder_ == null) {
9976           if (value == null) {
9977             throw new NullPointerException();
9978           }
9979           ensureCoprocessorsIsMutable();
9980           coprocessors_.add(value);
9981           onChanged();
9982         } else {
9983           coprocessorsBuilder_.addMessage(value);
9984         }
9985         return this;
9986       }
9987       /**
9988        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
9989        *
9990        * <pre>
9991        **
9992        * Regionserver-level coprocessors, e.g., WALObserver implementations.
9993        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
9994        * objects.
9995        * </pre>
9996        */
9997       public Builder addCoprocessors(
9998           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
9999         if (coprocessorsBuilder_ == null) {
10000           if (value == null) {
10001             throw new NullPointerException();
10002           }
10003           ensureCoprocessorsIsMutable();
10004           coprocessors_.add(index, value);
10005           onChanged();
10006         } else {
10007           coprocessorsBuilder_.addMessage(index, value);
10008         }
10009         return this;
10010       }
10011       /**
10012        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10013        *
10014        * <pre>
10015        **
10016        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10017        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10018        * objects.
10019        * </pre>
10020        */
10021       public Builder addCoprocessors(
10022           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
10023         if (coprocessorsBuilder_ == null) {
10024           ensureCoprocessorsIsMutable();
10025           coprocessors_.add(builderForValue.build());
10026           onChanged();
10027         } else {
10028           coprocessorsBuilder_.addMessage(builderForValue.build());
10029         }
10030         return this;
10031       }
10032       /**
10033        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10034        *
10035        * <pre>
10036        **
10037        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10038        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10039        * objects.
10040        * </pre>
10041        */
10042       public Builder addCoprocessors(
10043           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
10044         if (coprocessorsBuilder_ == null) {
10045           ensureCoprocessorsIsMutable();
10046           coprocessors_.add(index, builderForValue.build());
10047           onChanged();
10048         } else {
10049           coprocessorsBuilder_.addMessage(index, builderForValue.build());
10050         }
10051         return this;
10052       }
10053       /**
10054        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10055        *
10056        * <pre>
10057        **
10058        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10059        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10060        * objects.
10061        * </pre>
10062        */
10063       public Builder addAllCoprocessors(
10064           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> values) {
10065         if (coprocessorsBuilder_ == null) {
10066           ensureCoprocessorsIsMutable();
10067           super.addAll(values, coprocessors_);
10068           onChanged();
10069         } else {
10070           coprocessorsBuilder_.addAllMessages(values);
10071         }
10072         return this;
10073       }
10074       /**
10075        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10076        *
10077        * <pre>
10078        **
10079        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10080        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10081        * objects.
10082        * </pre>
10083        */
10084       public Builder clearCoprocessors() {
10085         if (coprocessorsBuilder_ == null) {
10086           coprocessors_ = java.util.Collections.emptyList();
10087           bitField0_ = (bitField0_ & ~0x00000020);
10088           onChanged();
10089         } else {
10090           coprocessorsBuilder_.clear();
10091         }
10092         return this;
10093       }
10094       /**
10095        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10096        *
10097        * <pre>
10098        **
10099        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10100        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10101        * objects.
10102        * </pre>
10103        */
10104       public Builder removeCoprocessors(int index) {
10105         if (coprocessorsBuilder_ == null) {
10106           ensureCoprocessorsIsMutable();
10107           coprocessors_.remove(index);
10108           onChanged();
10109         } else {
10110           coprocessorsBuilder_.remove(index);
10111         }
10112         return this;
10113       }
10114       /**
10115        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10116        *
10117        * <pre>
10118        **
10119        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10120        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10121        * objects.
10122        * </pre>
10123        */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder(
          int index) {
        // Forces creation of the RepeatedFieldBuilder so the element can be edited in place.
        return getCoprocessorsFieldBuilder().getBuilder(index);
      }
10128       /**
10129        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10130        *
10131        * <pre>
10132        **
10133        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10134        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10135        * objects.
10136        * </pre>
10137        */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
          int index) {
        // Read-only view: serves from the plain list until a nested builder exists.
        if (coprocessorsBuilder_ == null) {
          return coprocessors_.get(index);  } else {
          return coprocessorsBuilder_.getMessageOrBuilder(index);
        }
      }
10145       /**
10146        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10147        *
10148        * <pre>
10149        **
10150        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10151        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10152        * objects.
10153        * </pre>
10154        */
      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
           getCoprocessorsOrBuilderList() {
        // Returns an unmodifiable snapshot view when no nested builder has been created.
        if (coprocessorsBuilder_ != null) {
          return coprocessorsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(coprocessors_);
        }
      }
10163       /**
10164        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10165        *
10166        * <pre>
10167        **
10168        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10169        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10170        * objects.
10171        * </pre>
10172        */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() {
        // Appends a new default-initialized element and returns its builder for population.
        return getCoprocessorsFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
      }
10177       /**
10178        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10179        *
10180        * <pre>
10181        **
10182        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10183        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10184        * objects.
10185        * </pre>
10186        */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder(
          int index) {
        // Inserts a new default-initialized element at {@code index} and returns its builder.
        return getCoprocessorsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
      }
10192       /**
10193        * <code>repeated .hbase.pb.Coprocessor coprocessors = 6;</code>
10194        *
10195        * <pre>
10196        **
10197        * Regionserver-level coprocessors, e.g., WALObserver implementations.
10198        * Region-level coprocessors, on the other hand, are stored inside RegionLoad
10199        * objects.
10200        * </pre>
10201        */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder> 
           getCoprocessorsBuilderList() {
        // Per-element builders for field 6; forces creation of the RepeatedFieldBuilder.
        return getCoprocessorsFieldBuilder().getBuilderList();
      }
      // Lazily creates the RepeatedFieldBuilder for repeated field 6. Once created,
      // the plain list is handed off (coprocessors_ set to null) and all further
      // access for this field goes through coprocessorsBuilder_.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
          getCoprocessorsFieldBuilder() {
        if (coprocessorsBuilder_ == null) {
          coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>(
                  coprocessors_,
                  ((bitField0_ & 0x00000020) == 0x00000020),
                  getParentForChildren(),
                  isClean());
          coprocessors_ = null;
        }
        return coprocessorsBuilder_;
      }
10220 
10221       // optional uint64 report_start_time = 7;
10222       private long reportStartTime_ ;
10223       /**
10224        * <code>optional uint64 report_start_time = 7;</code>
10225        *
10226        * <pre>
10227        **
10228        * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
10229        * time is measured as the difference, measured in milliseconds, between the current time
10230        * and midnight, January 1, 1970 UTC.
10231        * </pre>
10232        */
      public boolean hasReportStartTime() {
        // True when field 7 was explicitly set (presence bit 0x40).
        return ((bitField0_ & 0x00000040) == 0x00000040);
      }
10236       /**
10237        * <code>optional uint64 report_start_time = 7;</code>
10238        *
10239        * <pre>
10240        **
10241        * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
10242        * time is measured as the difference, measured in milliseconds, between the current time
10243        * and midnight, January 1, 1970 UTC.
10244        * </pre>
10245        */
      public long getReportStartTime() {
        // Returns 0L when unset (the proto2 default for uint64).
        return reportStartTime_;
      }
10249       /**
10250        * <code>optional uint64 report_start_time = 7;</code>
10251        *
10252        * <pre>
10253        **
10254        * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
10255        * time is measured as the difference, measured in milliseconds, between the current time
10256        * and midnight, January 1, 1970 UTC.
10257        * </pre>
10258        */
      public Builder setReportStartTime(long value) {
        // Marks field 7 present, stores the value, and notifies the parent of the change.
        bitField0_ |= 0x00000040;
        reportStartTime_ = value;
        onChanged();
        return this;
      }
10265       /**
10266        * <code>optional uint64 report_start_time = 7;</code>
10267        *
10268        * <pre>
10269        **
10270        * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
10271        * time is measured as the difference, measured in milliseconds, between the current time
10272        * and midnight, January 1, 1970 UTC.
10273        * </pre>
10274        */
      public Builder clearReportStartTime() {
        // Clears presence bit 0x40 and restores the default value 0L.
        bitField0_ = (bitField0_ & ~0x00000040);
        reportStartTime_ = 0L;
        onChanged();
        return this;
      }
10281 
10282       // optional uint64 report_end_time = 8;
10283       private long reportEndTime_ ;
10284       /**
10285        * <code>optional uint64 report_end_time = 8;</code>
10286        *
10287        * <pre>
10288        **
10289        * Time when report was generated.
10290        * time is measured as the difference, measured in milliseconds, between the current time
10291        * and midnight, January 1, 1970 UTC.
10292        * </pre>
10293        */
      public boolean hasReportEndTime() {
        // True when field 8 was explicitly set (presence bit 0x80).
        return ((bitField0_ & 0x00000080) == 0x00000080);
      }
10297       /**
10298        * <code>optional uint64 report_end_time = 8;</code>
10299        *
10300        * <pre>
10301        **
10302        * Time when report was generated.
10303        * time is measured as the difference, measured in milliseconds, between the current time
10304        * and midnight, January 1, 1970 UTC.
10305        * </pre>
10306        */
      public long getReportEndTime() {
        // Returns 0L when unset (the proto2 default for uint64).
        return reportEndTime_;
      }
10310       /**
10311        * <code>optional uint64 report_end_time = 8;</code>
10312        *
10313        * <pre>
10314        **
10315        * Time when report was generated.
10316        * time is measured as the difference, measured in milliseconds, between the current time
10317        * and midnight, January 1, 1970 UTC.
10318        * </pre>
10319        */
      public Builder setReportEndTime(long value) {
        // Marks field 8 present, stores the value, and notifies the parent of the change.
        bitField0_ |= 0x00000080;
        reportEndTime_ = value;
        onChanged();
        return this;
      }
10326       /**
10327        * <code>optional uint64 report_end_time = 8;</code>
10328        *
10329        * <pre>
10330        **
10331        * Time when report was generated.
10332        * time is measured as the difference, measured in milliseconds, between the current time
10333        * and midnight, January 1, 1970 UTC.
10334        * </pre>
10335        */
      public Builder clearReportEndTime() {
        // Clears presence bit 0x80 and restores the default value 0L.
        bitField0_ = (bitField0_ & ~0x00000080);
        reportEndTime_ = 0L;
        onChanged();
        return this;
      }
10342 
10343       // optional uint32 info_server_port = 9;
10344       private int infoServerPort_ ;
10345       /**
10346        * <code>optional uint32 info_server_port = 9;</code>
10347        *
10348        * <pre>
10349        **
10350        * The port number that this region server is hosing an info server on.
10351        * </pre>
10352        */
      public boolean hasInfoServerPort() {
        // True when field 9 was explicitly set (presence bit 0x100).
        return ((bitField0_ & 0x00000100) == 0x00000100);
      }
10356       /**
10357        * <code>optional uint32 info_server_port = 9;</code>
10358        *
10359        * <pre>
10360        **
10361        * The port number that this region server is hosing an info server on.
10362        * </pre>
10363        */
      public int getInfoServerPort() {
        // Returns 0 when unset (the proto2 default for uint32).
        return infoServerPort_;
      }
10367       /**
10368        * <code>optional uint32 info_server_port = 9;</code>
10369        *
10370        * <pre>
10371        **
10372        * The port number that this region server is hosing an info server on.
10373        * </pre>
10374        */
      public Builder setInfoServerPort(int value) {
        // Marks field 9 present, stores the value, and notifies the parent of the change.
        bitField0_ |= 0x00000100;
        infoServerPort_ = value;
        onChanged();
        return this;
      }
10381       /**
10382        * <code>optional uint32 info_server_port = 9;</code>
10383        *
10384        * <pre>
10385        **
10386        * The port number that this region server is hosing an info server on.
10387        * </pre>
10388        */
      public Builder clearInfoServerPort() {
        // Clears presence bit 0x100 and restores the default value 0.
        bitField0_ = (bitField0_ & ~0x00000100);
        infoServerPort_ = 0;
        onChanged();
        return this;
      }
10395 
10396       // repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;
      // Backing list for repeated field 10; starts as the shared immutable empty
      // list and is copied to a mutable ArrayList on first mutation.
      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> replLoadSource_ =
        java.util.Collections.emptyList();
      private void ensureReplLoadSourceIsMutable() {
        // Copy-on-write guard: bit 0x200 records that replLoadSource_ is already
        // a private mutable copy; if not, clone the current contents first.
        if (!((bitField0_ & 0x00000200) == 0x00000200)) {
          replLoadSource_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>(replLoadSource_);
          bitField0_ |= 0x00000200;
         }
      }
10405 
      // Nested builder for repeated field 10; null until first builder-style access.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> replLoadSourceBuilder_;
10408 
10409       /**
10410        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10411        *
10412        * <pre>
10413        **
10414        * The replicationLoadSource for the replication Source status of this region server.
10415        * </pre>
10416        */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> getReplLoadSourceList() {
        // Unmodifiable view of the local list until a nested builder exists; then delegate.
        if (replLoadSourceBuilder_ == null) {
          return java.util.Collections.unmodifiableList(replLoadSource_);
        } else {
          return replLoadSourceBuilder_.getMessageList();
        }
      }
10424       /**
10425        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10426        *
10427        * <pre>
10428        **
10429        * The replicationLoadSource for the replication Source status of this region server.
10430        * </pre>
10431        */
      public int getReplLoadSourceCount() {
        // Element count of repeated field 10, from whichever store is active.
        if (replLoadSourceBuilder_ == null) {
          return replLoadSource_.size();
        } else {
          return replLoadSourceBuilder_.getCount();
        }
      }
10439       /**
10440        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10441        *
10442        * <pre>
10443        **
10444        * The replicationLoadSource for the replication Source status of this region server.
10445        * </pre>
10446        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) {
        // Element accessor for repeated field 10, from whichever store is active.
        if (replLoadSourceBuilder_ == null) {
          return replLoadSource_.get(index);
        } else {
          return replLoadSourceBuilder_.getMessage(index);
        }
      }
10454       /**
10455        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10456        *
10457        * <pre>
10458        **
10459        * The replicationLoadSource for the replication Source status of this region server.
10460        * </pre>
10461        */
      public Builder setReplLoadSource(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
        // Replaces the element at {@code index}; null values are rejected up front.
        if (replLoadSourceBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReplLoadSourceIsMutable();
          replLoadSource_.set(index, value);
          onChanged();
        } else {
          replLoadSourceBuilder_.setMessage(index, value);
        }
        return this;
      }
10476       /**
10477        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10478        *
10479        * <pre>
10480        **
10481        * The replicationLoadSource for the replication Source status of this region server.
10482        * </pre>
10483        */
      public Builder setReplLoadSource(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
        // Builder overload: the sub-builder is built eagerly before being stored.
        if (replLoadSourceBuilder_ == null) {
          ensureReplLoadSourceIsMutable();
          replLoadSource_.set(index, builderForValue.build());
          onChanged();
        } else {
          replLoadSourceBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
10495       /**
10496        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10497        *
10498        * <pre>
10499        **
10500        * The replicationLoadSource for the replication Source status of this region server.
10501        * </pre>
10502        */
      public Builder addReplLoadSource(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
        // Appends a value to repeated field 10; null values are rejected up front.
        if (replLoadSourceBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReplLoadSourceIsMutable();
          replLoadSource_.add(value);
          onChanged();
        } else {
          replLoadSourceBuilder_.addMessage(value);
        }
        return this;
      }
10516       /**
10517        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10518        *
10519        * <pre>
10520        **
10521        * The replicationLoadSource for the replication Source status of this region server.
10522        * </pre>
10523        */
      public Builder addReplLoadSource(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
        // Inserts a value at {@code index}; null values are rejected up front.
        if (replLoadSourceBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReplLoadSourceIsMutable();
          replLoadSource_.add(index, value);
          onChanged();
        } else {
          replLoadSourceBuilder_.addMessage(index, value);
        }
        return this;
      }
10538       /**
10539        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10540        *
10541        * <pre>
10542        **
10543        * The replicationLoadSource for the replication Source status of this region server.
10544        * </pre>
10545        */
      public Builder addReplLoadSource(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
        // Builder overload: the sub-builder is built eagerly before being appended.
        if (replLoadSourceBuilder_ == null) {
          ensureReplLoadSourceIsMutable();
          replLoadSource_.add(builderForValue.build());
          onChanged();
        } else {
          replLoadSourceBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
10557       /**
10558        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10559        *
10560        * <pre>
10561        **
10562        * The replicationLoadSource for the replication Source status of this region server.
10563        * </pre>
10564        */
      public Builder addReplLoadSource(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
        // Builder overload: builds eagerly, then inserts at {@code index}.
        if (replLoadSourceBuilder_ == null) {
          ensureReplLoadSourceIsMutable();
          replLoadSource_.add(index, builderForValue.build());
          onChanged();
        } else {
          replLoadSourceBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
10576       /**
10577        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10578        *
10579        * <pre>
10580        **
10581        * The replicationLoadSource for the replication Source status of this region server.
10582        * </pre>
10583        */
      public Builder addAllReplLoadSource(
          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> values) {
        // Bulk append; GeneratedMessage.Builder.addAll copies the iterable into the list.
        if (replLoadSourceBuilder_ == null) {
          ensureReplLoadSourceIsMutable();
          super.addAll(values, replLoadSource_);
          onChanged();
        } else {
          replLoadSourceBuilder_.addAllMessages(values);
        }
        return this;
      }
10595       /**
10596        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10597        *
10598        * <pre>
10599        **
10600        * The replicationLoadSource for the replication Source status of this region server.
10601        * </pre>
10602        */
      public Builder clearReplLoadSource() {
        // Resets repeated field 10 to the shared empty list and drops mutability bit 0x200.
        if (replLoadSourceBuilder_ == null) {
          replLoadSource_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000200);
          onChanged();
        } else {
          replLoadSourceBuilder_.clear();
        }
        return this;
      }
10613       /**
10614        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10615        *
10616        * <pre>
10617        **
10618        * The replicationLoadSource for the replication Source status of this region server.
10619        * </pre>
10620        */
      public Builder removeReplLoadSource(int index) {
        // Copy-on-write removal from the local list, or delegation to the nested builder.
        if (replLoadSourceBuilder_ == null) {
          ensureReplLoadSourceIsMutable();
          replLoadSource_.remove(index);
          onChanged();
        } else {
          replLoadSourceBuilder_.remove(index);
        }
        return this;
      }
10631       /**
10632        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10633        *
10634        * <pre>
10635        **
10636        * The replicationLoadSource for the replication Source status of this region server.
10637        * </pre>
10638        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder getReplLoadSourceBuilder(
          int index) {
        // Forces creation of the RepeatedFieldBuilder so the element can be edited in place.
        return getReplLoadSourceFieldBuilder().getBuilder(index);
      }
10643       /**
10644        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10645        *
10646        * <pre>
10647        **
10648        * The replicationLoadSource for the replication Source status of this region server.
10649        * </pre>
10650        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
          int index) {
        // Read-only view: serves from the plain list until a nested builder exists.
        if (replLoadSourceBuilder_ == null) {
          return replLoadSource_.get(index);  } else {
          return replLoadSourceBuilder_.getMessageOrBuilder(index);
        }
      }
10658       /**
10659        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10660        *
10661        * <pre>
10662        **
10663        * The replicationLoadSource for the replication Source status of this region server.
10664        * </pre>
10665        */
      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> 
           getReplLoadSourceOrBuilderList() {
        // Returns an unmodifiable snapshot view when no nested builder has been created.
        if (replLoadSourceBuilder_ != null) {
          return replLoadSourceBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(replLoadSource_);
        }
      }
10674       /**
10675        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10676        *
10677        * <pre>
10678        **
10679        * The replicationLoadSource for the replication Source status of this region server.
10680        * </pre>
10681        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder() {
        // Appends a new default-initialized element and returns its builder for population.
        return getReplLoadSourceFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance());
      }
10686       /**
10687        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10688        *
10689        * <pre>
10690        **
10691        * The replicationLoadSource for the replication Source status of this region server.
10692        * </pre>
10693        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder(
          int index) {
        // Inserts a new default-initialized element at {@code index} and returns its builder.
        return getReplLoadSourceFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance());
      }
10699       /**
10700        * <code>repeated .hbase.pb.ReplicationLoadSource replLoadSource = 10;</code>
10701        *
10702        * <pre>
10703        **
10704        * The replicationLoadSource for the replication Source status of this region server.
10705        * </pre>
10706        */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder> 
           getReplLoadSourceBuilderList() {
        // Per-element builders for field 10; forces creation of the RepeatedFieldBuilder.
        return getReplLoadSourceFieldBuilder().getBuilderList();
      }
      // Lazily creates the RepeatedFieldBuilder for repeated field 10. Once created,
      // the plain list is handed off (replLoadSource_ set to null) and all further
      // access for this field goes through replLoadSourceBuilder_.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> 
          getReplLoadSourceFieldBuilder() {
        if (replLoadSourceBuilder_ == null) {
          replLoadSourceBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>(
                  replLoadSource_,
                  ((bitField0_ & 0x00000200) == 0x00000200),
                  getParentForChildren(),
                  isClean());
          replLoadSource_ = null;
        }
        return replLoadSourceBuilder_;
      }
10725 
10726       // optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;
      // Singular message field 11; presence tracked by bitField0_ bit 0x400. The
      // SingleFieldBuilder is null until first builder-style access to the field.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> replLoadSinkBuilder_;
10730       /**
10731        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10732        *
10733        * <pre>
10734        **
10735        * The replicationLoadSink for the replication Sink status of this region server.
10736        * </pre>
10737        */
      public boolean hasReplLoadSink() {
        // True when field 11 was explicitly set (presence bit 0x400).
        return ((bitField0_ & 0x00000400) == 0x00000400);
      }
10741       /**
10742        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10743        *
10744        * <pre>
10745        **
10746        * The replicationLoadSink for the replication Sink status of this region server.
10747        * </pre>
10748        */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() {
        // Serves the local field (default instance when unset) until a nested builder exists.
        if (replLoadSinkBuilder_ == null) {
          return replLoadSink_;
        } else {
          return replLoadSinkBuilder_.getMessage();
        }
      }
10756       /**
10757        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10758        *
10759        * <pre>
10760        **
10761        * The replicationLoadSink for the replication Sink status of this region server.
10762        * </pre>
10763        */
      public Builder setReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) {
        // Replaces field 11; null is rejected, and presence bit 0x400 is set on both paths.
        if (replLoadSinkBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          replLoadSink_ = value;
          onChanged();
        } else {
          replLoadSinkBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000400;
        return this;
      }
10777       /**
10778        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10779        *
10780        * <pre>
10781        **
10782        * The replicationLoadSink for the replication Sink status of this region server.
10783        * </pre>
10784        */
      public Builder setReplLoadSink(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder builderForValue) {
        // Builder overload: builds eagerly, stores, and sets presence bit 0x400.
        if (replLoadSinkBuilder_ == null) {
          replLoadSink_ = builderForValue.build();
          onChanged();
        } else {
          replLoadSinkBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000400;
        return this;
      }
10796       /**
10797        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10798        *
10799        * <pre>
10800        **
10801        * The replicationLoadSink for the replication Sink status of this region server.
10802        * </pre>
10803        */
      public Builder mergeReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) {
        // Proto merge semantics: if the field is already set to a non-default value,
        // field-merge {@code value} into it; otherwise just adopt {@code value}.
        if (replLoadSinkBuilder_ == null) {
          if (((bitField0_ & 0x00000400) == 0x00000400) &&
              replLoadSink_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) {
            replLoadSink_ =
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder(replLoadSink_).mergeFrom(value).buildPartial();
          } else {
            replLoadSink_ = value;
          }
          onChanged();
        } else {
          replLoadSinkBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000400;
        return this;
      }
10820       /**
10821        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10822        *
10823        * <pre>
10824        **
10825        * The replicationLoadSink for the replication Sink status of this region server.
10826        * </pre>
10827        */
      public Builder clearReplLoadSink() {
        // Restores field 11 to its default instance and clears presence bit 0x400.
        if (replLoadSinkBuilder_ == null) {
          replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
          onChanged();
        } else {
          replLoadSinkBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000400);
        return this;
      }
      /**
       * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
       *
       * <pre>
       **
       * The replicationLoadSink for the replication Sink status of this region server.
       * </pre>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder getReplLoadSinkBuilder() {
        // Handing out a mutable builder implies the caller may set the field,
        // so the has-bit is set eagerly and the parent is notified.
        bitField0_ |= 0x00000400;
        onChanged();
        return getReplLoadSinkFieldBuilder().getBuilder();
      }
10851       /**
10852        * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
10853        *
10854        * <pre>
10855        **
10856        * The replicationLoadSink for the replication Sink status of this region server.
10857        * </pre>
10858        */
10859       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() {
10860         if (replLoadSinkBuilder_ != null) {
10861           return replLoadSinkBuilder_.getMessageOrBuilder();
10862         } else {
10863           return replLoadSink_;
10864         }
10865       }
      /**
       * <code>optional .hbase.pb.ReplicationLoadSink replLoadSink = 11;</code>
       *
       * <pre>
       **
       * The replicationLoadSink for the replication Sink status of this region server.
       * </pre>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> 
          getReplLoadSinkFieldBuilder() {
        // Lazily create the nested field builder. Once created it owns the
        // field's state, so replLoadSink_ is nulled to avoid a stale copy.
        if (replLoadSinkBuilder_ == null) {
          replLoadSinkBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>(
                  replLoadSink_,
                  getParentForChildren(),
                  isClean());
          replLoadSink_ = null;
        }
        return replLoadSinkBuilder_;
      }
10887 
10888       // @@protoc_insertion_point(builder_scope:hbase.pb.ServerLoad)
10889     }
10890 
    static {
      // Create the singleton default instance. The boolean (no-init) constructor
      // skips field setup, so initFields() populates the defaults afterwards.
      defaultInstance = new ServerLoad(true);
      defaultInstance.initFields();
    }
10895 
10896     // @@protoc_insertion_point(class_scope:hbase.pb.ServerLoad)
10897   }
10898 
  /**
   * Accessor contract implemented by both the {@code LiveServerInfo} message
   * and its Builder (protobuf type {@code hbase.pb.LiveServerInfo}).
   */
  public interface LiveServerInfoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hbase.pb.ServerName server = 1;
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    boolean hasServer();
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();

    // required .hbase.pb.ServerLoad server_load = 2;
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    boolean hasServerLoad();
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad();
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder();
  }
10930   /**
10931    * Protobuf type {@code hbase.pb.LiveServerInfo}
10932    */
10933   public static final class LiveServerInfo extends
10934       com.google.protobuf.GeneratedMessage
10935       implements LiveServerInfoOrBuilder {
    // Use LiveServerInfo.newBuilder() to construct.
    private LiveServerInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // No-init constructor used only for the singleton default instance; its
    // fields are populated afterwards via initFields() in the static initializer.
    private LiveServerInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
10942 
    // Singleton default instance, assigned in this class's static initializer.
    private static final LiveServerInfo defaultInstance;
    public static LiveServerInfo getDefaultInstance() {
      return defaultInstance;
    }

    public LiveServerInfo getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields read from the wire whose tags this schema version does not know.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor: consumes tag/value pairs from the input
    // stream until tag 0 (end of stream) or an end-group tag is seen.
    private LiveServerInfo(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              // Unrecognized tag: preserve it in unknownFields; a false return
              // means an end-group tag, which also terminates parsing.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // Field 1 (server), wire type 2 (length-delimited). If the field
              // was already seen, merge the new message into the previous value.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = server_.toBuilder();
              }
              server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(server_);
                server_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (server_load), wire type 2; same merge-on-repeat logic.
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = serverLoad_.toBuilder();
              }
              serverLoad_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(serverLoad_);
                serverLoad_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Always freeze what was parsed, even on error, so the partially-built
        // message attached via setUnfinishedMessage() is consistent.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Descriptor/reflection plumbing: links this generated class to the
    // file-level descriptor tables declared elsewhere in ClusterStatusProtos.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_LiveServerInfo_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_LiveServerInfo_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder.class);
    }

    // Stateless parser; parsePartialFrom delegates to the parsing constructor.
    public static com.google.protobuf.Parser<LiveServerInfo> PARSER =
        new com.google.protobuf.AbstractParser<LiveServerInfo>() {
      public LiveServerInfo parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new LiveServerInfo(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<LiveServerInfo> getParserForType() {
      return PARSER;
    }
11045 
    // Presence bitmask: bit 0x1 = server, bit 0x2 = server_load.
    private int bitField0_;
    // required .hbase.pb.ServerName server = 1;
    public static final int SERVER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    public boolean hasServer() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
      return server_;
    }
    /**
     * <code>required .hbase.pb.ServerName server = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
      return server_;
    }

    // required .hbase.pb.ServerLoad server_load = 2;
    public static final int SERVER_LOAD_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad serverLoad_;
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    public boolean hasServerLoad() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad() {
      return serverLoad_;
    }
    /**
     * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder() {
      return serverLoad_;
    }
11090 
    // Seeds both message fields with their shared default instances so that
    // getters never return null even before parsing.
    private void initFields() {
      server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
    }
    // Memoized result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are declared `required` in the proto, and each is itself
      // a message that must be recursively initialized.
      if (!hasServer()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasServerLoad()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getServer().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getServerLoad().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
11119 
    // Serializes set fields in field-number order, then any unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // ensure sizes are memoized before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, server_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, serverLoad_);
      }
      getUnknownFields().writeTo(output);
    }

    // Memoized serialized size: -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, server_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, serverLoad_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      // Java serialization hook; delegates to GeneratedMessage's proxy form.
      return super.writeReplace();
    }
11157 
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) obj;

      // Field-wise equality: presence bits must match, and set fields must
      // compare equal; unknown fields participate as well.
      boolean result = true;
      result = result && (hasServer() == other.hasServer());
      if (hasServer()) {
        result = result && getServer()
            .equals(other.getServer());
      }
      result = result && (hasServerLoad() == other.hasServerLoad());
      if (hasServerLoad()) {
        result = result && getServerLoad()
            .equals(other.getServerLoad());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Memoized hash: 0 means "not yet computed" (a computed hash of exactly 0
    // would be recomputed each call; harmless since the result is stable).
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      // Standard protobuf-generated scheme: fold in the descriptor, then each
      // set field tagged by its field number, then the unknown fields.
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasServer()) {
        hash = (37 * hash) + SERVER_FIELD_NUMBER;
        hash = (53 * hash) + getServer().hashCode();
      }
      if (hasServerLoad()) {
        hash = (37 * hash) + SERVER_LOAD_FIELD_NUMBER;
        hash = (53 * hash) + getServerLoad().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
11204 
    // Static parsing entry points; all delegate to PARSER.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    // Builder factories.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
11271     /**
11272      * Protobuf type {@code hbase.pb.LiveServerInfo}
11273      */
11274     public static final class Builder extends
11275         com.google.protobuf.GeneratedMessage.Builder<Builder>
11276        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder {
      // Descriptor/reflection plumbing for the Builder, mirroring the message's.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_LiveServerInfo_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_LiveServerInfo_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder.class);
      }

      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested field builders when the runtime requests it
      // (alwaysUseFieldBuilders is a protobuf-internal test/debug flag).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getServerFieldBuilder();
          getServerLoadFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
11308 
      // Resets both fields to defaults and drops their presence bits.
      public Builder clear() {
        super.clear();
        if (serverBuilder_ == null) {
          server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
        } else {
          serverBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (serverLoadBuilder_ == null) {
          serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
        } else {
          serverLoadBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }

      // Deep copy via a round-trip through a partially-built message.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_LiveServerInfo_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance();
      }

      // Like buildPartial(), but rejects messages missing required fields.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
11346 
      // Builds the message without checking required fields: copies each field
      // from either the plain value or its nested builder, and translates the
      // builder's presence bits into the message's bitField0_.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (serverBuilder_ == null) {
          result.server_ = server_;
        } else {
          result.server_ = serverBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (serverLoadBuilder_ == null) {
          result.serverLoad_ = serverLoad_;
        } else {
          result.serverLoad_ = serverLoadBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
11371 
      // Generic merge entry point: dispatches to the typed overload when
      // possible, otherwise falls back to reflection-based merging.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Typed merge: only fields set on `other` overwrite/merge into this builder.
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance()) return this;
        if (other.hasServer()) {
          mergeServer(other.getServer());
        }
        if (other.hasServerLoad()) {
          mergeServerLoad(other.getServerLoad());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      // Unlike the message's isInitialized(), this is not memoized: builder
      // state is mutable, so the checks run every call.
      public final boolean isInitialized() {
        if (!hasServer()) {
          
          return false;
        }
        if (!hasServerLoad()) {
          
          return false;
        }
        if (!getServer().isInitialized()) {
          
          return false;
        }
        if (!getServerLoad().isInitialized()) {
          
          return false;
        }
        return true;
      }

      // Stream merge: parses a full message, then merges it in. On parse error
      // the partially-parsed message (if any) is still merged before rethrowing.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence bitmask: bit 0x1 = server, bit 0x2 = server_load.
      private int bitField0_;

      // required .hbase.pb.ServerName server = 1;
      // Either server_ holds the value directly, or (once a field builder is
      // created) serverBuilder_ owns the state and server_ is null.
      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public boolean hasServer() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
11442       /**
11443        * <code>required .hbase.pb.ServerName server = 1;</code>
11444        */
11445       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
11446         if (serverBuilder_ == null) {
11447           return server_;
11448         } else {
11449           return serverBuilder_.getMessage();
11450         }
11451       }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
        if (serverBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          server_ = value;
          onChanged();
        } else {
          // The nested builder owns the field and notifies the parent itself.
          serverBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;  // mark server as present
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public Builder setServer(
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
        // Convenience overload: builds the sub-message from the given builder.
        if (serverBuilder_ == null) {
          server_ = builderForValue.build();
          onChanged();
        } else {
          serverBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
        if (serverBuilder_ == null) {
          // Field-by-field merge only when server is already set (bit 0x1) and
          // is not the shared default instance; otherwise adopt the new value.
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
            server_ =
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
          } else {
            server_ = value;
          }
          onChanged();
        } else {
          serverBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public Builder clearServer() {
        // Reset to the default instance and drop the has-bit.
        if (serverBuilder_ == null) {
          server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
          onChanged();
        } else {
          serverBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
        // Handing out a mutable builder implies the caller may set the field.
        bitField0_ |= 0x00000001;
        onChanged();
        return getServerFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
        if (serverBuilder_ != null) {
          return serverBuilder_.getMessageOrBuilder();
        } else {
          return server_;
        }
      }
      /**
       * <code>required .hbase.pb.ServerName server = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
          getServerFieldBuilder() {
        // Lazily create the nested field builder; once created it owns the
        // field's state, so server_ is nulled to avoid a stale copy.
        if (serverBuilder_ == null) {
          serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
                  server_,
                  getParentForChildren(),
                  isClean());
          server_ = null;
        }
        return serverBuilder_;
      }
11548 
      // required .hbase.pb.ServerLoad server_load = 2;
      // Inline storage for the field while no nested builder exists.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
      // Non-null once getServerLoadFieldBuilder() runs; then it owns the field's state.
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> serverLoadBuilder_;
11553       /**
11554        * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
11555        */
11556       public boolean hasServerLoad() {
11557         return ((bitField0_ & 0x00000002) == 0x00000002);
11558       }
11559       /**
11560        * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
11561        */
11562       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad() {
11563         if (serverLoadBuilder_ == null) {
11564           return serverLoad_;
11565         } else {
11566           return serverLoadBuilder_.getMessage();
11567         }
11568       }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Replaces the server_load field with the given message and sets
       * its has-bit. Rejects null.
       */
      public Builder setServerLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
        if (serverLoadBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          serverLoad_ = value;
          onChanged();
        } else {
          serverLoadBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Convenience overload: builds the given builder and stores the
       * result as the field value, setting the has-bit.
       */
      public Builder setServerLoad(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) {
        if (serverLoadBuilder_ == null) {
          serverLoad_ = builderForValue.build();
          onChanged();
        } else {
          serverLoadBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Merges {@code value} into the current server_load per protobuf
       * merge semantics: if the field was already set to a non-default
       * message, field-by-field merge; otherwise simply adopt the new
       * message. Always sets the has-bit.
       */
      public Builder mergeServerLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
        if (serverLoadBuilder_ == null) {
          // Reference compare against the shared default instance is the
          // generator's cheap "was anything set" test.
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              serverLoad_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
            serverLoad_ =
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(serverLoad_).mergeFrom(value).buildPartial();
          } else {
            serverLoad_ = value;
          }
          onChanged();
        } else {
          serverLoadBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
11618       /**
11619        * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
11620        */
11621       public Builder clearServerLoad() {
11622         if (serverLoadBuilder_ == null) {
11623           serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
11624           onChanged();
11625         } else {
11626           serverLoadBuilder_.clear();
11627         }
11628         bitField0_ = (bitField0_ & ~0x00000002);
11629         return this;
11630       }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Returns a mutable builder for the server_load field. Marks the
       * field as set and notifies the parent first, since the caller may
       * mutate the returned builder directly.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getServerLoadBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getServerLoadFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Read-only view of the field: the live nested builder when one
       * exists, otherwise the stored message.
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder() {
        if (serverLoadBuilder_ != null) {
          return serverLoadBuilder_.getMessageOrBuilder();
        } else {
          return serverLoad_;
        }
      }
      /**
       * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
       *
       * Lazily creates the SingleFieldBuilder for server_load, seeding it
       * with the current inline value. After creation the inline
       * reference is nulled out: the builder is the single owner of the
       * field's state from then on.
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> 
          getServerLoadFieldBuilder() {
        if (serverLoadBuilder_ == null) {
          serverLoadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>(
                  serverLoad_,
                  getParentForChildren(),
                  isClean());
          // Ownership transferred to the builder; drop the inline copy.
          serverLoad_ = null;
        }
        return serverLoadBuilder_;
      }
11665 
11666       // @@protoc_insertion_point(builder_scope:hbase.pb.LiveServerInfo)
11667     }
11668 
    // Eagerly create the shared immutable default instance used by
    // getDefaultInstance() and as the initial value of singular fields.
    static {
      defaultInstance = new LiveServerInfo(true);
      defaultInstance.initFields();
    }
11673 
11674     // @@protoc_insertion_point(class_scope:hbase.pb.LiveServerInfo)
11675   }
11676 
  /**
   * Read-only accessor interface for the {@code hbase.pb.ClusterStatus}
   * message, implemented by both the immutable message and its Builder.
   * For each singular message field it exposes has/get/getOrBuilder; for
   * each repeated field it exposes list/index/count accessors.
   */
  public interface ClusterStatusOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    boolean hasHbaseVersion();
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion();
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder();

    // repeated .hbase.pb.LiveServerInfo live_servers = 2;
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> 
        getLiveServersList();
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index);
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    int getLiveServersCount();
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> 
        getLiveServersOrBuilderList();
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
        int index);

    // repeated .hbase.pb.ServerName dead_servers = 3;
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> 
        getDeadServersList();
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index);
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    int getDeadServersCount();
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
        getDeadServersOrBuilderList();
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
        int index);

    // repeated .hbase.pb.RegionInTransition regions_in_transition = 4;
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> 
        getRegionsInTransitionList();
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index);
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    int getRegionsInTransitionCount();
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder> 
        getRegionsInTransitionOrBuilderList();
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
        int index);

    // optional .hbase.pb.ClusterId cluster_id = 5;
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    boolean hasClusterId();
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId();
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder();

    // repeated .hbase.pb.Coprocessor master_coprocessors = 6;
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> 
        getMasterCoprocessorsList();
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index);
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     */
    int getMasterCoprocessorsCount();
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
        getMasterCoprocessorsOrBuilderList();
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
        int index);

    // optional .hbase.pb.ServerName master = 7;
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     */
    boolean hasMaster();
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();

    // repeated .hbase.pb.ServerName backup_masters = 8;
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     */
    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> 
        getBackupMastersList();
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index);
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     */
    int getBackupMastersCount();
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     */
    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
        getBackupMastersOrBuilderList();
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     */
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
        int index);

    // optional bool balancer_on = 9;
    /**
     * <code>optional bool balancer_on = 9;</code>
     */
    boolean hasBalancerOn();
    /**
     * <code>optional bool balancer_on = 9;</code>
     */
    boolean getBalancerOn();
  }
11857   /**
11858    * Protobuf type {@code hbase.pb.ClusterStatus}
11859    */
11860   public static final class ClusterStatus extends
11861       com.google.protobuf.GeneratedMessage
11862       implements ClusterStatusOrBuilder {
    // Use ClusterStatus.newBuilder() to construct.
    private ClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // no-arg variant used only for the shared default instance; skips field init.
    private ClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11869 
    // Singleton default instance, assigned in the class's static initializer.
    private static final ClusterStatus defaultInstance;
    public static ClusterStatus getDefaultInstance() {
      return defaultInstance;
    }

    // Instance-level alias required by the MessageLite contract.
    public ClusterStatus getDefaultInstanceForType() {
      return defaultInstance;
    }
11878 
    // Fields seen on the wire that this schema version does not know; preserved
    // so they survive a parse/reserialize round trip.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    /**
     * Wire-parsing constructor: reads tag/value pairs from {@code input}
     * until end of stream (tag 0). Singular message fields (1, 5, 7) are
     * merged into any previously-seen value; repeated fields (2, 3, 4, 6,
     * 8) accumulate into lazily-created ArrayLists that are made
     * unmodifiable in the finally block. Unrecognized tags are preserved
     * in unknownFields.
     */
    private ClusterStatus(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      // Tracks which repeated-field lists have been allocated during this parse.
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE: default appears before the field cases; Java switch matches
          // by value, so case order does not affect behavior.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              // optional hbase_version = 1: merge into any earlier occurrence.
              org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = hbaseVersion_.toBuilder();
              }
              hbaseVersion_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(hbaseVersion_);
                hbaseVersion_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // repeated live_servers = 2.
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
                liveServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo>();
                mutable_bitField0_ |= 0x00000002;
              }
              liveServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.PARSER, extensionRegistry));
              break;
            }
            case 26: {
              // repeated dead_servers = 3.
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
                deadServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
                mutable_bitField0_ |= 0x00000004;
              }
              deadServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
              break;
            }
            case 34: {
              // repeated regions_in_transition = 4.
              if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
                regionsInTransition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition>();
                mutable_bitField0_ |= 0x00000008;
              }
              regionsInTransition_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.PARSER, extensionRegistry));
              break;
            }
            case 42: {
              // optional cluster_id = 5: merge into any earlier occurrence.
              org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = clusterId_.toBuilder();
              }
              clusterId_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(clusterId_);
                clusterId_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 50: {
              // repeated master_coprocessors = 6.
              if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
                masterCoprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
                mutable_bitField0_ |= 0x00000020;
              }
              masterCoprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
              break;
            }
            case 58: {
              // optional master = 7: merge into any earlier occurrence.
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
              if (((bitField0_ & 0x00000004) == 0x00000004)) {
                subBuilder = master_.toBuilder();
              }
              master_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(master_);
                master_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000004;
              break;
            }
            case 66: {
              // repeated backup_masters = 8.
              if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
                backupMasters_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
                mutable_bitField0_ |= 0x00000080;
              }
              backupMasters_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
              break;
            }
            case 72: {
              // optional balancer_on = 9.
              bitField0_ |= 0x00000008;
              balancerOn_ = input.readBool();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Freeze repeated-field lists even on error so a partial message is
        // still immutable.
        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
          liveServers_ = java.util.Collections.unmodifiableList(liveServers_);
        }
        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
          deadServers_ = java.util.Collections.unmodifiableList(deadServers_);
        }
        if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
          regionsInTransition_ = java.util.Collections.unmodifiableList(regionsInTransition_);
        }
        if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
          masterCoprocessors_ = java.util.Collections.unmodifiableList(masterCoprocessors_);
        }
        if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
          backupMasters_ = java.util.Collections.unmodifiableList(backupMasters_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Protobuf descriptor for this message type, used by reflection APIs.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ClusterStatus_descriptor;
    }

    // Binds the descriptor's fields to this class's accessors for reflection.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ClusterStatus_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder.class);
    }
12030 
    // Shared parser instance; delegates to the wire-parsing constructor.
    public static com.google.protobuf.Parser<ClusterStatus> PARSER =
        new com.google.protobuf.AbstractParser<ClusterStatus>() {
      public ClusterStatus parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ClusterStatus(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<ClusterStatus> getParserForType() {
      return PARSER;
    }
12045 
    // Has-bits for the singular fields of this message.
    private int bitField0_;
    // optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;
    public static final int HBASE_VERSION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent hbaseVersion_;
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    public boolean hasHbaseVersion() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion() {
      return hbaseVersion_;
    }
    /**
     * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder() {
      return hbaseVersion_;
    }
12068 
    // repeated .hbase.pb.LiveServerInfo live_servers = 2;
    // Immutable after construction (unmodifiable list built by the parse ctor).
    public static final int LIVE_SERVERS_FIELD_NUMBER = 2;
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> liveServers_;
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> getLiveServersList() {
      return liveServers_;
    }
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> 
        getLiveServersOrBuilderList() {
      return liveServers_;
    }
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    public int getLiveServersCount() {
      return liveServers_.size();
    }
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index) {
      return liveServers_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
        int index) {
      return liveServers_.get(index);
    }
12104 
    // repeated .hbase.pb.ServerName dead_servers = 3;
    // Immutable after construction (unmodifiable list built by the parse ctor).
    public static final int DEAD_SERVERS_FIELD_NUMBER = 3;
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> deadServers_;
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getDeadServersList() {
      return deadServers_;
    }
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
        getDeadServersOrBuilderList() {
      return deadServers_;
    }
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    public int getDeadServersCount() {
      return deadServers_.size();
    }
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index) {
      return deadServers_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
        int index) {
      return deadServers_.get(index);
    }
12140 
    // repeated .hbase.pb.RegionInTransition regions_in_transition = 4;
    // Immutable after construction (unmodifiable list built by the parse ctor).
    public static final int REGIONS_IN_TRANSITION_FIELD_NUMBER = 4;
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> regionsInTransition_;
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> getRegionsInTransitionList() {
      return regionsInTransition_;
    }
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder> 
        getRegionsInTransitionOrBuilderList() {
      return regionsInTransition_;
    }
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    public int getRegionsInTransitionCount() {
      return regionsInTransition_.size();
    }
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index) {
      return regionsInTransition_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
        int index) {
      return regionsInTransition_.get(index);
    }
12176 
    // optional .hbase.pb.ClusterId cluster_id = 5;
    public static final int CLUSTER_ID_FIELD_NUMBER = 5;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId clusterId_;
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    public boolean hasClusterId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId() {
      return clusterId_;
    }
    /**
     * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder() {
      return clusterId_;
    }
12198 
    // repeated .hbase.pb.Coprocessor master_coprocessors = 6;
    public static final int MASTER_COPROCESSORS_FIELD_NUMBER = 6;
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> masterCoprocessors_;
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     *
     * <p>Direct view of the repeated field; unmodifiable once built.
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getMasterCoprocessorsList() {
      return masterCoprocessors_;
    }
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     *
     * <p>Same backing list, typed through the read-only OrBuilder interface.
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
        getMasterCoprocessorsOrBuilderList() {
      return masterCoprocessors_;
    }
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     *
     * @return number of elements in the repeated field
     */
    public int getMasterCoprocessorsCount() {
      return masterCoprocessors_.size();
    }
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     *
     * @param index zero-based position in the repeated field
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index) {
      return masterCoprocessors_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
     *
     * @param index zero-based position in the repeated field
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
        int index) {
      return masterCoprocessors_.get(index);
    }
12234 
    // optional .hbase.pb.ServerName master = 7;
    public static final int MASTER_FIELD_NUMBER = 7;
    // Holds the default instance when unset (see initFields()).
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_;
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     *
     * @return true if the field was explicitly set (bit 0x04 of {@code bitField0_})
     */
    public boolean hasMaster() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     *
     * <p>Returns the default instance when {@link #hasMaster()} is false.
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
      return master_;
    }
    /**
     * <code>optional .hbase.pb.ServerName master = 7;</code>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
      return master_;
    }
12256 
    // repeated .hbase.pb.ServerName backup_masters = 8;
    public static final int BACKUP_MASTERS_FIELD_NUMBER = 8;
    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> backupMasters_;
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     *
     * <p>Direct view of the repeated field; unmodifiable once built.
     */
    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getBackupMastersList() {
      return backupMasters_;
    }
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     *
     * <p>Same backing list, typed through the read-only OrBuilder interface.
     */
    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
        getBackupMastersOrBuilderList() {
      return backupMasters_;
    }
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     *
     * @return number of elements in the repeated field
     */
    public int getBackupMastersCount() {
      return backupMasters_.size();
    }
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     *
     * @param index zero-based position in the repeated field
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index) {
      return backupMasters_.get(index);
    }
    /**
     * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
     *
     * @param index zero-based position in the repeated field
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
        int index) {
      return backupMasters_.get(index);
    }
12292 
    // optional bool balancer_on = 9;
    public static final int BALANCER_ON_FIELD_NUMBER = 9;
    private boolean balancerOn_;
    /**
     * <code>optional bool balancer_on = 9;</code>
     *
     * @return true if the field was explicitly set (bit 0x08 of {@code bitField0_})
     */
    public boolean hasBalancerOn() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional bool balancer_on = 9;</code>
     *
     * <p>Returns {@code false} (the default) when {@link #hasBalancerOn()} is false.
     */
    public boolean getBalancerOn() {
      return balancerOn_;
    }
12308 
    // Seeds every field with its protobuf default: default instances for
    // singular messages, empty immutable lists for repeated fields, false for
    // the bool. Presence is tracked separately in bitField0_.
    private void initFields() {
      hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
      liveServers_ = java.util.Collections.emptyList();
      deadServers_ = java.util.Collections.emptyList();
      regionsInTransition_ = java.util.Collections.emptyList();
      clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
      masterCoprocessors_ = java.util.Collections.emptyList();
      master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      backupMasters_ = java.util.Collections.emptyList();
      balancerOn_ = false;
    }
    // Memoized tri-state: -1 = not yet computed, 0 = not initialized, 1 = initialized.
    private byte memoizedIsInitialized = -1;
    // ClusterStatus declares no required fields of its own, so this only
    // verifies that every set or contained sub-message is itself initialized.
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (hasHbaseVersion()) {
        if (!getHbaseVersion().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getLiveServersCount(); i++) {
        if (!getLiveServers(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getDeadServersCount(); i++) {
        if (!getDeadServers(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getRegionsInTransitionCount(); i++) {
        if (!getRegionsInTransition(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasClusterId()) {
        if (!getClusterId().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getMasterCoprocessorsCount(); i++) {
        if (!getMasterCoprocessors(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasMaster()) {
        if (!getMaster().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getBackupMastersCount(); i++) {
        if (!getBackupMasters(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
12376 
    // Serializes set fields to the wire in ascending field-number order (1..9),
    // followed by any unknown fields preserved from parsing.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      // Called for its side effect: memoizes sizes so nested-message length
      // prefixes are available during the write below.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, hbaseVersion_);
      }
      for (int i = 0; i < liveServers_.size(); i++) {
        output.writeMessage(2, liveServers_.get(i));
      }
      for (int i = 0; i < deadServers_.size(); i++) {
        output.writeMessage(3, deadServers_.get(i));
      }
      for (int i = 0; i < regionsInTransition_.size(); i++) {
        output.writeMessage(4, regionsInTransition_.get(i));
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(5, clusterId_);
      }
      for (int i = 0; i < masterCoprocessors_.size(); i++) {
        output.writeMessage(6, masterCoprocessors_.get(i));
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeMessage(7, master_);
      }
      for (int i = 0; i < backupMasters_.size(); i++) {
        output.writeMessage(8, backupMasters_.get(i));
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeBool(9, balancerOn_);
      }
      getUnknownFields().writeTo(output);
    }
12409 
    // Cached wire size; -1 means not yet computed. Safe to cache because the
    // message is immutable after construction.
    private int memoizedSerializedSize = -1;
    // Sums the encoded size of every set field (tag + payload) plus any
    // preserved unknown fields; mirrors the field order used by writeTo().
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, hbaseVersion_);
      }
      for (int i = 0; i < liveServers_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, liveServers_.get(i));
      }
      for (int i = 0; i < deadServers_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(3, deadServers_.get(i));
      }
      for (int i = 0; i < regionsInTransition_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(4, regionsInTransition_.get(i));
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(5, clusterId_);
      }
      for (int i = 0; i < masterCoprocessors_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(6, masterCoprocessors_.get(i));
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(7, master_);
      }
      for (int i = 0; i < backupMasters_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(8, backupMasters_.get(i));
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(9, balancerOn_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
12456 
    private static final long serialVersionUID = 0L;
    // Java-serialization hook; delegates to GeneratedMessage's replacement logic.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
12463 
    // Value equality: compares presence plus value for each singular field,
    // whole-list equality for repeated fields, and the unknown-field set.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) obj;

      boolean result = true;
      result = result && (hasHbaseVersion() == other.hasHbaseVersion());
      if (hasHbaseVersion()) {
        result = result && getHbaseVersion()
            .equals(other.getHbaseVersion());
      }
      result = result && getLiveServersList()
          .equals(other.getLiveServersList());
      result = result && getDeadServersList()
          .equals(other.getDeadServersList());
      result = result && getRegionsInTransitionList()
          .equals(other.getRegionsInTransitionList());
      result = result && (hasClusterId() == other.hasClusterId());
      if (hasClusterId()) {
        result = result && getClusterId()
            .equals(other.getClusterId());
      }
      result = result && getMasterCoprocessorsList()
          .equals(other.getMasterCoprocessorsList());
      result = result && (hasMaster() == other.hasMaster());
      if (hasMaster()) {
        result = result && getMaster()
            .equals(other.getMaster());
      }
      result = result && getBackupMastersList()
          .equals(other.getBackupMastersList());
      result = result && (hasBalancerOn() == other.hasBalancerOn());
      if (hasBalancerOn()) {
        result = result && (getBalancerOn()
            == other.getBalancerOn());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
12509 
    // Cached hash; 0 is the "not yet computed" sentinel (safe: the formula
    // below starts from 41, so a computed hash is re-derived deterministically
    // even in the unlikely event it equals 0).
    private int memoizedHashCode = 0;
    // Mixes each set field's number and value hash; consistent with equals().
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasHbaseVersion()) {
        hash = (37 * hash) + HBASE_VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getHbaseVersion().hashCode();
      }
      if (getLiveServersCount() > 0) {
        hash = (37 * hash) + LIVE_SERVERS_FIELD_NUMBER;
        hash = (53 * hash) + getLiveServersList().hashCode();
      }
      if (getDeadServersCount() > 0) {
        hash = (37 * hash) + DEAD_SERVERS_FIELD_NUMBER;
        hash = (53 * hash) + getDeadServersList().hashCode();
      }
      if (getRegionsInTransitionCount() > 0) {
        hash = (37 * hash) + REGIONS_IN_TRANSITION_FIELD_NUMBER;
        hash = (53 * hash) + getRegionsInTransitionList().hashCode();
      }
      if (hasClusterId()) {
        hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER;
        hash = (53 * hash) + getClusterId().hashCode();
      }
      if (getMasterCoprocessorsCount() > 0) {
        hash = (37 * hash) + MASTER_COPROCESSORS_FIELD_NUMBER;
        hash = (53 * hash) + getMasterCoprocessorsList().hashCode();
      }
      if (hasMaster()) {
        hash = (37 * hash) + MASTER_FIELD_NUMBER;
        hash = (53 * hash) + getMaster().hashCode();
      }
      if (getBackupMastersCount() > 0) {
        hash = (37 * hash) + BACKUP_MASTERS_FIELD_NUMBER;
        hash = (53 * hash) + getBackupMastersList().hashCode();
      }
      if (hasBalancerOn()) {
        hash = (37 * hash) + BALANCER_ON_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getBalancerOn());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
12558 
    // Static parsing entry points for every supported input kind
    // (ByteString, byte[], InputStream, CodedInputStream, with and without an
    // extension registry). All delegate to the message's PARSER; the
    // "Delimited" variants read a varint length prefix first.
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
12611 
    // Creates a fresh, empty builder.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Creates a builder pre-populated from an existing message.
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    // Builder seeded with this instance's current field values.
    public Builder toBuilder() { return newBuilder(this); }

    // Framework hook: builder wired to a parent for invalidation callbacks.
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
12625     /**
12626      * Protobuf type {@code hbase.pb.ClusterStatus}
12627      */
12628     public static final class Builder extends
12629         com.google.protobuf.GeneratedMessage.Builder<Builder>
12630        implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder {
      // Descriptor for the hbase.pb.ClusterStatus message type.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ClusterStatus_descriptor;
      }

      // Reflection table mapping descriptor fields to the generated
      // ClusterStatus / Builder accessor methods.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ClusterStatus_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder.class);
      }
12642 
      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      // Variant used by the framework when this builder is nested inside a
      // parent builder that must be notified of changes.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates the nested-field builders when the runtime is
      // configured to always use field builders (alwaysUseFieldBuilders).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getHbaseVersionFieldBuilder();
          getLiveServersFieldBuilder();
          getDeadServersFieldBuilder();
          getRegionsInTransitionFieldBuilder();
          getClusterIdFieldBuilder();
          getMasterCoprocessorsFieldBuilder();
          getMasterFieldBuilder();
          getBackupMastersFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
12668 
      // Resets every field to its default and clears all presence bits
      // (builder bit layout: 0x01 hbaseVersion, 0x02 liveServers,
      // 0x04 deadServers, 0x08 regionsInTransition, 0x10 clusterId,
      // 0x20 masterCoprocessors, 0x40 master, 0x80 backupMasters,
      // 0x100 balancerOn). Each field is reset either directly or via its
      // nested field builder, whichever is currently in use.
      public Builder clear() {
        super.clear();
        if (hbaseVersionBuilder_ == null) {
          hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
        } else {
          hbaseVersionBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (liveServersBuilder_ == null) {
          liveServers_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
        } else {
          liveServersBuilder_.clear();
        }
        if (deadServersBuilder_ == null) {
          deadServers_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
        } else {
          deadServersBuilder_.clear();
        }
        if (regionsInTransitionBuilder_ == null) {
          regionsInTransition_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
        } else {
          regionsInTransitionBuilder_.clear();
        }
        if (clusterIdBuilder_ == null) {
          clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
        } else {
          clusterIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000010);
        if (masterCoprocessorsBuilder_ == null) {
          masterCoprocessors_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
        } else {
          masterCoprocessorsBuilder_.clear();
        }
        if (masterBuilder_ == null) {
          master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
        } else {
          masterBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000040);
        if (backupMastersBuilder_ == null) {
          backupMasters_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000080);
        } else {
          backupMastersBuilder_.clear();
        }
        balancerOn_ = false;
        bitField0_ = (bitField0_ & ~0x00000100);
        return this;
      }
12723 
      // Deep copy: round-trips the current (possibly partial) state through a
      // new builder rather than sharing mutable internals.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_hbase_pb_ClusterStatus_descriptor;
      }

      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
      }
12736 
      // Builds the message, rejecting it if any contained message is missing
      // required fields (contrast with buildPartial(), which skips the check).
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus build() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
12744 
      // Assembles a ClusterStatus from the builder's state without an
      // isInitialized() check. Singular presence bits are remapped from the
      // builder's layout (0x01, 0x10, 0x40, 0x100) to the message's denser
      // layout (0x01, 0x02, 0x04, 0x08); repeated lists are wrapped
      // unmodifiable on first build and their builder bit is cleared so the
      // builder no longer treats them as mutable.
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (hbaseVersionBuilder_ == null) {
          result.hbaseVersion_ = hbaseVersion_;
        } else {
          result.hbaseVersion_ = hbaseVersionBuilder_.build();
        }
        if (liveServersBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002)) {
            liveServers_ = java.util.Collections.unmodifiableList(liveServers_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.liveServers_ = liveServers_;
        } else {
          result.liveServers_ = liveServersBuilder_.build();
        }
        if (deadServersBuilder_ == null) {
          if (((bitField0_ & 0x00000004) == 0x00000004)) {
            deadServers_ = java.util.Collections.unmodifiableList(deadServers_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.deadServers_ = deadServers_;
        } else {
          result.deadServers_ = deadServersBuilder_.build();
        }
        if (regionsInTransitionBuilder_ == null) {
          if (((bitField0_ & 0x00000008) == 0x00000008)) {
            regionsInTransition_ = java.util.Collections.unmodifiableList(regionsInTransition_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.regionsInTransition_ = regionsInTransition_;
        } else {
          result.regionsInTransition_ = regionsInTransitionBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
          to_bitField0_ |= 0x00000002;
        }
        if (clusterIdBuilder_ == null) {
          result.clusterId_ = clusterId_;
        } else {
          result.clusterId_ = clusterIdBuilder_.build();
        }
        if (masterCoprocessorsBuilder_ == null) {
          if (((bitField0_ & 0x00000020) == 0x00000020)) {
            masterCoprocessors_ = java.util.Collections.unmodifiableList(masterCoprocessors_);
            bitField0_ = (bitField0_ & ~0x00000020);
          }
          result.masterCoprocessors_ = masterCoprocessors_;
        } else {
          result.masterCoprocessors_ = masterCoprocessorsBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
          to_bitField0_ |= 0x00000004;
        }
        if (masterBuilder_ == null) {
          result.master_ = master_;
        } else {
          result.master_ = masterBuilder_.build();
        }
        if (backupMastersBuilder_ == null) {
          if (((bitField0_ & 0x00000080) == 0x00000080)) {
            backupMasters_ = java.util.Collections.unmodifiableList(backupMasters_);
            bitField0_ = (bitField0_ & ~0x00000080);
          }
          result.backupMasters_ = backupMasters_;
        } else {
          result.backupMasters_ = backupMastersBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
          to_bitField0_ |= 0x00000008;
        }
        result.balancerOn_ = balancerOn_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
12826 
      // Type-dispatching merge: uses the strongly-typed overload for
      // ClusterStatus messages, otherwise falls back to reflective merging.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
12835 
12836       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus other) {
12837         if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) return this;
12838         if (other.hasHbaseVersion()) {
12839           mergeHbaseVersion(other.getHbaseVersion());
12840         }
12841         if (liveServersBuilder_ == null) {
12842           if (!other.liveServers_.isEmpty()) {
12843             if (liveServers_.isEmpty()) {
12844               liveServers_ = other.liveServers_;
12845               bitField0_ = (bitField0_ & ~0x00000002);
12846             } else {
12847               ensureLiveServersIsMutable();
12848               liveServers_.addAll(other.liveServers_);
12849             }
12850             onChanged();
12851           }
12852         } else {
12853           if (!other.liveServers_.isEmpty()) {
12854             if (liveServersBuilder_.isEmpty()) {
12855               liveServersBuilder_.dispose();
12856               liveServersBuilder_ = null;
12857               liveServers_ = other.liveServers_;
12858               bitField0_ = (bitField0_ & ~0x00000002);
12859               liveServersBuilder_ = 
12860                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
12861                    getLiveServersFieldBuilder() : null;
12862             } else {
12863               liveServersBuilder_.addAllMessages(other.liveServers_);
12864             }
12865           }
12866         }
12867         if (deadServersBuilder_ == null) {
12868           if (!other.deadServers_.isEmpty()) {
12869             if (deadServers_.isEmpty()) {
12870               deadServers_ = other.deadServers_;
12871               bitField0_ = (bitField0_ & ~0x00000004);
12872             } else {
12873               ensureDeadServersIsMutable();
12874               deadServers_.addAll(other.deadServers_);
12875             }
12876             onChanged();
12877           }
12878         } else {
12879           if (!other.deadServers_.isEmpty()) {
12880             if (deadServersBuilder_.isEmpty()) {
12881               deadServersBuilder_.dispose();
12882               deadServersBuilder_ = null;
12883               deadServers_ = other.deadServers_;
12884               bitField0_ = (bitField0_ & ~0x00000004);
12885               deadServersBuilder_ = 
12886                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
12887                    getDeadServersFieldBuilder() : null;
12888             } else {
12889               deadServersBuilder_.addAllMessages(other.deadServers_);
12890             }
12891           }
12892         }
12893         if (regionsInTransitionBuilder_ == null) {
12894           if (!other.regionsInTransition_.isEmpty()) {
12895             if (regionsInTransition_.isEmpty()) {
12896               regionsInTransition_ = other.regionsInTransition_;
12897               bitField0_ = (bitField0_ & ~0x00000008);
12898             } else {
12899               ensureRegionsInTransitionIsMutable();
12900               regionsInTransition_.addAll(other.regionsInTransition_);
12901             }
12902             onChanged();
12903           }
12904         } else {
12905           if (!other.regionsInTransition_.isEmpty()) {
12906             if (regionsInTransitionBuilder_.isEmpty()) {
12907               regionsInTransitionBuilder_.dispose();
12908               regionsInTransitionBuilder_ = null;
12909               regionsInTransition_ = other.regionsInTransition_;
12910               bitField0_ = (bitField0_ & ~0x00000008);
12911               regionsInTransitionBuilder_ = 
12912                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
12913                    getRegionsInTransitionFieldBuilder() : null;
12914             } else {
12915               regionsInTransitionBuilder_.addAllMessages(other.regionsInTransition_);
12916             }
12917           }
12918         }
12919         if (other.hasClusterId()) {
12920           mergeClusterId(other.getClusterId());
12921         }
12922         if (masterCoprocessorsBuilder_ == null) {
12923           if (!other.masterCoprocessors_.isEmpty()) {
12924             if (masterCoprocessors_.isEmpty()) {
12925               masterCoprocessors_ = other.masterCoprocessors_;
12926               bitField0_ = (bitField0_ & ~0x00000020);
12927             } else {
12928               ensureMasterCoprocessorsIsMutable();
12929               masterCoprocessors_.addAll(other.masterCoprocessors_);
12930             }
12931             onChanged();
12932           }
12933         } else {
12934           if (!other.masterCoprocessors_.isEmpty()) {
12935             if (masterCoprocessorsBuilder_.isEmpty()) {
12936               masterCoprocessorsBuilder_.dispose();
12937               masterCoprocessorsBuilder_ = null;
12938               masterCoprocessors_ = other.masterCoprocessors_;
12939               bitField0_ = (bitField0_ & ~0x00000020);
12940               masterCoprocessorsBuilder_ = 
12941                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
12942                    getMasterCoprocessorsFieldBuilder() : null;
12943             } else {
12944               masterCoprocessorsBuilder_.addAllMessages(other.masterCoprocessors_);
12945             }
12946           }
12947         }
12948         if (other.hasMaster()) {
12949           mergeMaster(other.getMaster());
12950         }
12951         if (backupMastersBuilder_ == null) {
12952           if (!other.backupMasters_.isEmpty()) {
12953             if (backupMasters_.isEmpty()) {
12954               backupMasters_ = other.backupMasters_;
12955               bitField0_ = (bitField0_ & ~0x00000080);
12956             } else {
12957               ensureBackupMastersIsMutable();
12958               backupMasters_.addAll(other.backupMasters_);
12959             }
12960             onChanged();
12961           }
12962         } else {
12963           if (!other.backupMasters_.isEmpty()) {
12964             if (backupMastersBuilder_.isEmpty()) {
12965               backupMastersBuilder_.dispose();
12966               backupMastersBuilder_ = null;
12967               backupMasters_ = other.backupMasters_;
12968               bitField0_ = (bitField0_ & ~0x00000080);
12969               backupMastersBuilder_ = 
12970                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
12971                    getBackupMastersFieldBuilder() : null;
12972             } else {
12973               backupMastersBuilder_.addAllMessages(other.backupMasters_);
12974             }
12975           }
12976         }
12977         if (other.hasBalancerOn()) {
12978           setBalancerOn(other.getBalancerOn());
12979         }
12980         this.mergeUnknownFields(other.getUnknownFields());
12981         return this;
12982       }
12983 
12984       public final boolean isInitialized() {
        // NOTE(review): generated protobuf code ("DO NOT EDIT" per file header);
        // regenerate from ClusterStatus.proto rather than hand-editing.
        // Recursively verifies this builder: every *present* optional message
        // field (hbase_version, cluster_id, master) and every element of each
        // repeated message field (live_servers, dead_servers,
        // regions_in_transition, master_coprocessors, backup_masters) must
        // itself be initialized. Returns false at the first failure.
12985         if (hasHbaseVersion()) {
12986           if (!getHbaseVersion().isInitialized()) {
12987             
12988             return false;
12989           }
12990         }
12991         for (int i = 0; i < getLiveServersCount(); i++) {
12992           if (!getLiveServers(i).isInitialized()) {
12993             
12994             return false;
12995           }
12996         }
12997         for (int i = 0; i < getDeadServersCount(); i++) {
12998           if (!getDeadServers(i).isInitialized()) {
12999             
13000             return false;
13001           }
13002         }
13003         for (int i = 0; i < getRegionsInTransitionCount(); i++) {
13004           if (!getRegionsInTransition(i).isInitialized()) {
13005             
13006             return false;
13007           }
13008         }
13009         if (hasClusterId()) {
13010           if (!getClusterId().isInitialized()) {
13011             
13012             return false;
13013           }
13014         }
13015         for (int i = 0; i < getMasterCoprocessorsCount(); i++) {
13016           if (!getMasterCoprocessors(i).isInitialized()) {
13017             
13018             return false;
13019           }
13020         }
13021         if (hasMaster()) {
13022           if (!getMaster().isInitialized()) {
13023             
13024             return false;
13025           }
13026         }
13027         for (int i = 0; i < getBackupMastersCount(); i++) {
13028           if (!getBackupMasters(i).isInitialized()) {
13029             
13030             return false;
13031           }
13032         }
13033         return true;
13034       }
13035 
13036       public Builder mergeFrom(
13037           com.google.protobuf.CodedInputStream input,
13038           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13039           throws java.io.IOException {
        // Generated stream-merge: parses a ClusterStatus from the input and
        // merges it into this builder. On InvalidProtocolBufferException the
        // partially-parsed message is recovered via e.getUnfinishedMessage()
        // and the finally block still merges whatever was parsed before the
        // exception is rethrown to the caller.
13040         org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parsedMessage = null;
13041         try {
13042           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13043         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13044           parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) e.getUnfinishedMessage();
13045           throw e;
13046         } finally {
13047           if (parsedMessage != null) {
13048             mergeFrom(parsedMessage);
13049           }
13050         }
13051         return this;
13052       }
13053       private int bitField0_;
        // Builder bookkeeping bitmask. Bits visible in this excerpt:
        // 0x01 = hbase_version present; 0x02/0x04/0x08/0x20/0x80 = the
        // live_servers / dead_servers / regions_in_transition /
        // master_coprocessors / backup_masters lists are privately-owned
        // mutable copies. Remaining bits belong to fields outside this view.
13054 
13055       // optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;
        // Dual representation (generated pattern): while hbaseVersionBuilder_
        // is null the value lives in hbaseVersion_; after
        // getHbaseVersionFieldBuilder() is first called, all access is
        // delegated to the SingleFieldBuilder and hbaseVersion_ is nulled.
13056       private org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
13057       private com.google.protobuf.SingleFieldBuilder<
13058           org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder> hbaseVersionBuilder_;
13059       /**
13060        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13061        */
13062       public boolean hasHbaseVersion() {
13063         return ((bitField0_ & 0x00000001) == 0x00000001);
13064       }
13065       /**
13066        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13067        */
13068       public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion() {
13069         if (hbaseVersionBuilder_ == null) {
13070           return hbaseVersion_;
13071         } else {
13072           return hbaseVersionBuilder_.getMessage();
13073         }
13074       }
13075       /**
13076        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13077        */
13078       public Builder setHbaseVersion(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent value) {
13079         if (hbaseVersionBuilder_ == null) {
13080           if (value == null) {
13081             throw new NullPointerException();
13082           }
13083           hbaseVersion_ = value;
13084           onChanged();
13085         } else {
13086           hbaseVersionBuilder_.setMessage(value);
13087         }
13088         bitField0_ |= 0x00000001;
13089         return this;
13090       }
13091       /**
13092        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13093        */
13094       public Builder setHbaseVersion(
13095           org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder builderForValue) {
13096         if (hbaseVersionBuilder_ == null) {
13097           hbaseVersion_ = builderForValue.build();
13098           onChanged();
13099         } else {
13100           hbaseVersionBuilder_.setMessage(builderForValue.build());
13101         }
13102         bitField0_ |= 0x00000001;
13103         return this;
13104       }
13105       /**
13106        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13107        */
13108       public Builder mergeHbaseVersion(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent value) {
        // Merge semantics: if a non-default value is already present, field-merge
        // the two messages; otherwise simply adopt the incoming value.
13109         if (hbaseVersionBuilder_ == null) {
13110           if (((bitField0_ & 0x00000001) == 0x00000001) &&
13111               hbaseVersion_ != org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance()) {
13112             hbaseVersion_ =
13113               org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.newBuilder(hbaseVersion_).mergeFrom(value).buildPartial();
13114           } else {
13115             hbaseVersion_ = value;
13116           }
13117           onChanged();
13118         } else {
13119           hbaseVersionBuilder_.mergeFrom(value);
13120         }
13121         bitField0_ |= 0x00000001;
13122         return this;
13123       }
13124       /**
13125        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13126        */
13127       public Builder clearHbaseVersion() {
13128         if (hbaseVersionBuilder_ == null) {
13129           hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
13130           onChanged();
13131         } else {
13132           hbaseVersionBuilder_.clear();
13133         }
13134         bitField0_ = (bitField0_ & ~0x00000001);
13135         return this;
13136       }
13137       /**
13138        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13139        */
13140       public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder getHbaseVersionBuilder() {
        // Marks the field present and switches to builder-backed storage.
13141         bitField0_ |= 0x00000001;
13142         onChanged();
13143         return getHbaseVersionFieldBuilder().getBuilder();
13144       }
13145       /**
13146        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13147        */
13148       public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder() {
13149         if (hbaseVersionBuilder_ != null) {
13150           return hbaseVersionBuilder_.getMessageOrBuilder();
13151         } else {
13152           return hbaseVersion_;
13153         }
13154       }
13155       /**
13156        * <code>optional .hbase.pb.HBaseVersionFileContent hbase_version = 1;</code>
13157        */
13158       private com.google.protobuf.SingleFieldBuilder<
13159           org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder> 
13160           getHbaseVersionFieldBuilder() {
        // Lazily creates the SingleFieldBuilder, seeding it with the current
        // message, then nulls hbaseVersion_ — from here on the builder owns
        // the value.
13161         if (hbaseVersionBuilder_ == null) {
13162           hbaseVersionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13163               org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder>(
13164                   hbaseVersion_,
13165                   getParentForChildren(),
13166                   isClean());
13167           hbaseVersion_ = null;
13168         }
13169         return hbaseVersionBuilder_;
13170       }
13171 
13172       // repeated .hbase.pb.LiveServerInfo live_servers = 2;
        // Generated accessors for the repeated live_servers field. While
        // liveServersBuilder_ is null the builder works on the liveServers_
        // list directly, using copy-on-write guarded by bit 0x02 of
        // bitField0_ (ensureLiveServersIsMutable). Once
        // getLiveServersFieldBuilder() is called, every accessor delegates to
        // the RepeatedFieldBuilder and liveServers_ is nulled.
13173       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> liveServers_ =
13174         java.util.Collections.emptyList();
13175       private void ensureLiveServersIsMutable() {
        // Copy-on-write: replace the (possibly shared/immutable) list with a
        // private ArrayList the first time a mutation is attempted.
13176         if (!((bitField0_ & 0x00000002) == 0x00000002)) {
13177           liveServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo>(liveServers_);
13178           bitField0_ |= 0x00000002;
13179          }
13180       }
13181 
13182       private com.google.protobuf.RepeatedFieldBuilder<
13183           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> liveServersBuilder_;
13184 
13185       /**
13186        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13187        */
13188       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> getLiveServersList() {
13189         if (liveServersBuilder_ == null) {
13190           return java.util.Collections.unmodifiableList(liveServers_);
13191         } else {
13192           return liveServersBuilder_.getMessageList();
13193         }
13194       }
13195       /**
13196        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13197        */
13198       public int getLiveServersCount() {
13199         if (liveServersBuilder_ == null) {
13200           return liveServers_.size();
13201         } else {
13202           return liveServersBuilder_.getCount();
13203         }
13204       }
13205       /**
13206        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13207        */
13208       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index) {
13209         if (liveServersBuilder_ == null) {
13210           return liveServers_.get(index);
13211         } else {
13212           return liveServersBuilder_.getMessage(index);
13213         }
13214       }
13215       /**
13216        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13217        */
13218       public Builder setLiveServers(
13219           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
13220         if (liveServersBuilder_ == null) {
13221           if (value == null) {
13222             throw new NullPointerException();
13223           }
13224           ensureLiveServersIsMutable();
13225           liveServers_.set(index, value);
13226           onChanged();
13227         } else {
13228           liveServersBuilder_.setMessage(index, value);
13229         }
13230         return this;
13231       }
13232       /**
13233        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13234        */
13235       public Builder setLiveServers(
13236           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
13237         if (liveServersBuilder_ == null) {
13238           ensureLiveServersIsMutable();
13239           liveServers_.set(index, builderForValue.build());
13240           onChanged();
13241         } else {
13242           liveServersBuilder_.setMessage(index, builderForValue.build());
13243         }
13244         return this;
13245       }
13246       /**
13247        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13248        */
13249       public Builder addLiveServers(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
13250         if (liveServersBuilder_ == null) {
13251           if (value == null) {
13252             throw new NullPointerException();
13253           }
13254           ensureLiveServersIsMutable();
13255           liveServers_.add(value);
13256           onChanged();
13257         } else {
13258           liveServersBuilder_.addMessage(value);
13259         }
13260         return this;
13261       }
13262       /**
13263        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13264        */
13265       public Builder addLiveServers(
13266           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
13267         if (liveServersBuilder_ == null) {
13268           if (value == null) {
13269             throw new NullPointerException();
13270           }
13271           ensureLiveServersIsMutable();
13272           liveServers_.add(index, value);
13273           onChanged();
13274         } else {
13275           liveServersBuilder_.addMessage(index, value);
13276         }
13277         return this;
13278       }
13279       /**
13280        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13281        */
13282       public Builder addLiveServers(
13283           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
13284         if (liveServersBuilder_ == null) {
13285           ensureLiveServersIsMutable();
13286           liveServers_.add(builderForValue.build());
13287           onChanged();
13288         } else {
13289           liveServersBuilder_.addMessage(builderForValue.build());
13290         }
13291         return this;
13292       }
13293       /**
13294        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13295        */
13296       public Builder addLiveServers(
13297           int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
13298         if (liveServersBuilder_ == null) {
13299           ensureLiveServersIsMutable();
13300           liveServers_.add(index, builderForValue.build());
13301           onChanged();
13302         } else {
13303           liveServersBuilder_.addMessage(index, builderForValue.build());
13304         }
13305         return this;
13306       }
13307       /**
13308        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13309        */
13310       public Builder addAllLiveServers(
13311           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> values) {
13312         if (liveServersBuilder_ == null) {
13313           ensureLiveServersIsMutable();
13314           super.addAll(values, liveServers_);
13315           onChanged();
13316         } else {
13317           liveServersBuilder_.addAllMessages(values);
13318         }
13319         return this;
13320       }
13321       /**
13322        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13323        */
13324       public Builder clearLiveServers() {
13325         if (liveServersBuilder_ == null) {
13326           liveServers_ = java.util.Collections.emptyList();
13327           bitField0_ = (bitField0_ & ~0x00000002);
13328           onChanged();
13329         } else {
13330           liveServersBuilder_.clear();
13331         }
13332         return this;
13333       }
13334       /**
13335        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13336        */
13337       public Builder removeLiveServers(int index) {
13338         if (liveServersBuilder_ == null) {
13339           ensureLiveServersIsMutable();
13340           liveServers_.remove(index);
13341           onChanged();
13342         } else {
13343           liveServersBuilder_.remove(index);
13344         }
13345         return this;
13346       }
13347       /**
13348        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13349        */
13350       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder getLiveServersBuilder(
13351           int index) {
13352         return getLiveServersFieldBuilder().getBuilder(index);
13353       }
13354       /**
13355        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13356        */
13357       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
13358           int index) {
13359         if (liveServersBuilder_ == null) {
13360           return liveServers_.get(index);  } else {
13361           return liveServersBuilder_.getMessageOrBuilder(index);
13362         }
13363       }
13364       /**
13365        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13366        */
13367       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> 
13368            getLiveServersOrBuilderList() {
13369         if (liveServersBuilder_ != null) {
13370           return liveServersBuilder_.getMessageOrBuilderList();
13371         } else {
13372           return java.util.Collections.unmodifiableList(liveServers_);
13373         }
13374       }
13375       /**
13376        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13377        */
13378       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder addLiveServersBuilder() {
13379         return getLiveServersFieldBuilder().addBuilder(
13380             org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance());
13381       }
13382       /**
13383        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13384        */
13385       public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder addLiveServersBuilder(
13386           int index) {
13387         return getLiveServersFieldBuilder().addBuilder(
13388             index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance());
13389       }
13390       /**
13391        * <code>repeated .hbase.pb.LiveServerInfo live_servers = 2;</code>
13392        */
13393       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder> 
13394            getLiveServersBuilderList() {
13395         return getLiveServersFieldBuilder().getBuilderList();
13396       }
13397       private com.google.protobuf.RepeatedFieldBuilder<
13398           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> 
13399           getLiveServersFieldBuilder() {
        // Lazily creates the RepeatedFieldBuilder, seeding it with the current
        // list (and whether that list is this builder's private mutable copy,
        // bit 0x02), then nulls liveServers_ — the builder owns the data
        // from here on.
13400         if (liveServersBuilder_ == null) {
13401           liveServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
13402               org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>(
13403                   liveServers_,
13404                   ((bitField0_ & 0x00000002) == 0x00000002),
13405                   getParentForChildren(),
13406                   isClean());
13407           liveServers_ = null;
13408         }
13409         return liveServersBuilder_;
13410       }
13411 
13412       // repeated .hbase.pb.ServerName dead_servers = 3;
        // Generated accessors for the repeated dead_servers field. Same dual
        // representation as the other repeated fields: list-backed (copy-on-write
        // guarded by bit 0x04 of bitField0_) until getDeadServersFieldBuilder()
        // is called, builder-backed afterwards.
13413       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> deadServers_ =
13414         java.util.Collections.emptyList();
13415       private void ensureDeadServersIsMutable() {
        // Copy-on-write: take a private ArrayList copy before the first mutation.
13416         if (!((bitField0_ & 0x00000004) == 0x00000004)) {
13417           deadServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(deadServers_);
13418           bitField0_ |= 0x00000004;
13419          }
13420       }
13421 
13422       private com.google.protobuf.RepeatedFieldBuilder<
13423           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> deadServersBuilder_;
13424 
13425       /**
13426        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13427        */
13428       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getDeadServersList() {
13429         if (deadServersBuilder_ == null) {
13430           return java.util.Collections.unmodifiableList(deadServers_);
13431         } else {
13432           return deadServersBuilder_.getMessageList();
13433         }
13434       }
13435       /**
13436        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13437        */
13438       public int getDeadServersCount() {
13439         if (deadServersBuilder_ == null) {
13440           return deadServers_.size();
13441         } else {
13442           return deadServersBuilder_.getCount();
13443         }
13444       }
13445       /**
13446        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13447        */
13448       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index) {
13449         if (deadServersBuilder_ == null) {
13450           return deadServers_.get(index);
13451         } else {
13452           return deadServersBuilder_.getMessage(index);
13453         }
13454       }
13455       /**
13456        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13457        */
13458       public Builder setDeadServers(
13459           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
13460         if (deadServersBuilder_ == null) {
13461           if (value == null) {
13462             throw new NullPointerException();
13463           }
13464           ensureDeadServersIsMutable();
13465           deadServers_.set(index, value);
13466           onChanged();
13467         } else {
13468           deadServersBuilder_.setMessage(index, value);
13469         }
13470         return this;
13471       }
13472       /**
13473        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13474        */
13475       public Builder setDeadServers(
13476           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
13477         if (deadServersBuilder_ == null) {
13478           ensureDeadServersIsMutable();
13479           deadServers_.set(index, builderForValue.build());
13480           onChanged();
13481         } else {
13482           deadServersBuilder_.setMessage(index, builderForValue.build());
13483         }
13484         return this;
13485       }
13486       /**
13487        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13488        */
13489       public Builder addDeadServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
13490         if (deadServersBuilder_ == null) {
13491           if (value == null) {
13492             throw new NullPointerException();
13493           }
13494           ensureDeadServersIsMutable();
13495           deadServers_.add(value);
13496           onChanged();
13497         } else {
13498           deadServersBuilder_.addMessage(value);
13499         }
13500         return this;
13501       }
13502       /**
13503        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13504        */
13505       public Builder addDeadServers(
13506           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
13507         if (deadServersBuilder_ == null) {
13508           if (value == null) {
13509             throw new NullPointerException();
13510           }
13511           ensureDeadServersIsMutable();
13512           deadServers_.add(index, value);
13513           onChanged();
13514         } else {
13515           deadServersBuilder_.addMessage(index, value);
13516         }
13517         return this;
13518       }
13519       /**
13520        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13521        */
13522       public Builder addDeadServers(
13523           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
13524         if (deadServersBuilder_ == null) {
13525           ensureDeadServersIsMutable();
13526           deadServers_.add(builderForValue.build());
13527           onChanged();
13528         } else {
13529           deadServersBuilder_.addMessage(builderForValue.build());
13530         }
13531         return this;
13532       }
13533       /**
13534        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13535        */
13536       public Builder addDeadServers(
13537           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
13538         if (deadServersBuilder_ == null) {
13539           ensureDeadServersIsMutable();
13540           deadServers_.add(index, builderForValue.build());
13541           onChanged();
13542         } else {
13543           deadServersBuilder_.addMessage(index, builderForValue.build());
13544         }
13545         return this;
13546       }
13547       /**
13548        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13549        */
13550       public Builder addAllDeadServers(
13551           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
13552         if (deadServersBuilder_ == null) {
13553           ensureDeadServersIsMutable();
13554           super.addAll(values, deadServers_);
13555           onChanged();
13556         } else {
13557           deadServersBuilder_.addAllMessages(values);
13558         }
13559         return this;
13560       }
13561       /**
13562        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13563        */
13564       public Builder clearDeadServers() {
13565         if (deadServersBuilder_ == null) {
13566           deadServers_ = java.util.Collections.emptyList();
13567           bitField0_ = (bitField0_ & ~0x00000004);
13568           onChanged();
13569         } else {
13570           deadServersBuilder_.clear();
13571         }
13572         return this;
13573       }
13574       /**
13575        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13576        */
13577       public Builder removeDeadServers(int index) {
13578         if (deadServersBuilder_ == null) {
13579           ensureDeadServersIsMutable();
13580           deadServers_.remove(index);
13581           onChanged();
13582         } else {
13583           deadServersBuilder_.remove(index);
13584         }
13585         return this;
13586       }
13587       /**
13588        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13589        */
13590       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getDeadServersBuilder(
13591           int index) {
13592         return getDeadServersFieldBuilder().getBuilder(index);
13593       }
13594       /**
13595        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13596        */
13597       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
13598           int index) {
13599         if (deadServersBuilder_ == null) {
13600           return deadServers_.get(index);  } else {
13601           return deadServersBuilder_.getMessageOrBuilder(index);
13602         }
13603       }
13604       /**
13605        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13606        */
13607       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
13608            getDeadServersOrBuilderList() {
13609         if (deadServersBuilder_ != null) {
13610           return deadServersBuilder_.getMessageOrBuilderList();
13611         } else {
13612           return java.util.Collections.unmodifiableList(deadServers_);
13613         }
13614       }
13615       /**
13616        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13617        */
13618       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addDeadServersBuilder() {
13619         return getDeadServersFieldBuilder().addBuilder(
13620             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
13621       }
13622       /**
13623        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13624        */
13625       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addDeadServersBuilder(
13626           int index) {
13627         return getDeadServersFieldBuilder().addBuilder(
13628             index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
13629       }
13630       /**
13631        * <code>repeated .hbase.pb.ServerName dead_servers = 3;</code>
13632        */
13633       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder> 
13634            getDeadServersBuilderList() {
13635         return getDeadServersFieldBuilder().getBuilderList();
13636       }
13637       private com.google.protobuf.RepeatedFieldBuilder<
13638           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
13639           getDeadServersFieldBuilder() {
13640         if (deadServersBuilder_ == null) {
13641           deadServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
13642               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
13643                   deadServers_,
13644                   ((bitField0_ & 0x00000004) == 0x00000004),
13645                   getParentForChildren(),
13646                   isClean());
13647           deadServers_ = null;
13648         }
13649         return deadServersBuilder_;
13650       }
13651 
      // repeated .hbase.pb.RegionInTransition regions_in_transition = 4;
      // NOTE(review): protoc-generated accessor suite for repeated field 4 —
      // do not hand-edit; regenerate from ClusterStatus.proto. Dual-mode
      // storage: plain list (regionsInTransition_) guarded by presence bit
      // 0x00000008 until getRegionsInTransitionFieldBuilder() is first
      // called, after which regionsInTransitionBuilder_ owns the data.
      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> regionsInTransition_ =
        java.util.Collections.emptyList();
      // Copy-on-first-write: the initial emptyList() (and any list adopted
      // from a built message) is immutable, so replace it with a mutable
      // ArrayList before the first in-place mutation.
      private void ensureRegionsInTransitionIsMutable() {
        if (!((bitField0_ & 0x00000008) == 0x00000008)) {
          regionsInTransition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition>(regionsInTransition_);
          bitField0_ |= 0x00000008;
         }
      }

      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder> regionsInTransitionBuilder_;

      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> getRegionsInTransitionList() {
        if (regionsInTransitionBuilder_ == null) {
          // Read-only view; callers must not mutate the returned list.
          return java.util.Collections.unmodifiableList(regionsInTransition_);
        } else {
          return regionsInTransitionBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public int getRegionsInTransitionCount() {
        if (regionsInTransitionBuilder_ == null) {
          return regionsInTransition_.size();
        } else {
          return regionsInTransitionBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index) {
        if (regionsInTransitionBuilder_ == null) {
          return regionsInTransition_.get(index);
        } else {
          return regionsInTransitionBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder setRegionsInTransition(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
        if (regionsInTransitionBuilder_ == null) {
          if (value == null) {
            // Protobuf fields are null-hostile; reject rather than store null.
            throw new NullPointerException();
          }
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.set(index, value);
          onChanged();
        } else {
          regionsInTransitionBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder setRegionsInTransition(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
        if (regionsInTransitionBuilder_ == null) {
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.set(index, builderForValue.build());
          onChanged();
        } else {
          regionsInTransitionBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder addRegionsInTransition(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
        if (regionsInTransitionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.add(value);
          onChanged();
        } else {
          regionsInTransitionBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder addRegionsInTransition(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
        if (regionsInTransitionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.add(index, value);
          onChanged();
        } else {
          regionsInTransitionBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder addRegionsInTransition(
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
        if (regionsInTransitionBuilder_ == null) {
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.add(builderForValue.build());
          onChanged();
        } else {
          regionsInTransitionBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder addRegionsInTransition(
          int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
        if (regionsInTransitionBuilder_ == null) {
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.add(index, builderForValue.build());
          onChanged();
        } else {
          regionsInTransitionBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder addAllRegionsInTransition(
          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> values) {
        if (regionsInTransitionBuilder_ == null) {
          ensureRegionsInTransitionIsMutable();
          // super.addAll is GeneratedMessage.Builder's bulk-append helper.
          super.addAll(values, regionsInTransition_);
          onChanged();
        } else {
          regionsInTransitionBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder clearRegionsInTransition() {
        if (regionsInTransitionBuilder_ == null) {
          regionsInTransition_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          regionsInTransitionBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public Builder removeRegionsInTransition(int index) {
        if (regionsInTransitionBuilder_ == null) {
          ensureRegionsInTransitionIsMutable();
          regionsInTransition_.remove(index);
          onChanged();
        } else {
          regionsInTransitionBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder getRegionsInTransitionBuilder(
          int index) {
        // Forces the field into builder-backed mode.
        return getRegionsInTransitionFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
          int index) {
        if (regionsInTransitionBuilder_ == null) {
          return regionsInTransition_.get(index);  } else {
          return regionsInTransitionBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
           getRegionsInTransitionOrBuilderList() {
        if (regionsInTransitionBuilder_ != null) {
          return regionsInTransitionBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(regionsInTransition_);
        }
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder addRegionsInTransitionBuilder() {
        return getRegionsInTransitionFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder addRegionsInTransitionBuilder(
          int index) {
        return getRegionsInTransitionFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.RegionInTransition regions_in_transition = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder>
           getRegionsInTransitionBuilderList() {
        return getRegionsInTransitionFieldBuilder().getBuilderList();
      }
      // Lazy one-time handoff from list storage to RepeatedFieldBuilder;
      // regionsInTransition_ is nulled so the builder is authoritative.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
          getRegionsInTransitionFieldBuilder() {
        if (regionsInTransitionBuilder_ == null) {
          regionsInTransitionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>(
                  regionsInTransition_,
                  ((bitField0_ & 0x00000008) == 0x00000008),
                  getParentForChildren(),
                  isClean());
          regionsInTransition_ = null;
        }
        return regionsInTransitionBuilder_;
      }
13891 
      // optional .hbase.pb.ClusterId cluster_id = 5;
      // NOTE(review): protoc-generated accessor suite for optional singular
      // message field 5 — do not hand-edit; regenerate from
      // ClusterStatus.proto. Presence is tracked by bit 0x00000010 of
      // bitField0_; storage is the clusterId_ message until
      // getClusterIdFieldBuilder() is first called, after which
      // clusterIdBuilder_ owns the value.
      private org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder> clusterIdBuilder_;
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public boolean hasClusterId() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId() {
        if (clusterIdBuilder_ == null) {
          return clusterId_;
        } else {
          return clusterIdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public Builder setClusterId(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId value) {
        if (clusterIdBuilder_ == null) {
          if (value == null) {
            // Protobuf fields are null-hostile; reject rather than store null.
            throw new NullPointerException();
          }
          clusterId_ = value;
          onChanged();
        } else {
          clusterIdBuilder_.setMessage(value);
        }
        // Mark the field present regardless of storage mode.
        bitField0_ |= 0x00000010;
        return this;
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public Builder setClusterId(
          org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder builderForValue) {
        if (clusterIdBuilder_ == null) {
          clusterId_ = builderForValue.build();
          onChanged();
        } else {
          clusterIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        return this;
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public Builder mergeClusterId(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId value) {
        if (clusterIdBuilder_ == null) {
          // Standard generated merge: if a non-default value is already
          // present, field-merge the two messages; otherwise adopt the
          // incoming value wholesale.
          if (((bitField0_ & 0x00000010) == 0x00000010) &&
              clusterId_ != org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance()) {
            clusterId_ =
              org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.newBuilder(clusterId_).mergeFrom(value).buildPartial();
          } else {
            clusterId_ = value;
          }
          onChanged();
        } else {
          clusterIdBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000010;
        return this;
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public Builder clearClusterId() {
        if (clusterIdBuilder_ == null) {
          clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
          onChanged();
        } else {
          clusterIdBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder getClusterIdBuilder() {
        // Handing out a mutable builder implies the field will be set.
        bitField0_ |= 0x00000010;
        onChanged();
        return getClusterIdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder() {
        if (clusterIdBuilder_ != null) {
          return clusterIdBuilder_.getMessageOrBuilder();
        } else {
          return clusterId_;
        }
      }
      /**
       * <code>optional .hbase.pb.ClusterId cluster_id = 5;</code>
       */
      // Lazy one-time handoff from message storage to SingleFieldBuilder;
      // clusterId_ is nulled so the builder is authoritative.
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder>
          getClusterIdFieldBuilder() {
        if (clusterIdBuilder_ == null) {
          clusterIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder>(
                  clusterId_,
                  getParentForChildren(),
                  isClean());
          clusterId_ = null;
        }
        return clusterIdBuilder_;
      }
14008 
      // repeated .hbase.pb.Coprocessor master_coprocessors = 6;
      // NOTE(review): protoc-generated accessor suite for repeated field 6 —
      // do not hand-edit; regenerate from ClusterStatus.proto. Dual-mode
      // storage: plain list (masterCoprocessors_) guarded by presence bit
      // 0x00000020 until getMasterCoprocessorsFieldBuilder() is first
      // called, after which masterCoprocessorsBuilder_ owns the data.
      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> masterCoprocessors_ =
        java.util.Collections.emptyList();
      // Copy-on-first-write: swap the immutable default list for a mutable
      // ArrayList before the first in-place mutation.
      private void ensureMasterCoprocessorsIsMutable() {
        if (!((bitField0_ & 0x00000020) == 0x00000020)) {
          masterCoprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>(masterCoprocessors_);
          bitField0_ |= 0x00000020;
         }
      }

      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> masterCoprocessorsBuilder_;

      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getMasterCoprocessorsList() {
        if (masterCoprocessorsBuilder_ == null) {
          // Read-only view; callers must not mutate the returned list.
          return java.util.Collections.unmodifiableList(masterCoprocessors_);
        } else {
          return masterCoprocessorsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public int getMasterCoprocessorsCount() {
        if (masterCoprocessorsBuilder_ == null) {
          return masterCoprocessors_.size();
        } else {
          return masterCoprocessorsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index) {
        if (masterCoprocessorsBuilder_ == null) {
          return masterCoprocessors_.get(index);
        } else {
          return masterCoprocessorsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder setMasterCoprocessors(
          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
        if (masterCoprocessorsBuilder_ == null) {
          if (value == null) {
            // Protobuf fields are null-hostile; reject rather than store null.
            throw new NullPointerException();
          }
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.set(index, value);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder setMasterCoprocessors(
          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
        if (masterCoprocessorsBuilder_ == null) {
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.set(index, builderForValue.build());
          onChanged();
        } else {
          masterCoprocessorsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder addMasterCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
        if (masterCoprocessorsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.add(value);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder addMasterCoprocessors(
          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
        if (masterCoprocessorsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.add(index, value);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder addMasterCoprocessors(
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
        if (masterCoprocessorsBuilder_ == null) {
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.add(builderForValue.build());
          onChanged();
        } else {
          masterCoprocessorsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder addMasterCoprocessors(
          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
        if (masterCoprocessorsBuilder_ == null) {
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.add(index, builderForValue.build());
          onChanged();
        } else {
          masterCoprocessorsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder addAllMasterCoprocessors(
          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> values) {
        if (masterCoprocessorsBuilder_ == null) {
          ensureMasterCoprocessorsIsMutable();
          // super.addAll is GeneratedMessage.Builder's bulk-append helper.
          super.addAll(values, masterCoprocessors_);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder clearMasterCoprocessors() {
        if (masterCoprocessorsBuilder_ == null) {
          masterCoprocessors_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public Builder removeMasterCoprocessors(int index) {
        if (masterCoprocessorsBuilder_ == null) {
          ensureMasterCoprocessorsIsMutable();
          masterCoprocessors_.remove(index);
          onChanged();
        } else {
          masterCoprocessorsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getMasterCoprocessorsBuilder(
          int index) {
        // Forces the field into builder-backed mode.
        return getMasterCoprocessorsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
          int index) {
        if (masterCoprocessorsBuilder_ == null) {
          return masterCoprocessors_.get(index);  } else {
          return masterCoprocessorsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
           getMasterCoprocessorsOrBuilderList() {
        if (masterCoprocessorsBuilder_ != null) {
          return masterCoprocessorsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(masterCoprocessors_);
        }
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addMasterCoprocessorsBuilder() {
        return getMasterCoprocessorsFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addMasterCoprocessorsBuilder(
          int index) {
        return getMasterCoprocessorsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
      }
      /**
       * <code>repeated .hbase.pb.Coprocessor master_coprocessors = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder>
           getMasterCoprocessorsBuilderList() {
        return getMasterCoprocessorsFieldBuilder().getBuilderList();
      }
      // Lazy one-time handoff from list storage to RepeatedFieldBuilder;
      // masterCoprocessors_ is nulled so the builder is authoritative.
      private com.google.protobuf.RepeatedFieldBuilder<
          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
          getMasterCoprocessorsFieldBuilder() {
        if (masterCoprocessorsBuilder_ == null) {
          masterCoprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>(
                  masterCoprocessors_,
                  ((bitField0_ & 0x00000020) == 0x00000020),
                  getParentForChildren(),
                  isClean());
          masterCoprocessors_ = null;
        }
        return masterCoprocessorsBuilder_;
      }
14248 
// NOTE(review): generated accessors for the singular message field
// "master" (tag 7, optional hbase.pb.ServerName). Presence is tracked via
// bit 0x40 of bitField0_. Generated code — do not hand-edit.
14249       // optional .hbase.pb.ServerName master = 7;
14250       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
14251       private com.google.protobuf.SingleFieldBuilder<
14252           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_;
14253       /**
14254        * <code>optional .hbase.pb.ServerName master = 7;</code>
14255        */
14256       public boolean hasMaster() {
14257         return ((bitField0_ & 0x00000040) == 0x00000040);
14258       }
14259       /**
14260        * <code>optional .hbase.pb.ServerName master = 7;</code>
14261        */
14262       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
// Once masterBuilder_ exists it owns the value; master_ is stale/null then.
14263         if (masterBuilder_ == null) {
14264           return master_;
14265         } else {
14266           return masterBuilder_.getMessage();
14267         }
14268       }
14269       /**
14270        * <code>optional .hbase.pb.ServerName master = 7;</code>
14271        */
14272       public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
14273         if (masterBuilder_ == null) {
14274           if (value == null) {
14275             throw new NullPointerException();
14276           }
14277           master_ = value;
14278           onChanged();
14279         } else {
14280           masterBuilder_.setMessage(value);
14281         }
// Mark the field present regardless of which storage path was used.
14282         bitField0_ |= 0x00000040;
14283         return this;
14284       }
14285       /**
14286        * <code>optional .hbase.pb.ServerName master = 7;</code>
14287        */
14288       public Builder setMaster(
14289           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
14290         if (masterBuilder_ == null) {
14291           master_ = builderForValue.build();
14292           onChanged();
14293         } else {
14294           masterBuilder_.setMessage(builderForValue.build());
14295         }
14296         bitField0_ |= 0x00000040;
14297         return this;
14298       }
14299       /**
14300        * <code>optional .hbase.pb.ServerName master = 7;</code>
14301        */
14302       public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
14303         if (masterBuilder_ == null) {
// Standard protobuf merge: if already set to a non-default instance, merge
// field-by-field; otherwise replace wholesale. Reference (!=) comparison
// against the default instance is intentional — defaults are singletons.
14304           if (((bitField0_ & 0x00000040) == 0x00000040) &&
14305               master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
14306             master_ =
14307               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial();
14308           } else {
14309             master_ = value;
14310           }
14311           onChanged();
14312         } else {
14313           masterBuilder_.mergeFrom(value);
14314         }
14315         bitField0_ |= 0x00000040;
14316         return this;
14317       }
14318       /**
14319        * <code>optional .hbase.pb.ServerName master = 7;</code>
14320        */
14321       public Builder clearMaster() {
14322         if (masterBuilder_ == null) {
14323           master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
14324           onChanged();
14325         } else {
14326           masterBuilder_.clear();
14327         }
// Clear the has-bit in both paths.
14328         bitField0_ = (bitField0_ & ~0x00000040);
14329         return this;
14330       }
14331       /**
14332        * <code>optional .hbase.pb.ServerName master = 7;</code>
14333        */
14334       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() {
// Handing out a mutable builder implies the field will be set/modified.
14335         bitField0_ |= 0x00000040;
14336         onChanged();
14337         return getMasterFieldBuilder().getBuilder();
14338       }
14339       /**
14340        * <code>optional .hbase.pb.ServerName master = 7;</code>
14341        */
14342       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
14343         if (masterBuilder_ != null) {
14344           return masterBuilder_.getMessageOrBuilder();
14345         } else {
14346           return master_;
14347         }
14348       }
14349       /**
14350        * <code>optional .hbase.pb.ServerName master = 7;</code>
14351        */
// Lazily creates the SingleFieldBuilder; after creation the builder owns the
// value and master_ is nulled to avoid a second, divergent copy.
14352       private com.google.protobuf.SingleFieldBuilder<
14353           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
14354           getMasterFieldBuilder() {
14355         if (masterBuilder_ == null) {
14356           masterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14357               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
14358                   master_,
14359                   getParentForChildren(),
14360                   isClean());
14361           master_ = null;
14362         }
14363         return masterBuilder_;
14364       }
14365 
// NOTE(review): generated accessors for the repeated message field
// "backup_masters" (tag 8, repeated hbase.pb.ServerName). Mutability of the
// backing list is tracked via bit 0x80 of bitField0_: the list starts as the
// shared immutable emptyList() and is copied into an ArrayList on first
// mutation (copy-on-write). Generated code — do not hand-edit.
14366       // repeated .hbase.pb.ServerName backup_masters = 8;
14367       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> backupMasters_ =
14368         java.util.Collections.emptyList();
14369       private void ensureBackupMastersIsMutable() {
14370         if (!((bitField0_ & 0x00000080) == 0x00000080)) {
14371           backupMasters_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(backupMasters_);
14372           bitField0_ |= 0x00000080;
14373          }
14374       }
14375 
14376       private com.google.protobuf.RepeatedFieldBuilder<
14377           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> backupMastersBuilder_;
14378 
14379       /**
14380        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14381        */
14382       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getBackupMastersList() {
// As with the other repeated fields: plain list until the builder exists,
// then the builder is the single source of truth.
14383         if (backupMastersBuilder_ == null) {
14384           return java.util.Collections.unmodifiableList(backupMasters_);
14385         } else {
14386           return backupMastersBuilder_.getMessageList();
14387         }
14388       }
14389       /**
14390        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14391        */
14392       public int getBackupMastersCount() {
14393         if (backupMastersBuilder_ == null) {
14394           return backupMasters_.size();
14395         } else {
14396           return backupMastersBuilder_.getCount();
14397         }
14398       }
14399       /**
14400        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14401        */
14402       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index) {
14403         if (backupMastersBuilder_ == null) {
14404           return backupMasters_.get(index);
14405         } else {
14406           return backupMastersBuilder_.getMessage(index);
14407         }
14408       }
14409       /**
14410        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14411        */
14412       public Builder setBackupMasters(
14413           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
14414         if (backupMastersBuilder_ == null) {
14415           if (value == null) {
14416             throw new NullPointerException();
14417           }
14418           ensureBackupMastersIsMutable();
14419           backupMasters_.set(index, value);
14420           onChanged();
14421         } else {
14422           backupMastersBuilder_.setMessage(index, value);
14423         }
14424         return this;
14425       }
14426       /**
14427        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14428        */
14429       public Builder setBackupMasters(
14430           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
14431         if (backupMastersBuilder_ == null) {
14432           ensureBackupMastersIsMutable();
14433           backupMasters_.set(index, builderForValue.build());
14434           onChanged();
14435         } else {
14436           backupMastersBuilder_.setMessage(index, builderForValue.build());
14437         }
14438         return this;
14439       }
14440       /**
14441        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14442        */
14443       public Builder addBackupMasters(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
14444         if (backupMastersBuilder_ == null) {
14445           if (value == null) {
14446             throw new NullPointerException();
14447           }
14448           ensureBackupMastersIsMutable();
14449           backupMasters_.add(value);
14450           onChanged();
14451         } else {
14452           backupMastersBuilder_.addMessage(value);
14453         }
14454         return this;
14455       }
14456       /**
14457        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14458        */
14459       public Builder addBackupMasters(
14460           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
14461         if (backupMastersBuilder_ == null) {
14462           if (value == null) {
14463             throw new NullPointerException();
14464           }
14465           ensureBackupMastersIsMutable();
14466           backupMasters_.add(index, value);
14467           onChanged();
14468         } else {
14469           backupMastersBuilder_.addMessage(index, value);
14470         }
14471         return this;
14472       }
14473       /**
14474        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14475        */
14476       public Builder addBackupMasters(
14477           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
14478         if (backupMastersBuilder_ == null) {
14479           ensureBackupMastersIsMutable();
14480           backupMasters_.add(builderForValue.build());
14481           onChanged();
14482         } else {
14483           backupMastersBuilder_.addMessage(builderForValue.build());
14484         }
14485         return this;
14486       }
14487       /**
14488        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14489        */
14490       public Builder addBackupMasters(
14491           int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
14492         if (backupMastersBuilder_ == null) {
14493           ensureBackupMastersIsMutable();
14494           backupMasters_.add(index, builderForValue.build());
14495           onChanged();
14496         } else {
14497           backupMastersBuilder_.addMessage(index, builderForValue.build());
14498         }
14499         return this;
14500       }
14501       /**
14502        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14503        */
14504       public Builder addAllBackupMasters(
14505           java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
14506         if (backupMastersBuilder_ == null) {
14507           ensureBackupMastersIsMutable();
// GeneratedMessage.Builder.addAll — null-checks each element while copying.
14508           super.addAll(values, backupMasters_);
14509           onChanged();
14510         } else {
14511           backupMastersBuilder_.addAllMessages(values);
14512         }
14513         return this;
14514       }
14515       /**
14516        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14517        */
14518       public Builder clearBackupMasters() {
14519         if (backupMastersBuilder_ == null) {
// Resets to the shared immutable list and clears the mutability bit.
14520           backupMasters_ = java.util.Collections.emptyList();
14521           bitField0_ = (bitField0_ & ~0x00000080);
14522           onChanged();
14523         } else {
14524           backupMastersBuilder_.clear();
14525         }
14526         return this;
14527       }
14528       /**
14529        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14530        */
14531       public Builder removeBackupMasters(int index) {
14532         if (backupMastersBuilder_ == null) {
14533           ensureBackupMastersIsMutable();
14534           backupMasters_.remove(index);
14535           onChanged();
14536         } else {
14537           backupMastersBuilder_.remove(index);
14538         }
14539         return this;
14540       }
14541       /**
14542        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14543        */
14544       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getBackupMastersBuilder(
14545           int index) {
14546         return getBackupMastersFieldBuilder().getBuilder(index);
14547       }
14548       /**
14549        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14550        */
14551       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
14552           int index) {
14553         if (backupMastersBuilder_ == null) {
14554           return backupMasters_.get(index);  } else {
14555           return backupMastersBuilder_.getMessageOrBuilder(index);
14556         }
14557       }
14558       /**
14559        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14560        */
14561       public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
14562            getBackupMastersOrBuilderList() {
14563         if (backupMastersBuilder_ != null) {
14564           return backupMastersBuilder_.getMessageOrBuilderList();
14565         } else {
14566           return java.util.Collections.unmodifiableList(backupMasters_);
14567         }
14568       }
14569       /**
14570        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14571        */
14572       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addBackupMastersBuilder() {
14573         return getBackupMastersFieldBuilder().addBuilder(
14574             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
14575       }
14576       /**
14577        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14578        */
14579       public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addBackupMastersBuilder(
14580           int index) {
14581         return getBackupMastersFieldBuilder().addBuilder(
14582             index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
14583       }
14584       /**
14585        * <code>repeated .hbase.pb.ServerName backup_masters = 8;</code>
14586        */
14587       public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder> 
14588            getBackupMastersBuilderList() {
14589         return getBackupMastersFieldBuilder().getBuilderList();
14590       }
// Lazily creates the RepeatedFieldBuilder; ownership of the list transfers
// to it, so the raw field is nulled afterwards.
14591       private com.google.protobuf.RepeatedFieldBuilder<
14592           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
14593           getBackupMastersFieldBuilder() {
14594         if (backupMastersBuilder_ == null) {
14595           backupMastersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
14596               org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
14597                   backupMasters_,
14598                   ((bitField0_ & 0x00000080) == 0x00000080),
14599                   getParentForChildren(),
14600                   isClean());
14601           backupMasters_ = null;
14602         }
14603         return backupMastersBuilder_;
14604       }
14605 
// NOTE(review): generated accessors for the scalar field "balancer_on"
// (tag 9, optional bool). Presence is tracked via bit 0x100 of bitField0_;
// the value defaults to false when unset. Generated code — do not hand-edit.
14606       // optional bool balancer_on = 9;
14607       private boolean balancerOn_ ;
14608       /**
14609        * <code>optional bool balancer_on = 9;</code>
14610        */
14611       public boolean hasBalancerOn() {
14612         return ((bitField0_ & 0x00000100) == 0x00000100);
14613       }
14614       /**
14615        * <code>optional bool balancer_on = 9;</code>
14616        */
14617       public boolean getBalancerOn() {
14618         return balancerOn_;
14619       }
14620       /**
14621        * <code>optional bool balancer_on = 9;</code>
14622        */
14623       public Builder setBalancerOn(boolean value) {
14624         bitField0_ |= 0x00000100;
14625         balancerOn_ = value;
14626         onChanged();
14627         return this;
14628       }
14629       /**
14630        * <code>optional bool balancer_on = 9;</code>
14631        */
14632       public Builder clearBalancerOn() {
14633         bitField0_ = (bitField0_ & ~0x00000100);
14634         balancerOn_ = false;
14635         onChanged();
14636         return this;
14637       }
14638 
14639       // @@protoc_insertion_point(builder_scope:hbase.pb.ClusterStatus)
14640     }
14641 
// Eagerly builds the singleton default instance for ClusterStatus (the
// 'true' ctor argument selects the no-unknown-fields default-instance path
// in the generated constructor — NOTE(review): confirm against the
// generated ClusterStatus(boolean) ctor, which is outside this view).
14642     static {
14643       defaultInstance = new ClusterStatus(true);
14644       defaultInstance.initFields();
14645     }
14646 
14647     // @@protoc_insertion_point(class_scope:hbase.pb.ClusterStatus)
14648   }
14649 
// NOTE(review): per-message Descriptor / FieldAccessorTable pairs for every
// message declared in ClusterStatus.proto. They are populated by the static
// descriptor-initialization block below (via InternalDescriptorAssigner),
// so they are null until this class is initialized.
14650   private static com.google.protobuf.Descriptors.Descriptor
14651     internal_static_hbase_pb_RegionState_descriptor;
14652   private static
14653     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14654       internal_static_hbase_pb_RegionState_fieldAccessorTable;
14655   private static com.google.protobuf.Descriptors.Descriptor
14656     internal_static_hbase_pb_RegionInTransition_descriptor;
14657   private static
14658     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14659       internal_static_hbase_pb_RegionInTransition_fieldAccessorTable;
14660   private static com.google.protobuf.Descriptors.Descriptor
14661     internal_static_hbase_pb_StoreSequenceId_descriptor;
14662   private static
14663     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14664       internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable;
14665   private static com.google.protobuf.Descriptors.Descriptor
14666     internal_static_hbase_pb_RegionStoreSequenceIds_descriptor;
14667   private static
14668     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14669       internal_static_hbase_pb_RegionStoreSequenceIds_fieldAccessorTable;
14670   private static com.google.protobuf.Descriptors.Descriptor
14671     internal_static_hbase_pb_RegionLoad_descriptor;
14672   private static
14673     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14674       internal_static_hbase_pb_RegionLoad_fieldAccessorTable;
14675   private static com.google.protobuf.Descriptors.Descriptor
14676     internal_static_hbase_pb_ReplicationLoadSink_descriptor;
14677   private static
14678     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14679       internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable;
14680   private static com.google.protobuf.Descriptors.Descriptor
14681     internal_static_hbase_pb_ReplicationLoadSource_descriptor;
14682   private static
14683     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14684       internal_static_hbase_pb_ReplicationLoadSource_fieldAccessorTable;
14685   private static com.google.protobuf.Descriptors.Descriptor
14686     internal_static_hbase_pb_ServerLoad_descriptor;
14687   private static
14688     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14689       internal_static_hbase_pb_ServerLoad_fieldAccessorTable;
14690   private static com.google.protobuf.Descriptors.Descriptor
14691     internal_static_hbase_pb_LiveServerInfo_descriptor;
14692   private static
14693     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14694       internal_static_hbase_pb_LiveServerInfo_fieldAccessorTable;
14695   private static com.google.protobuf.Descriptors.Descriptor
14696     internal_static_hbase_pb_ClusterStatus_descriptor;
14697   private static
14698     com.google.protobuf.GeneratedMessage.FieldAccessorTable
14699       internal_static_hbase_pb_ClusterStatus_fieldAccessorTable;
14700 
// Returns the FileDescriptor for ClusterStatus.proto, built once in the
// static initializer below from the embedded serialized descriptor.
14701   public static com.google.protobuf.Descriptors.FileDescriptor
14702       getDescriptor() {
14703     return descriptor;
14704   }
14705   private static com.google.protobuf.Descriptors.FileDescriptor
14706       descriptor;
14707   static {
14708     java.lang.String[] descriptorData = {
14709       "\n\023ClusterStatus.proto\022\010hbase.pb\032\013HBase.p" +
14710       "roto\032\017ClusterId.proto\032\010FS.proto\"\331\002\n\013Regi" +
14711       "onState\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb." +
14712       "RegionInfo\022*\n\005state\030\002 \002(\0162\033.hbase.pb.Reg" +
14713       "ionState.State\022\r\n\005stamp\030\003 \001(\004\"\343\001\n\005State\022" +
14714       "\013\n\007OFFLINE\020\000\022\020\n\014PENDING_OPEN\020\001\022\013\n\007OPENIN" +
14715       "G\020\002\022\010\n\004OPEN\020\003\022\021\n\rPENDING_CLOSE\020\004\022\013\n\007CLOS" +
14716       "ING\020\005\022\n\n\006CLOSED\020\006\022\r\n\tSPLITTING\020\007\022\t\n\005SPLI" +
14717       "T\020\010\022\017\n\013FAILED_OPEN\020\t\022\020\n\014FAILED_CLOSE\020\n\022\013" +
14718       "\n\007MERGING\020\013\022\n\n\006MERGED\020\014\022\021\n\rSPLITTING_NEW",
14719       "\020\r\022\017\n\013MERGING_NEW\020\016\"j\n\022RegionInTransitio" +
14720       "n\022\'\n\004spec\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
14721       "er\022+\n\014region_state\030\002 \002(\0132\025.hbase.pb.Regi" +
14722       "onState\";\n\017StoreSequenceId\022\023\n\013family_nam" +
14723       "e\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"p\n\026RegionSt" +
14724       "oreSequenceIds\022 \n\030last_flushed_sequence_" +
14725       "id\030\001 \002(\004\0224\n\021store_sequence_id\030\002 \003(\0132\031.hb" +
14726       "ase.pb.StoreSequenceId\"\324\004\n\nRegionLoad\0223\n" +
14727       "\020region_specifier\030\001 \002(\0132\031.hbase.pb.Regio" +
14728       "nSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstorefiles",
14729       "\030\003 \001(\r\022\"\n\032store_uncompressed_size_MB\030\004 \001" +
14730       "(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022\030\n\020memstor" +
14731       "e_size_MB\030\006 \001(\r\022\037\n\027storefile_index_size_" +
14732       "MB\030\007 \001(\r\022\033\n\023read_requests_count\030\010 \001(\004\022\034\n" +
14733       "\024write_requests_count\030\t \001(\004\022\034\n\024total_com" +
14734       "pacting_KVs\030\n \001(\004\022\035\n\025current_compacted_K" +
14735       "Vs\030\013 \001(\004\022\032\n\022root_index_size_KB\030\014 \001(\r\022\"\n\032" +
14736       "total_static_index_size_KB\030\r \001(\r\022\"\n\032tota" +
14737       "l_static_bloom_size_KB\030\016 \001(\r\022\034\n\024complete" +
14738       "_sequence_id\030\017 \001(\004\022\025\n\rdata_locality\030\020 \001(",
14739       "\002\022#\n\030last_major_compaction_ts\030\021 \001(\004:\0010\022=" +
14740       "\n\032store_complete_sequence_id\030\022 \003(\0132\031.hba" +
14741       "se.pb.StoreSequenceId\"T\n\023ReplicationLoad" +
14742       "Sink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022!\n\031time" +
14743       "StampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025Replicat" +
14744       "ionLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLa" +
14745       "stShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(" +
14746       "\r\022 \n\030timeStampOfLastShippedOp\030\004 \002(\004\022\026\n\016r" +
14747       "eplicationLag\030\005 \002(\004\"\212\003\n\nServerLoad\022\032\n\022nu" +
14748       "mber_of_requests\030\001 \001(\004\022 \n\030total_number_o",
14749       "f_requests\030\002 \001(\004\022\024\n\014used_heap_MB\030\003 \001(\r\022\023" +
14750       "\n\013max_heap_MB\030\004 \001(\r\022*\n\014region_loads\030\005 \003(" +
14751       "\0132\024.hbase.pb.RegionLoad\022+\n\014coprocessors\030" +
14752       "\006 \003(\0132\025.hbase.pb.Coprocessor\022\031\n\021report_s" +
14753       "tart_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004" +
14754       "\022\030\n\020info_server_port\030\t \001(\r\0227\n\016replLoadSo" +
14755       "urce\030\n \003(\0132\037.hbase.pb.ReplicationLoadSou" +
14756       "rce\0223\n\014replLoadSink\030\013 \001(\0132\035.hbase.pb.Rep" +
14757       "licationLoadSink\"a\n\016LiveServerInfo\022$\n\006se" +
14758       "rver\030\001 \002(\0132\024.hbase.pb.ServerName\022)\n\013serv",
14759       "er_load\030\002 \002(\0132\024.hbase.pb.ServerLoad\"\250\003\n\r" +
14760       "ClusterStatus\0228\n\rhbase_version\030\001 \001(\0132!.h" +
14761       "base.pb.HBaseVersionFileContent\022.\n\014live_" +
14762       "servers\030\002 \003(\0132\030.hbase.pb.LiveServerInfo\022" +
14763       "*\n\014dead_servers\030\003 \003(\0132\024.hbase.pb.ServerN" +
14764       "ame\022;\n\025regions_in_transition\030\004 \003(\0132\034.hba" +
14765       "se.pb.RegionInTransition\022\'\n\ncluster_id\030\005" +
14766       " \001(\0132\023.hbase.pb.ClusterId\0222\n\023master_copr" +
14767       "ocessors\030\006 \003(\0132\025.hbase.pb.Coprocessor\022$\n" +
14768       "\006master\030\007 \001(\0132\024.hbase.pb.ServerName\022,\n\016b",
14769       "ackup_masters\030\010 \003(\0132\024.hbase.pb.ServerNam" +
14770       "e\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.had" +
14771       "oop.hbase.protobuf.generatedB\023ClusterSta" +
14772       "tusProtosH\001\240\001\001"
14773     };
14774     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
14775       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
14776         public com.google.protobuf.ExtensionRegistry assignDescriptors(
14777             com.google.protobuf.Descriptors.FileDescriptor root) {
14778           descriptor = root;
14779           internal_static_hbase_pb_RegionState_descriptor =
14780             getDescriptor().getMessageTypes().get(0);
14781           internal_static_hbase_pb_RegionState_fieldAccessorTable = new
14782             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14783               internal_static_hbase_pb_RegionState_descriptor,
14784               new java.lang.String[] { "RegionInfo", "State", "Stamp", });
14785           internal_static_hbase_pb_RegionInTransition_descriptor =
14786             getDescriptor().getMessageTypes().get(1);
14787           internal_static_hbase_pb_RegionInTransition_fieldAccessorTable = new
14788             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14789               internal_static_hbase_pb_RegionInTransition_descriptor,
14790               new java.lang.String[] { "Spec", "RegionState", });
14791           internal_static_hbase_pb_StoreSequenceId_descriptor =
14792             getDescriptor().getMessageTypes().get(2);
14793           internal_static_hbase_pb_StoreSequenceId_fieldAccessorTable = new
14794             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14795               internal_static_hbase_pb_StoreSequenceId_descriptor,
14796               new java.lang.String[] { "FamilyName", "SequenceId", });
14797           internal_static_hbase_pb_RegionStoreSequenceIds_descriptor =
14798             getDescriptor().getMessageTypes().get(3);
14799           internal_static_hbase_pb_RegionStoreSequenceIds_fieldAccessorTable = new
14800             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14801               internal_static_hbase_pb_RegionStoreSequenceIds_descriptor,
14802               new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", });
14803           internal_static_hbase_pb_RegionLoad_descriptor =
14804             getDescriptor().getMessageTypes().get(4);
14805           internal_static_hbase_pb_RegionLoad_fieldAccessorTable = new
14806             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14807               internal_static_hbase_pb_RegionLoad_descriptor,
14808               new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", });
14809           internal_static_hbase_pb_ReplicationLoadSink_descriptor =
14810             getDescriptor().getMessageTypes().get(5);
14811           internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable = new
14812             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14813               internal_static_hbase_pb_ReplicationLoadSink_descriptor,
14814               new java.lang.String[] { "AgeOfLastAppliedOp", "TimeStampsOfLastAppliedOp", });
14815           internal_static_hbase_pb_ReplicationLoadSource_descriptor =
14816             getDescriptor().getMessageTypes().get(6);
14817           internal_static_hbase_pb_ReplicationLoadSource_fieldAccessorTable = new
14818             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14819               internal_static_hbase_pb_ReplicationLoadSource_descriptor,
14820               new java.lang.String[] { "PeerID", "AgeOfLastShippedOp", "SizeOfLogQueue", "TimeStampOfLastShippedOp", "ReplicationLag", });
14821           internal_static_hbase_pb_ServerLoad_descriptor =
14822             getDescriptor().getMessageTypes().get(7);
14823           internal_static_hbase_pb_ServerLoad_fieldAccessorTable = new
14824             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14825               internal_static_hbase_pb_ServerLoad_descriptor,
14826               new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", "ReplLoadSource", "ReplLoadSink", });
14827           internal_static_hbase_pb_LiveServerInfo_descriptor =
14828             getDescriptor().getMessageTypes().get(8);
14829           internal_static_hbase_pb_LiveServerInfo_fieldAccessorTable = new
14830             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14831               internal_static_hbase_pb_LiveServerInfo_descriptor,
14832               new java.lang.String[] { "Server", "ServerLoad", });
14833           internal_static_hbase_pb_ClusterStatus_descriptor =
14834             getDescriptor().getMessageTypes().get(9);
14835           internal_static_hbase_pb_ClusterStatus_fieldAccessorTable = new
14836             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
14837               internal_static_hbase_pb_ClusterStatus_descriptor,
14838               new java.lang.String[] { "HbaseVersion", "LiveServers", "DeadServers", "RegionsInTransition", "ClusterId", "MasterCoprocessors", "Master", "BackupMasters", "BalancerOn", });
14839           return null;
14840         }
14841       };
14842     com.google.protobuf.Descriptors.FileDescriptor
14843       .internalBuildGeneratedFileFrom(descriptorData,
14844         new com.google.protobuf.Descriptors.FileDescriptor[] {
14845           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
14846           org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.getDescriptor(),
14847           org.apache.hadoop.hbase.protobuf.generated.FSProtos.getDescriptor(),
14848         }, assigner);
14849   }
14850 
14851   // @@protoc_insertion_point(outer_class_scope)
14852 }