package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class StorageClusterStatusMessage {
  private StorageClusterStatusMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
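  // Read-only accessor interface for the StorageClusterStatus message:
  // repeated Node liveNodes = 1, repeated string deadNodes = 2,
  // optional int32 regions = 3, optional int32 requests = 4,
  // optional double averageLoad = 5 (field numbers taken from the
  // *_FIELD_NUMBER constants declared further down in this file).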
  public interface StorageClusterStatusOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>
        getLiveNodesList();
    org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index);
    int getLiveNodesCount();
    java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
        getLiveNodesOrBuilderList();
    org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
        int index);

    java.util.List<String> getDeadNodesList();
    int getDeadNodesCount();
    String getDeadNodes(int index);

    boolean hasRegions();
    int getRegions();

    boolean hasRequests();
    int getRequests();

    boolean hasAverageLoad();
    double getAverageLoad();
  }
  public static final class StorageClusterStatus extends
      com.google.protobuf.GeneratedMessage
      implements StorageClusterStatusOrBuilder {

    private StorageClusterStatus(Builder builder) {
      super(builder);
    }
    private StorageClusterStatus(boolean noInit) {}

    private static final StorageClusterStatus defaultInstance;
    public static StorageClusterStatus getDefaultInstance() {
      return defaultInstance;
    }

    public StorageClusterStatus getDefaultInstanceForType() {
      return defaultInstance;
    }

    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
    }

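    // Region: per-region metrics reported under a live node. Field numbers
    // (from the *_FIELD_NUMBER constants below): name = 1 (bytes, the only
    // field enforced by isInitialized()), stores = 2, storefiles = 3,
    // storefileSizeMB = 4, memstoreSizeMB = 5, storefileIndexSizeMB = 6.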
    public interface RegionOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      boolean hasName();
      com.google.protobuf.ByteString getName();

      boolean hasStores();
      int getStores();

      boolean hasStorefiles();
      int getStorefiles();

      boolean hasStorefileSizeMB();
      int getStorefileSizeMB();

      boolean hasMemstoreSizeMB();
      int getMemstoreSizeMB();

      boolean hasStorefileIndexSizeMB();
      int getStorefileIndexSizeMB();
    }
    public static final class Region extends
        com.google.protobuf.GeneratedMessage
        implements RegionOrBuilder {

      private Region(Builder builder) {
        super(builder);
      }
      private Region(boolean noInit) {}

      private static final Region defaultInstance;
      public static Region getDefaultInstance() {
        return defaultInstance;
      }

      public Region getDefaultInstanceForType() {
        return defaultInstance;
      }

      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
      }

      private int bitField0_;

      public static final int NAME_FIELD_NUMBER = 1;
      private com.google.protobuf.ByteString name_;
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public com.google.protobuf.ByteString getName() {
        return name_;
      }

      public static final int STORES_FIELD_NUMBER = 2;
      private int stores_;
      public boolean hasStores() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public int getStores() {
        return stores_;
      }

      public static final int STOREFILES_FIELD_NUMBER = 3;
      private int storefiles_;
      public boolean hasStorefiles() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public int getStorefiles() {
        return storefiles_;
      }

      public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
      private int storefileSizeMB_;
      public boolean hasStorefileSizeMB() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public int getStorefileSizeMB() {
        return storefileSizeMB_;
      }

      public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
      private int memstoreSizeMB_;
      public boolean hasMemstoreSizeMB() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      public int getMemstoreSizeMB() {
        return memstoreSizeMB_;
      }

      public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
      private int storefileIndexSizeMB_;
      public boolean hasStorefileIndexSizeMB() {
        return ((bitField0_ & 0x00000020) == 0x00000020);
      }
      public int getStorefileIndexSizeMB() {
        return storefileIndexSizeMB_;
      }

      private void initFields() {
        name_ = com.google.protobuf.ByteString.EMPTY;
        stores_ = 0;
        storefiles_ = 0;
        storefileSizeMB_ = 0;
        memstoreSizeMB_ = 0;
        storefileIndexSizeMB_ = 0;
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        if (!hasName()) {
          memoizedIsInitialized = 0;
          return false;
        }
        memoizedIsInitialized = 1;
        return true;
      }

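      // Serialization: writeTo() and getSerializedSize() emit each optional
      // field only when its presence bit in bitField0_ is set; unknown fields
      // read from the wire are preserved and re-emitted at the end.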
      public void writeTo(com.google.protobuf.CodedOutputStream output)
          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeBytes(1, name_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeInt32(2, stores_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeInt32(3, storefiles_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          output.writeInt32(4, storefileSizeMB_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          output.writeInt32(5, memstoreSizeMB_);
        }
        if (((bitField0_ & 0x00000020) == 0x00000020)) {
          output.writeInt32(6, storefileIndexSizeMB_);
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(1, name_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(2, stores_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(3, storefiles_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(4, storefileSizeMB_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(5, memstoreSizeMB_);
        }
        if (((bitField0_ & 0x00000020) == 0x00000020)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(6, storefileIndexSizeMB_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
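      // Builder for Region. Each setter records the matching presence bit in
      // the builder's own bitField0_; buildPartial() copies the values and
      // the accumulated bits into a new Region instance.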
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
          implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
        }

        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          name_ = com.google.protobuf.ByteString.EMPTY;
          bitField0_ = (bitField0_ & ~0x00000001);
          stores_ = 0;
          bitField0_ = (bitField0_ & ~0x00000002);
          storefiles_ = 0;
          bitField0_ = (bitField0_ & ~0x00000004);
          storefileSizeMB_ = 0;
          bitField0_ = (bitField0_ & ~0x00000008);
          memstoreSizeMB_ = 0;
          bitField0_ = (bitField0_ & ~0x00000010);
          storefileIndexSizeMB_ = 0;
          bitField0_ = (bitField0_ & ~0x00000020);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDescriptor();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildParsed()
            throws com.google.protobuf.InvalidProtocolBufferException {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(
              result).asInvalidProtocolBufferException();
          }
          return result;
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.name_ = name_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.stores_ = stores_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.storefiles_ = storefiles_;
          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
            to_bitField0_ |= 0x00000008;
          }
          result.storefileSizeMB_ = storefileSizeMB_;
          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
            to_bitField0_ |= 0x00000010;
          }
          result.memstoreSizeMB_ = memstoreSizeMB_;
          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
            to_bitField0_ |= 0x00000020;
          }
          result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasStores()) {
            setStores(other.getStores());
          }
          if (other.hasStorefiles()) {
            setStorefiles(other.getStorefiles());
          }
          if (other.hasStorefileSizeMB()) {
            setStorefileSizeMB(other.getStorefileSizeMB());
          }
          if (other.hasMemstoreSizeMB()) {
            setMemstoreSizeMB(other.getMemstoreSizeMB());
          }
          if (other.hasStorefileIndexSizeMB()) {
            setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          if (!hasName()) {
            return false;
          }
          return true;
        }

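        // Stream parsing: each case label is a wire-format tag,
        // (field_number << 3) | wire_type. 10 = field 1 (length-delimited),
        // 16/24/32/40/48 = fields 2-6 (varint). Tag 0 means end of stream.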
        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder(
              this.getUnknownFields());
          while (true) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  this.setUnknownFields(unknownFields.build());
                  onChanged();
                  return this;
                }
                break;
              }
              case 10: {
                bitField0_ |= 0x00000001;
                name_ = input.readBytes();
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                stores_ = input.readInt32();
                break;
              }
              case 24: {
                bitField0_ |= 0x00000004;
                storefiles_ = input.readInt32();
                break;
              }
              case 32: {
                bitField0_ |= 0x00000008;
                storefileSizeMB_ = input.readInt32();
                break;
              }
              case 40: {
                bitField0_ |= 0x00000010;
                memstoreSizeMB_ = input.readInt32();
                break;
              }
              case 48: {
                bitField0_ |= 0x00000020;
                storefileIndexSizeMB_ = input.readInt32();
                break;
              }
            }
          }
        }

        private int bitField0_;

        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        public com.google.protobuf.ByteString getName() {
          return name_;
        }
        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          name_ = value;
          onChanged();
          return this;
        }
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        private int stores_;
        public boolean hasStores() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        public int getStores() {
          return stores_;
        }
        public Builder setStores(int value) {
          bitField0_ |= 0x00000002;
          stores_ = value;
          onChanged();
          return this;
        }
        public Builder clearStores() {
          bitField0_ = (bitField0_ & ~0x00000002);
          stores_ = 0;
          onChanged();
          return this;
        }

        private int storefiles_;
        public boolean hasStorefiles() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        public int getStorefiles() {
          return storefiles_;
        }
        public Builder setStorefiles(int value) {
          bitField0_ |= 0x00000004;
          storefiles_ = value;
          onChanged();
          return this;
        }
        public Builder clearStorefiles() {
          bitField0_ = (bitField0_ & ~0x00000004);
          storefiles_ = 0;
          onChanged();
          return this;
        }

        private int storefileSizeMB_;
        public boolean hasStorefileSizeMB() {
          return ((bitField0_ & 0x00000008) == 0x00000008);
        }
        public int getStorefileSizeMB() {
          return storefileSizeMB_;
        }
        public Builder setStorefileSizeMB(int value) {
          bitField0_ |= 0x00000008;
          storefileSizeMB_ = value;
          onChanged();
          return this;
        }
        public Builder clearStorefileSizeMB() {
          bitField0_ = (bitField0_ & ~0x00000008);
          storefileSizeMB_ = 0;
          onChanged();
          return this;
        }

        private int memstoreSizeMB_;
        public boolean hasMemstoreSizeMB() {
          return ((bitField0_ & 0x00000010) == 0x00000010);
        }
        public int getMemstoreSizeMB() {
          return memstoreSizeMB_;
        }
        public Builder setMemstoreSizeMB(int value) {
          bitField0_ |= 0x00000010;
          memstoreSizeMB_ = value;
          onChanged();
          return this;
        }
        public Builder clearMemstoreSizeMB() {
          bitField0_ = (bitField0_ & ~0x00000010);
          memstoreSizeMB_ = 0;
          onChanged();
          return this;
        }

        private int storefileIndexSizeMB_;
        public boolean hasStorefileIndexSizeMB() {
          return ((bitField0_ & 0x00000020) == 0x00000020);
        }
        public int getStorefileIndexSizeMB() {
          return storefileIndexSizeMB_;
        }
        public Builder setStorefileIndexSizeMB(int value) {
          bitField0_ |= 0x00000020;
          storefileIndexSizeMB_ = value;
          onChanged();
          return this;
        }
        public Builder clearStorefileIndexSizeMB() {
          bitField0_ = (bitField0_ & ~0x00000020);
          storefileIndexSizeMB_ = 0;
          onChanged();
          return this;
        }
      }

      static {
        defaultInstance = new Region(true);
        defaultInstance.initFields();
      }
    }

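    // Node: one entry in the liveNodes list. Field numbers (from the
    // *_FIELD_NUMBER constants below): name = 1 (string, the only field
    // enforced by isInitialized()), startCode = 2 (int64), requests = 3,
    // heapSizeMB = 4, maxHeapSizeMB = 5, repeated Region regions = 6.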
    public interface NodeOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      boolean hasName();
      String getName();

      boolean hasStartCode();
      long getStartCode();

      boolean hasRequests();
      int getRequests();

      boolean hasHeapSizeMB();
      int getHeapSizeMB();

      boolean hasMaxHeapSizeMB();
      int getMaxHeapSizeMB();

      java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>
          getRegionsList();
      org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index);
      int getRegionsCount();
      java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
          getRegionsOrBuilderList();
      org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
          int index);
    }
    public static final class Node extends
        com.google.protobuf.GeneratedMessage
        implements NodeOrBuilder {

      private Node(Builder builder) {
        super(builder);
      }
      private Node(boolean noInit) {}

      private static final Node defaultInstance;
      public static Node getDefaultInstance() {
        return defaultInstance;
      }

      public Node getDefaultInstanceForType() {
        return defaultInstance;
      }

      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
      }

      private int bitField0_;

      public static final int NAME_FIELD_NUMBER = 1;
      private java.lang.Object name_;
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      public String getName() {
        java.lang.Object ref = name_;
        if (ref instanceof String) {
          return (String) ref;
        } else {
          com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
          String s = bs.toStringUtf8();
          if (com.google.protobuf.Internal.isValidUtf8(bs)) {
            name_ = s;
          }
          return s;
        }
      }
      private com.google.protobuf.ByteString getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
          name_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }

      public static final int STARTCODE_FIELD_NUMBER = 2;
      private long startCode_;
      public boolean hasStartCode() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      public long getStartCode() {
        return startCode_;
      }

      public static final int REQUESTS_FIELD_NUMBER = 3;
      private int requests_;
      public boolean hasRequests() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      public int getRequests() {
        return requests_;
      }

      public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
      private int heapSizeMB_;
      public boolean hasHeapSizeMB() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      public int getHeapSizeMB() {
        return heapSizeMB_;
      }

      public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
      private int maxHeapSizeMB_;
      public boolean hasMaxHeapSizeMB() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      public int getMaxHeapSizeMB() {
        return maxHeapSizeMB_;
      }

      public static final int REGIONS_FIELD_NUMBER = 6;
      private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_;
      public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
        return regions_;
      }
      public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
          getRegionsOrBuilderList() {
        return regions_;
      }
      public int getRegionsCount() {
        return regions_.size();
      }
      public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
        return regions_.get(index);
      }
      public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
          int index) {
        return regions_.get(index);
      }

      private void initFields() {
        name_ = "";
        startCode_ = 0L;
        requests_ = 0;
        heapSizeMB_ = 0;
        maxHeapSizeMB_ = 0;
        regions_ = java.util.Collections.emptyList();
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        if (!hasName()) {
          memoizedIsInitialized = 0;
          return false;
        }
        for (int i = 0; i < getRegionsCount(); i++) {
          if (!getRegions(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeBytes(1, getNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeInt64(2, startCode_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeInt32(3, requests_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          output.writeInt32(4, heapSizeMB_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          output.writeInt32(5, maxHeapSizeMB_);
        }
        for (int i = 0; i < regions_.size(); i++) {
          output.writeMessage(6, regions_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(1, getNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt64Size(2, startCode_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(3, requests_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(4, heapSizeMB_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(5, maxHeapSizeMB_);
        }
        for (int i = 0; i < regions_.size(); i++) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(6, regions_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
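      // Builder for Node. Scalar fields follow the same presence-bit pattern
      // as Region.Builder; the repeated 'regions' field is handled further
      // down through either a mutable list or a RepeatedFieldBuilder.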
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
          implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
        }

        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
            getRegionsFieldBuilder();
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          name_ = "";
          bitField0_ = (bitField0_ & ~0x00000001);
          startCode_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000002);
          requests_ = 0;
          bitField0_ = (bitField0_ & ~0x00000004);
          heapSizeMB_ = 0;
          bitField0_ = (bitField0_ & ~0x00000008);
          maxHeapSizeMB_ = 0;
          bitField0_ = (bitField0_ & ~0x00000010);
          if (regionsBuilder_ == null) {
            regions_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000020);
          } else {
            regionsBuilder_.clear();
          }
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDescriptor();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildParsed()
            throws com.google.protobuf.InvalidProtocolBufferException {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(
              result).asInvalidProtocolBufferException();
          }
          return result;
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.name_ = name_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.startCode_ = startCode_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.requests_ = requests_;
          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
            to_bitField0_ |= 0x00000008;
          }
          result.heapSizeMB_ = heapSizeMB_;
          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
            to_bitField0_ |= 0x00000010;
          }
          result.maxHeapSizeMB_ = maxHeapSizeMB_;
          if (regionsBuilder_ == null) {
            if (((bitField0_ & 0x00000020) == 0x00000020)) {
              regions_ = java.util.Collections.unmodifiableList(regions_);
              bitField0_ = (bitField0_ & ~0x00000020);
            }
            result.regions_ = regions_;
          } else {
            result.regions_ = regionsBuilder_.build();
          }
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasStartCode()) {
            setStartCode(other.getStartCode());
          }
          if (other.hasRequests()) {
            setRequests(other.getRequests());
          }
          if (other.hasHeapSizeMB()) {
            setHeapSizeMB(other.getHeapSizeMB());
          }
          if (other.hasMaxHeapSizeMB()) {
            setMaxHeapSizeMB(other.getMaxHeapSizeMB());
          }
          if (regionsBuilder_ == null) {
            if (!other.regions_.isEmpty()) {
              if (regions_.isEmpty()) {
                regions_ = other.regions_;
                bitField0_ = (bitField0_ & ~0x00000020);
              } else {
                ensureRegionsIsMutable();
                regions_.addAll(other.regions_);
              }
              onChanged();
            }
          } else {
            if (!other.regions_.isEmpty()) {
              if (regionsBuilder_.isEmpty()) {
                regionsBuilder_.dispose();
                regionsBuilder_ = null;
                regions_ = other.regions_;
                bitField0_ = (bitField0_ & ~0x00000020);
                regionsBuilder_ =
                  com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                    getRegionsFieldBuilder() : null;
              } else {
                regionsBuilder_.addAllMessages(other.regions_);
              }
            }
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          if (!hasName()) {
            return false;
          }
          for (int i = 0; i < getRegionsCount(); i++) {
            if (!getRegions(i).isInitialized()) {
              return false;
            }
          }
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder(
              this.getUnknownFields());
          while (true) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                this.setUnknownFields(unknownFields.build());
                onChanged();
                return this;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  this.setUnknownFields(unknownFields.build());
                  onChanged();
                  return this;
                }
                break;
              }
              case 10: {
                bitField0_ |= 0x00000001;
                name_ = input.readBytes();
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                startCode_ = input.readInt64();
                break;
              }
              case 24: {
                bitField0_ |= 0x00000004;
                requests_ = input.readInt32();
                break;
              }
              case 32: {
                bitField0_ |= 0x00000008;
                heapSizeMB_ = input.readInt32();
                break;
              }
              case 40: {
                bitField0_ |= 0x00000010;
                maxHeapSizeMB_ = input.readInt32();
                break;
              }
              case 50: {
                org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder subBuilder = org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder();
                input.readMessage(subBuilder, extensionRegistry);
                addRegions(subBuilder.buildPartial());
                break;
              }
            }
          }
        }

        private int bitField0_;

        private java.lang.Object name_ = "";
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        public String getName() {
          java.lang.Object ref = name_;
          if (!(ref instanceof String)) {
            String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
            name_ = s;
            return s;
          } else {
            return (String) ref;
          }
        }
        public Builder setName(String value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          name_ = value;
          onChanged();
          return this;
        }
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }
        void setName(com.google.protobuf.ByteString value) {
          bitField0_ |= 0x00000001;
          name_ = value;
          onChanged();
        }

        private long startCode_;
        public boolean hasStartCode() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        public long getStartCode() {
          return startCode_;
        }
        public Builder setStartCode(long value) {
          bitField0_ |= 0x00000002;
          startCode_ = value;
          onChanged();
          return this;
        }
        public Builder clearStartCode() {
          bitField0_ = (bitField0_ & ~0x00000002);
          startCode_ = 0L;
          onChanged();
          return this;
        }

        private int requests_;
        public boolean hasRequests() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        public int getRequests() {
          return requests_;
        }
        public Builder setRequests(int value) {
          bitField0_ |= 0x00000004;
          requests_ = value;
          onChanged();
          return this;
        }
        public Builder clearRequests() {
          bitField0_ = (bitField0_ & ~0x00000004);
          requests_ = 0;
          onChanged();
          return this;
        }

        private int heapSizeMB_;
        public boolean hasHeapSizeMB() {
          return ((bitField0_ & 0x00000008) == 0x00000008);
        }
        public int getHeapSizeMB() {
          return heapSizeMB_;
        }
        public Builder setHeapSizeMB(int value) {
          bitField0_ |= 0x00000008;
          heapSizeMB_ = value;
          onChanged();
          return this;
        }
        public Builder clearHeapSizeMB() {
          bitField0_ = (bitField0_ & ~0x00000008);
          heapSizeMB_ = 0;
          onChanged();
          return this;
        }

        private int maxHeapSizeMB_;
        public boolean hasMaxHeapSizeMB() {
          return ((bitField0_ & 0x00000010) == 0x00000010);
        }
        public int getMaxHeapSizeMB() {
          return maxHeapSizeMB_;
        }
        public Builder setMaxHeapSizeMB(int value) {
          bitField0_ |= 0x00000010;
          maxHeapSizeMB_ = value;
          onChanged();
          return this;
        }
        public Builder clearMaxHeapSizeMB() {
          bitField0_ = (bitField0_ & ~0x00000010);
          maxHeapSizeMB_ = 0;
          onChanged();
          return this;
        }

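        // Repeated 'regions' field: stored in a plain ArrayList until
        // getRegionsFieldBuilder() is first called, after which all access
        // goes through the lazily created RepeatedFieldBuilder.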
        private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_ =
          java.util.Collections.emptyList();
        private void ensureRegionsIsMutable() {
          if (!((bitField0_ & 0x00000020) == 0x00000020)) {
            regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>(regions_);
            bitField0_ |= 0x00000020;
          }
        }

        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder> regionsBuilder_;

        public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
          if (regionsBuilder_ == null) {
            return java.util.Collections.unmodifiableList(regions_);
          } else {
            return regionsBuilder_.getMessageList();
          }
        }
        public int getRegionsCount() {
          if (regionsBuilder_ == null) {
            return regions_.size();
          } else {
            return regionsBuilder_.getCount();
          }
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
          if (regionsBuilder_ == null) {
            return regions_.get(index);
          } else {
            return regionsBuilder_.getMessage(index);
          }
        }
        public Builder setRegions(
            int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
          if (regionsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureRegionsIsMutable();
            regions_.set(index, value);
            onChanged();
          } else {
            regionsBuilder_.setMessage(index, value);
          }
          return this;
        }
        public Builder setRegions(
            int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
          if (regionsBuilder_ == null) {
            ensureRegionsIsMutable();
            regions_.set(index, builderForValue.build());
            onChanged();
          } else {
            regionsBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
          if (regionsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureRegionsIsMutable();
            regions_.add(value);
            onChanged();
          } else {
            regionsBuilder_.addMessage(value);
          }
          return this;
        }
        public Builder addRegions(
            int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
          if (regionsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureRegionsIsMutable();
            regions_.add(index, value);
            onChanged();
          } else {
            regionsBuilder_.addMessage(index, value);
          }
          return this;
        }
        public Builder addRegions(
            org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
          if (regionsBuilder_ == null) {
            ensureRegionsIsMutable();
            regions_.add(builderForValue.build());
            onChanged();
          } else {
            regionsBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        public Builder addRegions(
            int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
          if (regionsBuilder_ == null) {
            ensureRegionsIsMutable();
            regions_.add(index, builderForValue.build());
            onChanged();
          } else {
            regionsBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        public Builder addAllRegions(
            java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> values) {
          if (regionsBuilder_ == null) {
            ensureRegionsIsMutable();
            super.addAll(values, regions_);
            onChanged();
          } else {
            regionsBuilder_.addAllMessages(values);
          }
          return this;
        }
        public Builder clearRegions() {
          if (regionsBuilder_ == null) {
            regions_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000020);
            onChanged();
          } else {
            regionsBuilder_.clear();
          }
          return this;
        }
        public Builder removeRegions(int index) {
          if (regionsBuilder_ == null) {
            ensureRegionsIsMutable();
            regions_.remove(index);
            onChanged();
          } else {
            regionsBuilder_.remove(index);
          }
          return this;
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder getRegionsBuilder(
            int index) {
          return getRegionsFieldBuilder().getBuilder(index);
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
            int index) {
          if (regionsBuilder_ == null) {
            return regions_.get(index);
          } else {
            return regionsBuilder_.getMessageOrBuilder(index);
          }
        }
        public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
            getRegionsOrBuilderList() {
          if (regionsBuilder_ != null) {
            return regionsBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(regions_);
          }
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder addRegionsBuilder() {
          return getRegionsFieldBuilder().addBuilder(
            org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance());
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder addRegionsBuilder(
            int index) {
          return getRegionsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance());
        }
        public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder>
            getRegionsBuilderList() {
          return getRegionsFieldBuilder().getBuilderList();
        }
        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>
            getRegionsFieldBuilder() {
          if (regionsBuilder_ == null) {
            regionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
                org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder>(
                regions_,
                ((bitField0_ & 0x00000020) == 0x00000020),
                getParentForChildren(),
                isClean());
            regions_ = null;
          }
          return regionsBuilder_;
        }
      }

      static {
        defaultInstance = new Node(true);
        defaultInstance.initFields();
      }
    }

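    // StorageClusterStatus top-level fields: liveNodes = 1 (repeated Node),
    // deadNodes = 2 (repeated string), regions = 3, requests = 4,
    // averageLoad = 5.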
1599 private int bitField0_;
1600
1601 public static final int LIVENODES_FIELD_NUMBER = 1;
1602 private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_;
1603 public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
1604 return liveNodes_;
1605 }
1606 public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
1607 getLiveNodesOrBuilderList() {
1608 return liveNodes_;
1609 }
1610 public int getLiveNodesCount() {
1611 return liveNodes_.size();
1612 }
1613 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
1614 return liveNodes_.get(index);
1615 }
1616 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
1617 int index) {
1618 return liveNodes_.get(index);
1619 }
1620
1621
1622 public static final int DEADNODES_FIELD_NUMBER = 2;
1623 private com.google.protobuf.LazyStringList deadNodes_;
1624 public java.util.List<String>
1625 getDeadNodesList() {
1626 return deadNodes_;
1627 }
1628 public int getDeadNodesCount() {
1629 return deadNodes_.size();
1630 }
1631 public String getDeadNodes(int index) {
1632 return deadNodes_.get(index);
1633 }
1634
1635
1636 public static final int REGIONS_FIELD_NUMBER = 3;
1637 private int regions_;
1638 public boolean hasRegions() {
1639 return ((bitField0_ & 0x00000001) == 0x00000001);
1640 }
1641 public int getRegions() {
1642 return regions_;
1643 }
1644
1645
1646 public static final int REQUESTS_FIELD_NUMBER = 4;
1647 private int requests_;
1648 public boolean hasRequests() {
1649 return ((bitField0_ & 0x00000002) == 0x00000002);
1650 }
1651 public int getRequests() {
1652 return requests_;
1653 }
1654
1655
1656 public static final int AVERAGELOAD_FIELD_NUMBER = 5;
1657 private double averageLoad_;
1658 public boolean hasAverageLoad() {
1659 return ((bitField0_ & 0x00000004) == 0x00000004);
1660 }
1661 public double getAverageLoad() {
1662 return averageLoad_;
1663 }
1664
1665 private void initFields() {
1666 liveNodes_ = java.util.Collections.emptyList();
1667 deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
1668 regions_ = 0;
1669 requests_ = 0;
1670 averageLoad_ = 0D;
1671 }
1672 private byte memoizedIsInitialized = -1;
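    // Required-field check: the message is initialized only if every nested
    // liveNodes entry is itself initialized; the result is memoized in
    // memoizedIsInitialized so the list is walked at most once.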
1673 public final boolean isInitialized() {
1674 byte isInitialized = memoizedIsInitialized;
1675 if (isInitialized != -1) return isInitialized == 1;
1676
1677 for (int i = 0; i < getLiveNodesCount(); i++) {
1678 if (!getLiveNodes(i).isInitialized()) {
1679 memoizedIsInitialized = 0;
1680 return false;
1681 }
1682 }
1683 memoizedIsInitialized = 1;
1684 return true;
1685 }
1686
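    // Writes the set fields to the output in ascending field-number order:
    // liveNodes (1), deadNodes (2), regions (3), requests (4), averageLoad (5),
    // followed by any unknown fields that were preserved during parsing.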
1687 public void writeTo(com.google.protobuf.CodedOutputStream output)
1688 throws java.io.IOException {
1689 getSerializedSize();
1690 for (int i = 0; i < liveNodes_.size(); i++) {
1691 output.writeMessage(1, liveNodes_.get(i));
1692 }
1693 for (int i = 0; i < deadNodes_.size(); i++) {
1694 output.writeBytes(2, deadNodes_.getByteString(i));
1695 }
1696 if (((bitField0_ & 0x00000001) == 0x00000001)) {
1697 output.writeInt32(3, regions_);
1698 }
1699 if (((bitField0_ & 0x00000002) == 0x00000002)) {
1700 output.writeInt32(4, requests_);
1701 }
1702 if (((bitField0_ & 0x00000004) == 0x00000004)) {
1703 output.writeDouble(5, averageLoad_);
1704 }
1705 getUnknownFields().writeTo(output);
1706 }
1707
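    // Computes the serialized wire size of the message once and caches it in
    // memoizedSerializedSize; later calls return the cached value.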
1708 private int memoizedSerializedSize = -1;
1709 public int getSerializedSize() {
1710 int size = memoizedSerializedSize;
1711 if (size != -1) return size;
1712
1713 size = 0;
1714 for (int i = 0; i < liveNodes_.size(); i++) {
1715 size += com.google.protobuf.CodedOutputStream
1716 .computeMessageSize(1, liveNodes_.get(i));
1717 }
1718 {
1719 int dataSize = 0;
1720 for (int i = 0; i < deadNodes_.size(); i++) {
1721 dataSize += com.google.protobuf.CodedOutputStream
1722 .computeBytesSizeNoTag(deadNodes_.getByteString(i));
1723 }
1724 size += dataSize;
1725 size += 1 * getDeadNodesList().size();
1726 }
1727 if (((bitField0_ & 0x00000001) == 0x00000001)) {
1728 size += com.google.protobuf.CodedOutputStream
1729 .computeInt32Size(3, regions_);
1730 }
1731 if (((bitField0_ & 0x00000002) == 0x00000002)) {
1732 size += com.google.protobuf.CodedOutputStream
1733 .computeInt32Size(4, requests_);
1734 }
1735 if (((bitField0_ & 0x00000004) == 0x00000004)) {
1736 size += com.google.protobuf.CodedOutputStream
1737 .computeDoubleSize(5, averageLoad_);
1738 }
1739 size += getUnknownFields().getSerializedSize();
1740 memoizedSerializedSize = size;
1741 return size;
1742 }
1743
1744 private static final long serialVersionUID = 0L;
1745 @java.lang.Override
1746 protected java.lang.Object writeReplace()
1747 throws java.io.ObjectStreamException {
1748 return super.writeReplace();
1749 }
1750
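    // Static parse helpers: each overload merges the input into a fresh Builder
    // and returns the built message; the delimited variants return null when the
    // stream is already at end-of-input.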
1751 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1752 com.google.protobuf.ByteString data)
1753 throws com.google.protobuf.InvalidProtocolBufferException {
1754 return newBuilder().mergeFrom(data).buildParsed();
1755 }
1756 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1757 com.google.protobuf.ByteString data,
1758 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1759 throws com.google.protobuf.InvalidProtocolBufferException {
1760 return newBuilder().mergeFrom(data, extensionRegistry)
1761 .buildParsed();
1762 }
1763 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(byte[] data)
1764 throws com.google.protobuf.InvalidProtocolBufferException {
1765 return newBuilder().mergeFrom(data).buildParsed();
1766 }
1767 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1768 byte[] data,
1769 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1770 throws com.google.protobuf.InvalidProtocolBufferException {
1771 return newBuilder().mergeFrom(data, extensionRegistry)
1772 .buildParsed();
1773 }
1774 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(java.io.InputStream input)
1775 throws java.io.IOException {
1776 return newBuilder().mergeFrom(input).buildParsed();
1777 }
1778 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1779 java.io.InputStream input,
1780 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1781 throws java.io.IOException {
1782 return newBuilder().mergeFrom(input, extensionRegistry)
1783 .buildParsed();
1784 }
1785 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(java.io.InputStream input)
1786 throws java.io.IOException {
1787 Builder builder = newBuilder();
1788 if (builder.mergeDelimitedFrom(input)) {
1789 return builder.buildParsed();
1790 } else {
1791 return null;
1792 }
1793 }
1794 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(
1795 java.io.InputStream input,
1796 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1797 throws java.io.IOException {
1798 Builder builder = newBuilder();
1799 if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
1800 return builder.buildParsed();
1801 } else {
1802 return null;
1803 }
1804 }
1805 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1806 com.google.protobuf.CodedInputStream input)
1807 throws java.io.IOException {
1808 return newBuilder().mergeFrom(input).buildParsed();
1809 }
1810 public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1811 com.google.protobuf.CodedInputStream input,
1812 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1813 throws java.io.IOException {
1814 return newBuilder().mergeFrom(input, extensionRegistry)
1815 .buildParsed();
1816 }
1817
1818 public static Builder newBuilder() { return Builder.create(); }
1819 public Builder newBuilderForType() { return newBuilder(); }
1820 public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus prototype) {
1821 return newBuilder().mergeFrom(prototype);
1822 }
1823 public Builder toBuilder() { return newBuilder(this); }
1824
1825 @java.lang.Override
1826 protected Builder newBuilderForType(
1827 com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1828 Builder builder = new Builder(parent);
1829 return builder;
1830 }
1831 public static final class Builder extends
1832 com.google.protobuf.GeneratedMessage.Builder<Builder>
1833 implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatusOrBuilder {
1834 public static final com.google.protobuf.Descriptors.Descriptor
1835 getDescriptor() {
1836 return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
1837 }
1838
1839 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1840 internalGetFieldAccessorTable() {
1841 return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
1842 }
1843
1844
1845 private Builder() {
1846 maybeForceBuilderInitialization();
1847 }
1848
1849 private Builder(BuilderParent parent) {
1850 super(parent);
1851 maybeForceBuilderInitialization();
1852 }
1853 private void maybeForceBuilderInitialization() {
1854 if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1855 getLiveNodesFieldBuilder();
1856 }
1857 }
1858 private static Builder create() {
1859 return new Builder();
1860 }
1861
1862 public Builder clear() {
1863 super.clear();
1864 if (liveNodesBuilder_ == null) {
1865 liveNodes_ = java.util.Collections.emptyList();
1866 bitField0_ = (bitField0_ & ~0x00000001);
1867 } else {
1868 liveNodesBuilder_.clear();
1869 }
1870 deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
1871 bitField0_ = (bitField0_ & ~0x00000002);
1872 regions_ = 0;
1873 bitField0_ = (bitField0_ & ~0x00000004);
1874 requests_ = 0;
1875 bitField0_ = (bitField0_ & ~0x00000008);
1876 averageLoad_ = 0D;
1877 bitField0_ = (bitField0_ & ~0x00000010);
1878 return this;
1879 }
1880
1881 public Builder clone() {
1882 return create().mergeFrom(buildPartial());
1883 }
1884
1885 public com.google.protobuf.Descriptors.Descriptor
1886 getDescriptorForType() {
1887 return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDescriptor();
1888 }
1889
1890 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus getDefaultInstanceForType() {
1891 return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance();
1892 }
1893
1894 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus build() {
1895 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = buildPartial();
1896 if (!result.isInitialized()) {
1897 throw newUninitializedMessageException(result);
1898 }
1899 return result;
1900 }
1901
1902 private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildParsed()
1903 throws com.google.protobuf.InvalidProtocolBufferException {
1904 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = buildPartial();
1905 if (!result.isInitialized()) {
1906 throw newUninitializedMessageException(
1907 result).asInvalidProtocolBufferException();
1908 }
1909 return result;
1910 }
1911
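      // Copies the builder state into an immutable message. The builder's
      // presence bits for regions/requests/averageLoad (0x04/0x08/0x10) are
      // remapped to the message's bits (0x01/0x02/0x04), since the repeated
      // liveNodes and deadNodes fields carry no presence bit in the built message.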
1912 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildPartial() {
1913 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus(this);
1914 int from_bitField0_ = bitField0_;
1915 int to_bitField0_ = 0;
1916 if (liveNodesBuilder_ == null) {
1917 if (((bitField0_ & 0x00000001) == 0x00000001)) {
1918 liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
1919 bitField0_ = (bitField0_ & ~0x00000001);
1920 }
1921 result.liveNodes_ = liveNodes_;
1922 } else {
1923 result.liveNodes_ = liveNodesBuilder_.build();
1924 }
1925 if (((bitField0_ & 0x00000002) == 0x00000002)) {
1926 deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(
1927 deadNodes_);
1928 bitField0_ = (bitField0_ & ~0x00000002);
1929 }
1930 result.deadNodes_ = deadNodes_;
1931 if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1932 to_bitField0_ |= 0x00000001;
1933 }
1934 result.regions_ = regions_;
1935 if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1936 to_bitField0_ |= 0x00000002;
1937 }
1938 result.requests_ = requests_;
1939 if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
1940 to_bitField0_ |= 0x00000004;
1941 }
1942 result.averageLoad_ = averageLoad_;
1943 result.bitField0_ = to_bitField0_;
1944 onBuilt();
1945 return result;
1946 }
1947
1948 public Builder mergeFrom(com.google.protobuf.Message other) {
1949 if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) {
1950 return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus)other);
1951 } else {
1952 super.mergeFrom(other);
1953 return this;
1954 }
1955 }
1956
1957 public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus other) {
1958 if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance()) return this;
1959 if (liveNodesBuilder_ == null) {
1960 if (!other.liveNodes_.isEmpty()) {
1961 if (liveNodes_.isEmpty()) {
1962 liveNodes_ = other.liveNodes_;
1963 bitField0_ = (bitField0_ & ~0x00000001);
1964 } else {
1965 ensureLiveNodesIsMutable();
1966 liveNodes_.addAll(other.liveNodes_);
1967 }
1968 onChanged();
1969 }
1970 } else {
1971 if (!other.liveNodes_.isEmpty()) {
1972 if (liveNodesBuilder_.isEmpty()) {
1973 liveNodesBuilder_.dispose();
1974 liveNodesBuilder_ = null;
1975 liveNodes_ = other.liveNodes_;
1976 bitField0_ = (bitField0_ & ~0x00000001);
1977 liveNodesBuilder_ =
1978 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
1979 getLiveNodesFieldBuilder() : null;
1980 } else {
1981 liveNodesBuilder_.addAllMessages(other.liveNodes_);
1982 }
1983 }
1984 }
1985 if (!other.deadNodes_.isEmpty()) {
1986 if (deadNodes_.isEmpty()) {
1987 deadNodes_ = other.deadNodes_;
1988 bitField0_ = (bitField0_ & ~0x00000002);
1989 } else {
1990 ensureDeadNodesIsMutable();
1991 deadNodes_.addAll(other.deadNodes_);
1992 }
1993 onChanged();
1994 }
1995 if (other.hasRegions()) {
1996 setRegions(other.getRegions());
1997 }
1998 if (other.hasRequests()) {
1999 setRequests(other.getRequests());
2000 }
2001 if (other.hasAverageLoad()) {
2002 setAverageLoad(other.getAverageLoad());
2003 }
2004 this.mergeUnknownFields(other.getUnknownFields());
2005 return this;
2006 }
2007
2008 public final boolean isInitialized() {
2009 for (int i = 0; i < getLiveNodesCount(); i++) {
2010 if (!getLiveNodes(i).isInitialized()) {
2011
2012 return false;
2013 }
2014 }
2015 return true;
2016 }
2017
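      // Parses fields from the wire. Tag values: 10 = liveNodes (field 1,
      // length-delimited message), 18 = deadNodes (field 2, length-delimited),
      // 24 = regions (field 3, varint), 32 = requests (field 4, varint),
      // 41 = averageLoad (field 5, 64-bit); tag 0 marks end of input, and any
      // unrecognized tag is kept in the unknown-field set.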
2018 public Builder mergeFrom(
2019 com.google.protobuf.CodedInputStream input,
2020 com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2021 throws java.io.IOException {
2022 com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2023 com.google.protobuf.UnknownFieldSet.newBuilder(
2024 this.getUnknownFields());
2025 while (true) {
2026 int tag = input.readTag();
2027 switch (tag) {
2028 case 0:
2029 this.setUnknownFields(unknownFields.build());
2030 onChanged();
2031 return this;
2032 default: {
2033 if (!parseUnknownField(input, unknownFields,
2034 extensionRegistry, tag)) {
2035 this.setUnknownFields(unknownFields.build());
2036 onChanged();
2037 return this;
2038 }
2039 break;
2040 }
2041 case 10: {
2042 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder subBuilder = org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder();
2043 input.readMessage(subBuilder, extensionRegistry);
2044 addLiveNodes(subBuilder.buildPartial());
2045 break;
2046 }
2047 case 18: {
2048 ensureDeadNodesIsMutable();
2049 deadNodes_.add(input.readBytes());
2050 break;
2051 }
2052 case 24: {
2053 bitField0_ |= 0x00000004;
2054 regions_ = input.readInt32();
2055 break;
2056 }
2057 case 32: {
2058 bitField0_ |= 0x00000008;
2059 requests_ = input.readInt32();
2060 break;
2061 }
2062 case 41: {
2063 bitField0_ |= 0x00000010;
2064 averageLoad_ = input.readDouble();
2065 break;
2066 }
2067 }
2068 }
2069 }
2070
2071 private int bitField0_;
2072
2073
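      // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;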
2074 private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_ =
2075 java.util.Collections.emptyList();
2076 private void ensureLiveNodesIsMutable() {
2077 if (!((bitField0_ & 0x00000001) == 0x00000001)) {
2078 liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>(liveNodes_);
2079 bitField0_ |= 0x00000001;
2080 }
2081 }
2082
2083 private com.google.protobuf.RepeatedFieldBuilder<
2084 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder> liveNodesBuilder_;
2085
2086 public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
2087 if (liveNodesBuilder_ == null) {
2088 return java.util.Collections.unmodifiableList(liveNodes_);
2089 } else {
2090 return liveNodesBuilder_.getMessageList();
2091 }
2092 }
2093 public int getLiveNodesCount() {
2094 if (liveNodesBuilder_ == null) {
2095 return liveNodes_.size();
2096 } else {
2097 return liveNodesBuilder_.getCount();
2098 }
2099 }
2100 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
2101 if (liveNodesBuilder_ == null) {
2102 return liveNodes_.get(index);
2103 } else {
2104 return liveNodesBuilder_.getMessage(index);
2105 }
2106 }
2107 public Builder setLiveNodes(
2108 int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
2109 if (liveNodesBuilder_ == null) {
2110 if (value == null) {
2111 throw new NullPointerException();
2112 }
2113 ensureLiveNodesIsMutable();
2114 liveNodes_.set(index, value);
2115 onChanged();
2116 } else {
2117 liveNodesBuilder_.setMessage(index, value);
2118 }
2119 return this;
2120 }
2121 public Builder setLiveNodes(
2122 int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
2123 if (liveNodesBuilder_ == null) {
2124 ensureLiveNodesIsMutable();
2125 liveNodes_.set(index, builderForValue.build());
2126 onChanged();
2127 } else {
2128 liveNodesBuilder_.setMessage(index, builderForValue.build());
2129 }
2130 return this;
2131 }
2132 public Builder addLiveNodes(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
2133 if (liveNodesBuilder_ == null) {
2134 if (value == null) {
2135 throw new NullPointerException();
2136 }
2137 ensureLiveNodesIsMutable();
2138 liveNodes_.add(value);
2139 onChanged();
2140 } else {
2141 liveNodesBuilder_.addMessage(value);
2142 }
2143 return this;
2144 }
2145 public Builder addLiveNodes(
2146 int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
2147 if (liveNodesBuilder_ == null) {
2148 if (value == null) {
2149 throw new NullPointerException();
2150 }
2151 ensureLiveNodesIsMutable();
2152 liveNodes_.add(index, value);
2153 onChanged();
2154 } else {
2155 liveNodesBuilder_.addMessage(index, value);
2156 }
2157 return this;
2158 }
2159 public Builder addLiveNodes(
2160 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
2161 if (liveNodesBuilder_ == null) {
2162 ensureLiveNodesIsMutable();
2163 liveNodes_.add(builderForValue.build());
2164 onChanged();
2165 } else {
2166 liveNodesBuilder_.addMessage(builderForValue.build());
2167 }
2168 return this;
2169 }
2170 public Builder addLiveNodes(
2171 int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
2172 if (liveNodesBuilder_ == null) {
2173 ensureLiveNodesIsMutable();
2174 liveNodes_.add(index, builderForValue.build());
2175 onChanged();
2176 } else {
2177 liveNodesBuilder_.addMessage(index, builderForValue.build());
2178 }
2179 return this;
2180 }
2181 public Builder addAllLiveNodes(
2182 java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> values) {
2183 if (liveNodesBuilder_ == null) {
2184 ensureLiveNodesIsMutable();
2185 super.addAll(values, liveNodes_);
2186 onChanged();
2187 } else {
2188 liveNodesBuilder_.addAllMessages(values);
2189 }
2190 return this;
2191 }
2192 public Builder clearLiveNodes() {
2193 if (liveNodesBuilder_ == null) {
2194 liveNodes_ = java.util.Collections.emptyList();
2195 bitField0_ = (bitField0_ & ~0x00000001);
2196 onChanged();
2197 } else {
2198 liveNodesBuilder_.clear();
2199 }
2200 return this;
2201 }
2202 public Builder removeLiveNodes(int index) {
2203 if (liveNodesBuilder_ == null) {
2204 ensureLiveNodesIsMutable();
2205 liveNodes_.remove(index);
2206 onChanged();
2207 } else {
2208 liveNodesBuilder_.remove(index);
2209 }
2210 return this;
2211 }
2212 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder getLiveNodesBuilder(
2213 int index) {
2214 return getLiveNodesFieldBuilder().getBuilder(index);
2215 }
2216 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
2217 int index) {
2218       if (liveNodesBuilder_ == null) {
2219         return liveNodes_.get(index);
2220       }
2221       return liveNodesBuilder_.getMessageOrBuilder(index);
2222     }
2223 public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
2224 getLiveNodesOrBuilderList() {
2225 if (liveNodesBuilder_ != null) {
2226 return liveNodesBuilder_.getMessageOrBuilderList();
2227 } else {
2228 return java.util.Collections.unmodifiableList(liveNodes_);
2229 }
2230 }
2231 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder addLiveNodesBuilder() {
2232 return getLiveNodesFieldBuilder().addBuilder(
2233 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance());
2234 }
2235 public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder addLiveNodesBuilder(
2236 int index) {
2237 return getLiveNodesFieldBuilder().addBuilder(
2238 index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance());
2239 }
2240 public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder>
2241 getLiveNodesBuilderList() {
2242 return getLiveNodesFieldBuilder().getBuilderList();
2243 }
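      // Lazily creates the RepeatedFieldBuilder for liveNodes; once created it
      // takes ownership of the current list and liveNodes_ is set to null.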
2244 private com.google.protobuf.RepeatedFieldBuilder<
2245 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>
2246 getLiveNodesFieldBuilder() {
2247 if (liveNodesBuilder_ == null) {
2248 liveNodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
2249 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder>(
2250 liveNodes_,
2251 ((bitField0_ & 0x00000001) == 0x00000001),
2252 getParentForChildren(),
2253 isClean());
2254 liveNodes_ = null;
2255 }
2256 return liveNodesBuilder_;
2257 }
2258
2259
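      // repeated string deadNodes = 2;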
2260 private com.google.protobuf.LazyStringList deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
2261 private void ensureDeadNodesIsMutable() {
2262 if (!((bitField0_ & 0x00000002) == 0x00000002)) {
2263 deadNodes_ = new com.google.protobuf.LazyStringArrayList(deadNodes_);
2264 bitField0_ |= 0x00000002;
2265 }
2266 }
2267 public java.util.List<String>
2268 getDeadNodesList() {
2269 return java.util.Collections.unmodifiableList(deadNodes_);
2270 }
2271 public int getDeadNodesCount() {
2272 return deadNodes_.size();
2273 }
2274 public String getDeadNodes(int index) {
2275 return deadNodes_.get(index);
2276 }
2277 public Builder setDeadNodes(
2278 int index, String value) {
2279 if (value == null) {
2280 throw new NullPointerException();
2281 }
2282 ensureDeadNodesIsMutable();
2283 deadNodes_.set(index, value);
2284 onChanged();
2285 return this;
2286 }
2287 public Builder addDeadNodes(String value) {
2288 if (value == null) {
2289 throw new NullPointerException();
2290 }
2291 ensureDeadNodesIsMutable();
2292 deadNodes_.add(value);
2293 onChanged();
2294 return this;
2295 }
2296 public Builder addAllDeadNodes(
2297 java.lang.Iterable<String> values) {
2298 ensureDeadNodesIsMutable();
2299 super.addAll(values, deadNodes_);
2300 onChanged();
2301 return this;
2302 }
2303 public Builder clearDeadNodes() {
2304 deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
2305 bitField0_ = (bitField0_ & ~0x00000002);
2306 onChanged();
2307 return this;
2308 }
2309 void addDeadNodes(com.google.protobuf.ByteString value) {
2310 ensureDeadNodesIsMutable();
2311 deadNodes_.add(value);
2312 onChanged();
2313 }
2314
2315
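      // optional int32 regions = 3;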
2316 private int regions_ ;
2317 public boolean hasRegions() {
2318 return ((bitField0_ & 0x00000004) == 0x00000004);
2319 }
2320 public int getRegions() {
2321 return regions_;
2322 }
2323 public Builder setRegions(int value) {
2324 bitField0_ |= 0x00000004;
2325 regions_ = value;
2326 onChanged();
2327 return this;
2328 }
2329 public Builder clearRegions() {
2330 bitField0_ = (bitField0_ & ~0x00000004);
2331 regions_ = 0;
2332 onChanged();
2333 return this;
2334 }
2335
2336
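      // optional int32 requests = 4;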
2337 private int requests_ ;
2338 public boolean hasRequests() {
2339 return ((bitField0_ & 0x00000008) == 0x00000008);
2340 }
2341 public int getRequests() {
2342 return requests_;
2343 }
2344 public Builder setRequests(int value) {
2345 bitField0_ |= 0x00000008;
2346 requests_ = value;
2347 onChanged();
2348 return this;
2349 }
2350 public Builder clearRequests() {
2351 bitField0_ = (bitField0_ & ~0x00000008);
2352 requests_ = 0;
2353 onChanged();
2354 return this;
2355 }
2356
2357
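      // optional double averageLoad = 5;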
2358 private double averageLoad_ ;
2359 public boolean hasAverageLoad() {
2360 return ((bitField0_ & 0x00000010) == 0x00000010);
2361 }
2362 public double getAverageLoad() {
2363 return averageLoad_;
2364 }
2365 public Builder setAverageLoad(double value) {
2366 bitField0_ |= 0x00000010;
2367 averageLoad_ = value;
2368 onChanged();
2369 return this;
2370 }
2371 public Builder clearAverageLoad() {
2372 bitField0_ = (bitField0_ & ~0x00000010);
2373 averageLoad_ = 0D;
2374 onChanged();
2375 return this;
2376 }
2377
2378
2379 }
2380
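  // Eagerly creates the shared immutable default instance returned by
  // getDefaultInstance().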
2381 static {
2382 defaultInstance = new StorageClusterStatus(true);
2383 defaultInstance.initFields();
2384 }
2385
2386
2387 }
2388
2389 private static com.google.protobuf.Descriptors.Descriptor
2390 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
2391 private static
2392 com.google.protobuf.GeneratedMessage.FieldAccessorTable
2393 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
2394 private static com.google.protobuf.Descriptors.Descriptor
2395 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
2396 private static
2397 com.google.protobuf.GeneratedMessage.FieldAccessorTable
2398 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
2399 private static com.google.protobuf.Descriptors.Descriptor
2400 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
2401 private static
2402 com.google.protobuf.GeneratedMessage.FieldAccessorTable
2403 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
2404
2405 public static com.google.protobuf.Descriptors.FileDescriptor
2406 getDescriptor() {
2407 return descriptor;
2408 }
2409 private static com.google.protobuf.Descriptors.FileDescriptor
2410 descriptor;
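  // Builds the FileDescriptor from the serialized StorageClusterStatusMessage.proto
  // definition embedded in descriptorData, then wires up the field accessor
  // tables for StorageClusterStatus and its nested Region and Node messages.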
2411 static {
2412 java.lang.String[] descriptorData = {
2413 "\n!StorageClusterStatusMessage.proto\022/org" +
2414 ".apache.hadoop.hbase.rest.protobuf.gener" +
2415 "ated\"\222\004\n\024StorageClusterStatus\022]\n\tliveNod" +
2416 "es\030\001 \003(\0132J.org.apache.hadoop.hbase.rest." +
2417 "protobuf.generated.StorageClusterStatus." +
2418 "Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regions\030\003 \001(\005" +
2419 "\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoad\030\005 \001(\001\032" +
2420 "\211\001\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" +
2421 "\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefileSizeMB\030" +
2422 "\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n\024storefi",
2423 "leIndexSizeMB\030\006 \001(\005\032\303\001\n\004Node\022\014\n\004name\030\001 \002" +
2424 "(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\005\022" +
2425 "\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 \001" +
2426 "(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop." +
2427 "hbase.rest.protobuf.generated.StorageClu" +
2428 "sterStatus.Region"
2429 };
2430 com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
2431 new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
2432 public com.google.protobuf.ExtensionRegistry assignDescriptors(
2433 com.google.protobuf.Descriptors.FileDescriptor root) {
2434 descriptor = root;
2435 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor =
2436 getDescriptor().getMessageTypes().get(0);
2437 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable = new
2438 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
2439 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor,
2440 new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", },
2441 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class,
2442 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
2443 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor =
2444 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
2445 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new
2446 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
2447 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor,
2448 new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", },
2449 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class,
2450 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
2451 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor =
2452 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(1);
2453 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
2454 com.google.protobuf.GeneratedMessage.FieldAccessorTable(
2455 internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor,
2456 new java.lang.String[] { "Name", "StartCode", "Requests", "HeapSizeMB", "MaxHeapSizeMB", "Regions", },
2457 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class,
2458 org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
2459 return null;
2460 }
2461 };
2462 com.google.protobuf.Descriptors.FileDescriptor
2463 .internalBuildGeneratedFileFrom(descriptorData,
2464 new com.google.protobuf.Descriptors.FileDescriptor[] {
2465 }, assigner);
2466 }
2467
2468
2469 }