package org.apache.hadoop.hbase.regionserver.wal;

import static org.apache.hadoop.hbase.util.FSUtils.recoverFileLease;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.io.Writable;

import com.google.common.util.concurrent.NamingThreadFactory;
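
/**
 * HLog is the HBase write-ahead log (WAL) implementation. A region server
 * keeps one HLog; every edit to any of its regions is appended here before
 * the change is applied to the memstore. The log rolls to a new file once
 * the current one approaches a full HDFS block, files whose edits have all
 * been flushed to store files are archived to the old-log directory, and
 * after a server failure the surviving logs can be split into per-region
 * "recovered edits" files that are replayed when the regions are reopened.
 */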
public class HLog implements Syncable {
  static final Log LOG = LogFactory.getLog(HLog.class);
  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte [] METAROW = Bytes.toBytes("METAROW");

  /** Name of the directory, under a region directory, holding recovered edits. */
  private static final String RECOVERED_EDITS_DIR = "recovered.edits";
  private static final Pattern EDITFILES_NAME_PATTERN =
    Pattern.compile("-?[0-9]+");

  private final FileSystem fs;
  private final Path dir;
  private final Configuration conf;
  private final LogRollListener listener;
  private final long optionalFlushInterval;
  private final long blocksize;
  private final int flushlogentries;
  private final String prefix;
  private final Path oldLogDir;
  private final List<LogActionsListener> actionListeners =
    Collections.synchronizedList(new ArrayList<LogActionsListener>());
  private boolean logRollRequested;

  private static Class<? extends Writer> logWriterClass;
  private static Class<? extends Reader> logReaderClass;

  // Underlying HDFS output stream of the current writer, plus the
  // DFSOutputStream.getNumCurrentReplicas method looked up reflectively
  // (only present on HDFS versions carrying the HDFS-826 patch).
  private OutputStream hdfs_out;
  private int initialReplication;
  private Method getNumCurrentReplicas;
  final static Object [] NO_ARGS = new Object []{};

  private boolean forceSync = false;

  public interface Reader {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    Entry next() throws IOException;
    Entry next(Entry reuse) throws IOException;
    void seek(long pos) throws IOException;
    long getPosition() throws IOException;
  }

  public interface Writer {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    void sync() throws IOException;
    void append(Entry entry) throws IOException;
    long getLength() throws IOException;
  }

  // The current log writer; replaced on every roll.
  Writer writer;

  // Map of rolled log files in use, keyed by the highest sequence id
  // they contain.
  final SortedMap<Long, Path> outputfiles =
    Collections.synchronizedSortedMap(new TreeMap<Long, Path>());

  // Map of region name to the sequence id of the first edit written for
  // that region since its last memstore flush; used to decide which old
  // log files can be archived.
  private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
    new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);

  private volatile boolean closed = false;

  private final AtomicLong logSeqNum = new AtomicLong(0);

  // The timestamp (in ms) of the current log file's creation.
  private volatile long filenum = -1;

  // Number of entries written to the current log file.
  private final AtomicInteger numEntries = new AtomicInteger(0);

  // Size, in bytes, at which a roll of the log file is requested.
  private final long logrollsize;

  // Lock preventing a log roll during a cache flush (and vice versa).
  private final Lock cacheFlushLock = new ReentrantLock();

  // Lock serializing appends and the swap of the writer during a roll.
  private final Object updateLock = new Object();

  private final boolean enabled;

  // Maximum number of rolled log files before memstore flushes are forced
  // so the oldest files can be archived.
  private final int maxLogs;

  // Background thread that syncs the log on a fixed interval.
  private final LogSyncer logSyncerThread;

  private final List<LogEntryVisitor> logEntryVisitors =
    new CopyOnWriteArrayList<LogEntryVisitor>();

  // Pattern a valid hlog file name must match: anything ending in
  // a dot followed by digits (prefix.filenum).
  private static final Pattern pattern = Pattern.compile(".*\\.\\d*");

  static byte [] COMPLETE_CACHE_FLUSH;
  static {
    try {
      COMPLETE_CACHE_FLUSH =
        "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      // UTF-8 is always supported, so this cannot happen.
      assert(false);
    }
  }

  // Simple read-and-reset metrics counters. They are volatile but the
  // increments are not atomic, so counts are approximate under concurrency.
  private static volatile long writeOps;
  private static volatile long writeTime;
  private static volatile long syncOps;
  private static volatile long syncTime;

  public static long getWriteOps() {
    long ret = writeOps;
    writeOps = 0;
    return ret;
  }

  public static long getWriteTime() {
    long ret = writeTime;
    writeTime = 0;
    return ret;
  }

  public static long getSyncOps() {
    long ret = syncOps;
    syncOps = 0;
    return ret;
  }

  public static long getSyncTime() {
    long ret = syncTime;
    syncTime = 0;
    return ret;
  }

  /**
   * Create an edit log at the given <code>dir</code> location, using the
   * default "hlog" file-name prefix and no extra actions listener.
   */
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
    final Configuration conf, final LogRollListener listener)
  throws IOException {
    this(fs, dir, oldLogDir, conf, listener, null, null);
  }
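
  /**
   * Create an edit log at the given <code>dir</code> location. You should
   * never have to load an existing log: if there is a log at startup, it
   * should have already been processed and deleted by the time the HLog
   * object is started up.
   * @param fs filesystem handle
   * @param dir storage directory for new log files; must not already exist
   * @param oldLogDir directory rolled log files are archived to
   * @param conf configuration to use
   * @param listener listener used to request log rolls
   * @param actionListener optional listener notified of log actions
   * @param prefix prefix for log file names; defaults to "hlog" when null
   * or empty
   * @throws IOException if <code>dir</code> already exists
   */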
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
    final Configuration conf, final LogRollListener listener,
    final LogActionsListener actionListener, final String prefix)
  throws IOException {
    super();
    this.fs = fs;
    this.dir = dir;
    this.conf = conf;
    this.listener = listener;
    this.flushlogentries =
      conf.getInt("hbase.regionserver.flushlogentries", 1);
    this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
      this.fs.getDefaultBlockSize());
    // Roll at 95% of the block size by default, so a rolled file fits in
    // a single HDFS block.
    float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
    this.logrollsize = (long)(this.blocksize * multi);
    this.optionalFlushInterval =
      conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
    if (fs.exists(dir)) {
      throw new IOException("Target HLog directory already exists: " + dir);
    }
    fs.mkdirs(dir);
    this.oldLogDir = oldLogDir;
    if (!fs.exists(oldLogDir)) {
      fs.mkdirs(this.oldLogDir);
    }
    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
    LOG.info("HLog configuration: blocksize=" + this.blocksize +
      ", rollsize=" + this.logrollsize +
      ", enabled=" + this.enabled +
      ", flushlogentries=" + this.flushlogentries +
      ", optionallogflushinterval=" + this.optionalFlushInterval + "ms");
    if (actionListener != null) {
      addLogActionsListener(actionListener);
    }
    // URL-encode the prefix so it is always safe to use in a file name.
    this.prefix = prefix == null || prefix.isEmpty() ?
      "hlog" : URLEncoder.encode(prefix, "UTF8");

    rollWriter();

    // Look up DFSOutputStream.getNumCurrentReplicas reflectively; it only
    // exists on HDFS versions carrying the HDFS-826 patch.
    this.getNumCurrentReplicas = null;
    if (this.hdfs_out != null) {
      try {
        this.getNumCurrentReplicas = this.hdfs_out.getClass().
          getMethod("getNumCurrentReplicas", new Class<?> []{});
        this.getNumCurrentReplicas.setAccessible(true);
      } catch (NoSuchMethodException e) {
        // Method not available on this version of HDFS; that's fine.
      } catch (SecurityException e) {
        // Not allowed to reflect on the stream; proceed without it.
        this.getNumCurrentReplicas = null;
      }
    }
    if (this.getNumCurrentReplicas != null) {
      LOG.info("Using getNumCurrentReplicas--HDFS-826");
    } else {
      LOG.info("getNumCurrentReplicas--HDFS-826 not available");
    }

    logSyncerThread = new LogSyncer(this.optionalFlushInterval);
    Threads.setDaemonThreadRunning(logSyncerThread,
      Thread.currentThread().getName() + ".logSyncer");
  }

  /** @return the file number (creation timestamp) of the current log file */
  public long getFilenum() {
    return this.filenum;
  }

  /**
   * Called by HRegionServer when it opens a new region, so the log's
   * sequence number stays ahead of the highest edit id found in the
   * region's store files. Only ever moves the sequence number forward.
   * @param newvalue the log sequence number is set to this value if it is
   * greater than the current value
   */
  public void setSequenceNumber(final long newvalue) {
    for (long id = this.logSeqNum.get(); id < newvalue &&
        !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
      // This could spin on occasion, but better an occasional spin than
      // locking every increment of the sequence number.
      LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
    }
  }

  /** @return the current log sequence number */
  public long getSequenceNumber() {
    return logSeqNum.get();
  }

  // Package-private so tests can examine the underlying HDFS output stream.
  OutputStream getOutputStream() {
    return this.hdfs_out;
  }
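
  /**
   * Roll the log writer: start writing log messages to a new file.
   * Because a log cannot be rolled during a cache flush, and a cache flush
   * spans two method calls, a special lock needs to be obtained so that a
   * cache flush cannot start while the log is being rolled and the log
   * cannot be rolled during a cache flush.
   * @return the names of the regions to flush in order to be able to
   * archive old log files, or null if none need flushing
   * @throws FailedLogCloseException if the current log file cannot be closed
   */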
  public byte [][] rollWriter() throws FailedLogCloseException, IOException {
    // Return if nothing to flush.
    if (this.writer != null && this.numEntries.get() <= 0) {
      return null;
    }
    byte [][] regionsToFlush = null;
    this.cacheFlushLock.lock();
    try {
      if (closed) {
        return regionsToFlush;
      }
      // Do all the preparation outside of the updateLock so incoming
      // writes are blocked for as little time as possible.
      long currentFilenum = this.filenum;
      this.filenum = System.currentTimeMillis();
      Path newPath = computeFilename();
      HLog.Writer nextWriter = createWriter(fs, newPath, HBaseConfiguration.create(conf));
      int nextInitialReplication = fs.getFileStatus(newPath).getReplication();
      // Can we get at the dfsclient output stream? Only if the writer is a
      // SequenceFileLogWriter, which exposes it.
      OutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut =
          ((SequenceFileLogWriter)nextWriter).getDFSCOutputStream();
      }
      synchronized (updateLock) {
        // Clean up the current writer, then swap in the new one.
        Path oldFile = cleanupCurrentWriter(currentFilenum);
        this.writer = nextWriter;
        this.initialReplication = nextInitialReplication;
        this.hdfs_out = nextHdfsOut;

        LOG.info((oldFile != null?
          "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
          this.numEntries.get() +
          ", filesize=" +
          this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
          "New hlog " + FSUtils.getPath(newPath));
        this.numEntries.set(0);
        this.logRollRequested = false;
      }
      // Tell our listeners that a new log was created.
      if (!this.actionListeners.isEmpty()) {
        for (LogActionsListener list : this.actionListeners) {
          list.logRolled(newPath);
        }
      }
      // Can we delete any of the old log files?
      if (this.outputfiles.size() > 0) {
        if (this.lastSeqWritten.isEmpty()) {
          LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
          // No region has edits pending in its memstore, so every rolled
          // log file can be archived safely.
          for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
            archiveLogFile(e.getValue(), e.getKey());
          }
          this.outputfiles.clear();
        } else {
          regionsToFlush = cleanOldLogs();
        }
      }
    } finally {
      this.cacheFlushLock.unlock();
    }
    return regionsToFlush;
  }
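
  /**
   * Get a reader for the given WAL file. The implementation class is read
   * from "hbase.regionserver.hlog.reader.impl" and defaults to
   * {@link SequenceFileLogReader}.
   */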
  public static Reader getReader(final FileSystem fs,
    final Path path, Configuration conf)
  throws IOException {
    try {
      if (logReaderClass == null) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
          SequenceFileLogReader.class, Reader.class);
      }
      HLog.Reader reader = logReaderClass.newInstance();
      reader.init(fs, path, conf);
      return reader;
    } catch (IOException e) {
      throw e;
    } catch (Exception e) {
      throw new IOException("Cannot get log reader", e);
    }
  }
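
  /**
   * Get a writer for writing a new WAL file. The implementation class is
   * read from "hbase.regionserver.hlog.writer.impl" and defaults to
   * {@link SequenceFileLogWriter}.
   */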
  public static Writer createWriter(final FileSystem fs,
    final Path path, Configuration conf)
  throws IOException {
    try {
      if (logWriterClass == null) {
        logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
          SequenceFileLogWriter.class, Writer.class);
      }
      HLog.Writer writer = logWriterClass.newInstance();
      writer.init(fs, path, conf);
      return writer;
    } catch (Exception e) {
      IOException ie = new IOException("cannot get log writer");
      ie.initCause(e);
      throw ie;
    }
  }
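
  /**
   * Archive every log file whose highest sequence id is older than the
   * oldest edit still outstanding in any memstore. If too many log files
   * remain afterwards, report which regions should be flushed so that more
   * files can be archived on the next roll.
   * @return regions to flush, or null if no flushes are needed
   */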
  private byte [][] cleanOldLogs() throws IOException {
    Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
    // Collect the log files whose highest sequence id is older than the
    // oldest edit still outstanding in some memstore.
    TreeSet<Long> sequenceNumbers =
      new TreeSet<Long>(this.outputfiles.headMap(
        (Long.valueOf(oldestOutstandingSeqNum.longValue() + 1L))).keySet());
    // Now remove the old logs.
    int logsToRemove = sequenceNumbers.size();
    if (logsToRemove > 0) {
      if (LOG.isDebugEnabled()) {
        // Find the associated region; helps debugging.
        byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
        LOG.debug("Found " + logsToRemove + " hlogs to remove " +
          " out of total " + this.outputfiles.size() + "; " +
          "oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
          " from region " + Bytes.toString(oldestRegion));
      }
      for (Long seq : sequenceNumbers) {
        archiveLogFile(this.outputfiles.remove(seq), seq);
      }
    }

    // If too many log files remain, figure out which regions we need to
    // flush so their oldest edits leave the WAL. Note the files archived
    // above have already been removed from outputfiles.
    byte [][] regions = null;
    int logCount = this.outputfiles.size();
    if (logCount > this.maxLogs) {
      regions = findMemstoresWithEditsOlderThan(this.outputfiles.firstKey(),
        this.lastSeqWritten);
      if (regions != null) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < regions.length; i++) {
          if (i > 0) sb.append(", ");
          sb.append(Bytes.toStringBinary(regions[i]));
        }
        LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
          this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
          sb.toString());
      }
    }
    return regions;
  }

  /**
   * Find the regions whose memstores still hold edits older than the given
   * WAL sequence id; flushing them is what allows the oldest log file(s)
   * to be archived.
   * @return the matching region names, or null if none qualify
   */
  static byte [][] findMemstoresWithEditsOlderThan(final long oldestWALseqid,
      final Map<byte [], Long> regionsToSeqids) {
    // This method is static so it is easy to unit test.
    List<byte []> regions = null;
    for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
      if (e.getValue().longValue() < oldestWALseqid) {
        if (regions == null) regions = new ArrayList<byte []>();
        regions.add(e.getKey());
      }
    }
    return regions == null?
      null: regions.toArray(new byte [regions.size()][]);
  }

  /** @return the lowest sequence id still unflushed in any region */
  private Long getOldestOutstandingSeqNum() {
    return Collections.min(this.lastSeqWritten.values());
  }

  private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
    byte [] oldestRegion = null;
    for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
      if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
        oldestRegion = e.getKey();
        break;
      }
    }
    return oldestRegion;
  }
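
  /**
   * Close the current writer, if any, and record the closed file in
   * {@link #outputfiles} under the highest sequence id it can contain so
   * it can be archived later.
   * @param currentfilenum file number of the writer being closed
   * @return path of the closed file, or null if there was no open writer
   * @throws IOException wrapped as FailedLogCloseException when the close
   * fails, since a failed close can mean lost edits
   */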
  private Path cleanupCurrentWriter(final long currentfilenum)
  throws IOException {
    Path oldFile = null;
    if (this.writer != null) {
      // Close the current writer; a new one is created by the caller.
      try {
        this.writer.close();
      } catch (IOException e) {
        // Failed close of log file; we may be losing edits. Surface this
        // as a FailedLogCloseException so the caller can shut down and
        // minimize loss.
        FailedLogCloseException flce =
          new FailedLogCloseException("#" + currentfilenum);
        flce.initCause(e);
        throw flce;
      }
      if (currentfilenum >= 0) {
        oldFile = computeFilename(currentfilenum);
        this.outputfiles.put(Long.valueOf(this.logSeqNum.get() - 1), oldFile);
      }
    }
    return oldFile;
  }

  private void archiveLogFile(final Path p, final Long seqno) throws IOException {
    Path newPath = getHLogArchivePath(this.oldLogDir, p);
    LOG.info("moving old hlog file " + FSUtils.getPath(p) +
      " whose highest sequenceid is " + seqno + " to " +
      FSUtils.getPath(newPath));
    this.fs.rename(p, newPath);
  }

  /**
   * @return Path of the current log file, computed from this log's
   * creation timestamp
   */
  protected Path computeFilename() {
    return computeFilename(this.filenum);
  }

  /**
   * @param filenum file number (creation timestamp) to use in the name
   * @return Path of a log file with the given file number
   */
  protected Path computeFilename(long filenum) {
    if (filenum < 0) {
      throw new RuntimeException("hlog file number can't be < 0");
    }
    return new Path(dir, prefix + "." + filenum);
  }

  /**
   * Close the log, archive every remaining log file to the old-log
   * directory, and delete the now-empty log directory.
   */
  public void closeAndDelete() throws IOException {
    close();
    FileStatus[] files = fs.listStatus(this.dir);
    for (FileStatus file : files) {
      fs.rename(file.getPath(),
        getHLogArchivePath(this.oldLogDir, file.getPath()));
    }
    LOG.debug("Moved " + files.length + " log files to " +
      FSUtils.getPath(this.oldLogDir));
    fs.delete(dir, true);
  }
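
  /**
   * Shut down the log: stop the syncer thread, then close the current
   * writer under both the cache-flush lock and the update lock so that no
   * append or roll can race with the close.
   */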
  public void close() throws IOException {
    try {
      logSyncerThread.interrupt();
      // Make sure the syncer has flushed everything before we close.
      logSyncerThread.join(this.optionalFlushInterval * 2);
    } catch (InterruptedException e) {
      LOG.error("Exception while waiting for syncer thread to die", e);
    }

    cacheFlushLock.lock();
    try {
      synchronized (updateLock) {
        this.closed = true;
        if (LOG.isDebugEnabled()) {
          LOG.debug("closing hlog writer in " + this.dir.toString());
        }
        this.writer.close();
      }
    } finally {
      cacheFlushLock.unlock();
    }
  }

  /**
   * Append an edit to the log, keyed on the region and table of
   * <code>regionInfo</code>; the sequence number is assigned inside
   * {@link #append(HRegionInfo, HLogKey, WALEdit)}.
   */
  public void append(HRegionInfo regionInfo, WALEdit logEdit,
    final long now,
    final boolean isMetaRegion)
  throws IOException {
    byte [] regionName = regionInfo.getRegionName();
    byte [] tableName = regionInfo.getTableDesc().getName();
    this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
  }

  /**
   * @return a new log key; subclasses may override to supply a different
   * key type
   */
  protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum, long now) {
    return new HLogKey(regionName, tableName, seqnum, now);
  }
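
  /**
   * Append an edit to the log under the given key. The key's sequence
   * number is assigned here, the region's oldest-unflushed marker is
   * recorded, and the edit is synced to the filesystem before returning.
   */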
  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    byte [] regionName = regionInfo.getRegionName();
    synchronized (updateLock) {
      long seqNum = obtainSeqNum();
      logKey.setLogSeqNum(seqNum);
      // The 'lastSeqWritten' map holds the sequence number of the oldest
      // write for each region. When the cache is flushed, the entry for
      // the flushed region is removed if the sequence number of the flush
      // is greater than or equal to the value in lastSeqWritten.
      this.lastSeqWritten.putIfAbsent(regionName, Long.valueOf(seqNum));
      doWrite(regionInfo, logKey, logEdit);
      this.numEntries.incrementAndGet();
    }
    // Sync the edit to the filesystem.
    this.sync();
  }
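
  /**
   * Append a set of edits to the log. Log edits are keyed by region name,
   * row, and log-sequence-id. The log is not archivable for a region until
   * that region's memstore has been flushed; until then the edits must be
   * kept so they can be replayed on recovery. Edits are synced immediately
   * unless the table has deferred log flush enabled, in which case the
   * background syncer picks them up.
   * @param info the region the edits belong to
   * @param tableName name of the table the edits belong to
   * @param edits the batch of edits, one per KeyValue
   * @param now timestamp to use for the log key
   */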
  public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
    final long now)
  throws IOException {
    if (edits.isEmpty()) return;

    byte[] regionName = info.getRegionName();
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    synchronized (this.updateLock) {
      long seqNum = obtainSeqNum();
      // The 'lastSeqWritten' map holds the sequence number of the oldest
      // write for each region (the first edit added to the particular
      // memstore). When the cache is flushed, the entry for the flushed
      // region is removed if the sequence number of the flush is greater
      // than or equal to the value in lastSeqWritten.
      this.lastSeqWritten.putIfAbsent(regionName, seqNum);
      HLogKey logKey = makeKey(regionName, tableName, seqNum, now);
      doWrite(info, logKey, edits);
      this.numEntries.incrementAndGet();
    }
    // Sync if this is the catalog region or the table does not use
    // deferred log flushing.
    if (info.isMetaRegion() || !info.getTableDesc().isDeferredLogFlush()) {
      // sync txn to file system
      this.sync();
    }
  }
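
  /**
   * Background thread that syncs the log every optionalFlushInterval
   * milliseconds, so edits appended with deferred log flush still reach
   * the filesystem within a bounded delay.
   */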
  class LogSyncer extends Thread {

    private final long optionalFlushInterval;

    private boolean syncerShuttingDown = false;

    LogSyncer(long optionalFlushInterval) {
      this.optionalFlushInterval = optionalFlushInterval;
    }

    @Override
    public void run() {
      try {
        // Awake every optionalFlushInterval milliseconds and sync whatever
        // has been appended since the last run.
        while (!this.isInterrupted()) {
          Thread.sleep(this.optionalFlushInterval);
          sync();
        }
      } catch (IOException e) {
        LOG.error("Error while syncing, requesting close of hlog ", e);
        requestLogRoll();
      } catch (InterruptedException e) {
        LOG.debug(getName() + " interrupted while waiting for sync requests");
      } finally {
        syncerShuttingDown = true;
        LOG.info(getName() + " exiting");
      }
    }
  }
897
898 public void sync() throws IOException {
899 synchronized (this.updateLock) {
900 if (this.closed) {
901 return;
902 }
903 }
904 try {
905 long now = System.currentTimeMillis();
906
907 this.writer.sync();
908 synchronized (this.updateLock) {
909 syncTime += System.currentTimeMillis() - now;
910 syncOps++;
911 if (!logRollRequested) {
912 checkLowReplication();
913 if (this.writer.getLength() > this.logrollsize) {
914 requestLogRoll();
915 }
916 }
917 }
918
919 } catch (IOException e) {
920 LOG.fatal("Could not append. Requesting close of hlog", e);
921 requestLogRoll();
922 throw e;
923 }
924 }
925
926 private void checkLowReplication() {
927
928
929 try {
930 int numCurrentReplicas = getLogReplication();
931 if (numCurrentReplicas != 0 &&
932 numCurrentReplicas < this.initialReplication) {
933 LOG.warn("HDFS pipeline error detected. " +
934 "Found " + numCurrentReplicas + " replicas but expecting " +
935 this.initialReplication + " replicas. " +
936 " Requesting close of hlog.");
937 requestLogRoll();
938 logRollRequested = true;
939 }
940 } catch (Exception e) {
941 LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas" + e +
942 " still proceeding ahead...");
943 }
944 }

  /**
   * Get the datanode replication count for the current HLog file by
   * reflectively invoking DFSOutputStream.getNumCurrentReplicas, when it
   * is available (HDFS-826).
   * @return the replication count, or 0 if it cannot be determined
   */
  int getLogReplication() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
    if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
      Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
      if (repl instanceof Integer) {
        return ((Integer)repl).intValue();
      }
    }
    return 0;
  }

  boolean canGetCurReplicas() {
    return this.getNumCurrentReplicas != null;
  }

  public void hsync() throws IOException {
    // Not implemented separately from sync() yet; defer to it.
    sync();
  }

  private void requestLogRoll() {
    if (this.listener != null) {
      this.listener.logRollRequested();
    }
  }

  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
  throws IOException {
    if (!this.enabled) {
      return;
    }
    if (!this.logEntryVisitors.isEmpty()) {
      for (LogEntryVisitor visitor : this.logEntryVisitors) {
        visitor.visitLogEntryBeforeWrite(info, logKey, logEdit);
      }
    }
    try {
      long now = System.currentTimeMillis();
      this.writer.append(new HLog.Entry(logKey, logEdit));
      long took = System.currentTimeMillis() - now;
      writeTime += took;
      writeOps++;
      if (took > 1000) {
        LOG.warn(Thread.currentThread().getName() + " took " + took +
          "ms appending an edit to hlog; editcount=" + this.numEntries.get());
      }
    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }

  /** @return how many entries have been added to the current log file */
  int getNumEntries() {
    return numEntries.get();
  }

  /** @return a new, monotonically increasing log sequence number */
  private long obtainSeqNum() {
    return this.logSeqNum.incrementAndGet();
  }

  /** @return the number of rolled log files in use */
  int getNumLogFiles() {
    return outputfiles.size();
  }
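
  /**
   * By acquiring a log sequence ID, we can allow log messages to continue
   * while we flush the cache. Acquire a lock so that we do not roll the
   * log between the start and completion of a cache flush; otherwise the
   * log-sequence-id for the flush could land in the wrong log file.
   * @return sequence ID to pass to {@link #completeCacheFlush}
   */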
  public long startCacheFlush() {
    this.cacheFlushLock.lock();
    return obtainSeqNum();
  }
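
  /**
   * Complete the cache flush: append a COMPLETE_CACHE_FLUSH marker to the
   * log, clear the region's oldest-unflushed marker if no newer edit has
   * arrived since the flush started, and release the lock acquired in
   * {@link #startCacheFlush()}.
   * @param regionName name of the flushed region
   * @param tableName name of the region's table
   * @param logSeqId sequence id obtained from {@link #startCacheFlush()}
   */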
  public void completeCacheFlush(final byte [] regionName, final byte [] tableName,
    final long logSeqId,
    final boolean isMetaRegion)
  throws IOException {
    try {
      if (this.closed) {
        return;
      }
      synchronized (updateLock) {
        long now = System.currentTimeMillis();
        WALEdit edit = completeCacheFlushLogEdit();
        HLogKey key = makeKey(regionName, tableName, logSeqId, now);
        this.writer.append(new Entry(key, edit));
        writeTime += System.currentTimeMillis() - now;
        writeOps++;
        this.numEntries.incrementAndGet();
        // Only clear the region's oldest-edit marker if no edit newer than
        // the flushed sequence id arrived in the meantime.
        Long seq = this.lastSeqWritten.get(regionName);
        if (seq != null && logSeqId >= seq.longValue()) {
          this.lastSeqWritten.remove(regionName);
        }
      }
      // sync txn to file system
      this.sync();
    } finally {
      this.cacheFlushLock.unlock();
    }
  }

  private WALEdit completeCacheFlushLogEdit() {
    KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
      System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
    WALEdit e = new WALEdit();
    e.add(kv);
    return e;
  }

  /**
   * Abort a cache flush: release the lock acquired in
   * {@link #startCacheFlush()}. The sequence id obtained there is simply
   * abandoned.
   */
  public void abortCacheFlush() {
    this.cacheFlushLock.unlock();
  }

  /**
   * @param family the column family name to check
   * @return true if the given family is the WAL's internal meta family
   */
  public static boolean isMetaFamily(byte [] family) {
    return Bytes.equals(METAFAMILY, family);
  }
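
  /**
   * Split up a bunch of regionserver commit log files that are no longer
   * being written to, into new files, one per region. Archives the source
   * log files when finished and deletes the now-empty source directory.
   * @param rootDir qualified root directory of the HBase instance
   * @param srcDir directory of log files to split
   * @param oldLogDir directory processed log files are archived to
   * @param fs filesystem handle
   * @param conf configuration to use
   * @return the list of recovered-edits files written
   * @throws IOException if a log file is corrupted and corrupted logs are
   * not tolerated ("hbase.hlog.split.skip.errors")
   */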
  public static List<Path> splitLog(final Path rootDir, final Path srcDir,
    Path oldLogDir, final FileSystem fs, final Configuration conf)
  throws IOException {

    long millis = System.currentTimeMillis();
    List<Path> splits = null;
    if (!fs.exists(srcDir)) {
      // Nothing to do.
      return splits;
    }
    FileStatus [] logfiles = fs.listStatus(srcDir);
    if (logfiles == null || logfiles.length == 0) {
      // Nothing to do.
      return splits;
    }
    LOG.info("Splitting " + logfiles.length + " hlog(s) in " +
      srcDir.toString());
    splits = splitLog(rootDir, srcDir, oldLogDir, logfiles, fs, conf);
    try {
      FileStatus[] files = fs.listStatus(srcDir);
      for (FileStatus file : files) {
        Path newPath = getHLogArchivePath(oldLogDir, file.getPath());
        LOG.info("Moving " + FSUtils.getPath(file.getPath()) + " to " +
          FSUtils.getPath(newPath));
        fs.rename(file.getPath(), newPath);
      }
      LOG.debug("Moved " + files.length + " log files to " +
        FSUtils.getPath(oldLogDir));
      fs.delete(srcDir, true);
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      IOException io = new IOException("Cannot delete: " + srcDir);
      io.initCause(e);
      throw io;
    }
    long endMillis = System.currentTimeMillis();
    LOG.info("hlog file splitting completed in " + (endMillis - millis) +
      " millis for " + srcDir.toString());
    return splits;
  }

  /** Pairs a recovered-edits file path with its open writer. */
  private final static class WriterAndPath {
    final Path p;
    final Writer w;
    WriterAndPath(final Path p, final Writer w) {
      this.p = p;
      this.w = w;
    }
  }

  @SuppressWarnings("unchecked")
  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
    return (Class<? extends HLogKey>)
      conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
  }

  public static HLogKey newKey(Configuration conf) throws IOException {
    Class<? extends HLogKey> keyClass = getKeyClass(conf);
    try {
      return keyClass.newInstance();
    } catch (InstantiationException e) {
      throw new IOException("cannot create hlog key", e);
    } catch (IllegalAccessException e) {
      throw new IOException("cannot create hlog key", e);
    }
  }
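
  /**
   * Sorts the HLog edits from the given list of log files (a mix of edits
   * for many regions) by region, then writes them out under the matching
   * region directories. Log files are consumed in batches of
   * "hbase.hlog.split.batch.size" to bound memory usage: each batch is
   * read into per-region queues, then handed to a pool of writer threads.
   * Corrupted files are either moved aside or abort the split, depending
   * on "hbase.hlog.split.skip.errors".
   * @return the list of recovered-edits files written
   */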
  private static List<Path> splitLog(final Path rootDir, final Path srcDir,
    Path oldLogDir, final FileStatus[] logfiles, final FileSystem fs,
    final Configuration conf)
  throws IOException {
    List<Path> processedLogs = new ArrayList<Path>();
    List<Path> corruptedLogs = new ArrayList<Path>();
    final Map<byte [], WriterAndPath> logWriters =
      Collections.synchronizedMap(
        new TreeMap<byte [], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of log files to read into memory before flushing their edits
    // out to the per-region files; bounds memory usage during a split.
    int logFilesPerStep = conf.getInt("hbase.hlog.split.batch.size", 3);
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", false);

    try {
      int i = -1;
      while (i < logfiles.length) {
        final Map<byte[], LinkedList<Entry>> editsByRegion =
          new TreeMap<byte[], LinkedList<Entry>>(Bytes.BYTES_COMPARATOR);
        for (int j = 0; j < logFilesPerStep; j++) {
          i++;
          if (i == logfiles.length) {
            break;
          }
          FileStatus log = logfiles[i];
          Path logPath = log.getPath();
          long logLength = log.getLen();
          LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length +
            ": " + logPath + ", length=" + logLength);
          try {
            recoverFileLease(fs, logPath, conf);
            parseHLog(log, editsByRegion, fs, conf);
            processedLogs.add(logPath);
          } catch (EOFException eof) {
            // A truncated tail is expected if the writer died mid-append.
            LOG.info("EOF from hlog " + logPath + ". Continuing");
            processedLogs.add(logPath);
          } catch (IOException e) {
            if (skipErrors) {
              LOG.warn("Exception while parsing hlog " + logPath +
                ". Marking as corrupted", e);
              corruptedLogs.add(logPath);
            } else {
              throw e;
            }
          }
        }
        writeEditsBatchToRegions(editsByRegion, logWriters, rootDir, fs, conf);
      }
      if (fs.listStatus(srcDir).length > processedLogs.size() + corruptedLogs.size()) {
        throw new IOException("Discovered orphan hlog after split. Maybe " +
          "HRegionServer was not dead when we started");
      }
      archiveLogs(corruptedLogs, processedLogs, oldLogDir, fs, conf);
    } finally {
      splits = new ArrayList<Path>(logWriters.size());
      for (WriterAndPath wap : logWriters.values()) {
        wap.w.close();
        splits.add(wap.p);
        LOG.debug("Closed " + wap.p);
      }
    }
    return splits;
  }

  /**
   * A WAL entry: an edit plus the key it was written under. Instances are
   * Writable so an entry can be serialized as a unit.
   */
  public static class Entry implements Writable {
    private WALEdit edit;
    private HLogKey key;

    public Entry() {
      edit = new WALEdit();
      key = new HLogKey();
    }

    /**
     * @param key the entry's key
     * @param edit the entry's edit
     */
    public Entry(HLogKey key, WALEdit edit) {
      super();
      this.key = key;
      this.edit = edit;
    }

    /** @return the edit */
    public WALEdit getEdit() {
      return edit;
    }

    /** @return the key */
    public HLogKey getKey() {
      return key;
    }

    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
      this.key.write(dataOutput);
      this.edit.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
      this.key.readFields(dataInput);
      this.edit.readFields(dataInput);
    }
  }

  /**
   * Construct the HLog directory name for the given server.
   * @param info HServerInfo for the server
   * @return the HLog directory name
   */
  public static String getHLogDirectoryName(HServerInfo info) {
    return getHLogDirectoryName(info.getServerName());
  }

  /**
   * Construct the HLog directory name from a server address and start code.
   * @return the HLog directory name, or null if serverAddress is empty
   */
  public static String getHLogDirectoryName(String serverAddress,
    long startCode) {
    if (serverAddress == null || serverAddress.length() == 0) {
      return null;
    }
    return getHLogDirectoryName(
      HServerInfo.getServerName(serverAddress, startCode));
  }

  /**
   * Construct the HLog directory name for the given server name:
   * HConstants.HREGION_LOGDIR_NAME + "/" + serverName.
   */
  public static String getHLogDirectoryName(String serverName) {
    StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
    dirName.append("/");
    dirName.append(serverName);
    return dirName.toString();
  }

  /** @return true if the file name ends in a dot followed by digits */
  public static boolean validateHLogFilename(String filename) {
    return pattern.matcher(filename).matches();
  }

  private static Path getHLogArchivePath(Path oldLogDir, Path p) {
    return new Path(oldLogDir, p.getName());
  }
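
  /**
   * Write a batch of per-region edit queues out to recovered-edits files,
   * one writer thread per region, up to
   * "hbase.regionserver.hlog.splitlog.writer.threads" threads. Blocks
   * until every writer has finished and rethrows any writer failure.
   */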
  private static void writeEditsBatchToRegions(
    final Map<byte[], LinkedList<Entry>> splitLogsMap,
    final Map<byte[], WriterAndPath> logWriters,
    final Path rootDir, final FileSystem fs, final Configuration conf)
  throws IOException {
    // Number of threads to use when rewriting the logs; more means faster
    // but bigger memory consumption.
    int logWriterThreads =
      conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", false);
    HashMap<byte[], Future<Void>> writeFutureResult =
      new HashMap<byte[], Future<Void>>();
    NamingThreadFactory f = new NamingThreadFactory(
      "SplitWriter-%1$d", Executors.defaultThreadFactory());
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor)Executors.newFixedThreadPool(logWriterThreads, f);
    for (final byte [] region : splitLogsMap.keySet()) {
      Callable<Void> splitter = createNewSplitter(rootDir, logWriters, splitLogsMap, region, fs, conf);
      writeFutureResult.put(region, threadPool.submit(splitter));
    }

    threadPool.shutdown();
    // Wait for all writer threads to terminate.
    try {
      for (int j = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); j++) {
        String message = "Waiting for hlog writers to terminate, elapsed " + j * 5 + " seconds";
        if (j < 30) {
          LOG.debug(message);
        } else {
          LOG.info(message);
        }
      }
    } catch (InterruptedException ex) {
      LOG.warn("Hlog writers were interrupted, possible data loss!");
      if (!skipErrors) {
        throw new IOException("Could not finish writing log entries", ex);
      }
    }

    for (Map.Entry<byte[], Future<Void>> entry : writeFutureResult.entrySet()) {
      try {
        entry.getValue().get();
      } catch (ExecutionException e) {
        throw new IOException(e.getCause());
      } catch (InterruptedException e1) {
        LOG.warn("Writer for region " + Bytes.toString(entry.getKey()) +
          " was interrupted, however the write process should have " +
          "finished. Throwing up ", e1);
        throw new IOException(e1);
      }
    }
  }
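
  /**
   * Read every entry of a single WAL file and append each to the queue
   * for its region in <code>splitLogsMap</code>. An empty file that
   * cannot even be opened is skipped, since it was likely never written
   * to.
   */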
  private static void parseHLog(final FileStatus logfile,
    final Map<byte[], LinkedList<Entry>> splitLogsMap, final FileSystem fs,
    final Configuration conf)
  throws IOException {
    // Check for a possibly empty file. With appends, a file can report a
    // zero length even though data has been sync'd to it, so a zero length
    // here is only a hint.
    long length = logfile.getLen();
    if (length <= 0) {
      LOG.warn("File " + logfile.getPath() + " might still be open, length is 0");
    }
    Path path = logfile.getPath();
    Reader in;
    int editsCount = 0;
    try {
      in = HLog.getReader(fs, path, conf);
    } catch (EOFException e) {
      if (length <= 0) {
        // The file is truly empty; nothing to replay, so skip it.
        LOG.warn("Could not open " + path + " for reading. File is empty: " + e);
        return;
      } else {
        throw e;
      }
    }
    try {
      Entry entry;
      while ((entry = in.next()) != null) {
        byte[] region = entry.getKey().getRegionName();
        LinkedList<Entry> queue = splitLogsMap.get(region);
        if (queue == null) {
          queue = new LinkedList<Entry>();
          splitLogsMap.put(region, queue);
        }
        queue.addLast(entry);
        editsCount++;
      }
    } finally {
      LOG.debug("Pushed=" + editsCount + " entries from " + path);
      try {
        if (in != null) {
          in.close();
        }
      } catch (IOException e) {
        LOG.warn("Close log reader in finally threw exception -- continuing", e);
      }
    }
  }

  private static Callable<Void> createNewSplitter(final Path rootDir,
    final Map<byte[], WriterAndPath> logWriters,
    final Map<byte[], LinkedList<Entry>> logEntries,
    final byte[] region, final FileSystem fs, final Configuration conf) {
    return new Callable<Void>() {
      public String getName() {
        return "Split writer thread for region " + Bytes.toStringBinary(region);
      }

      @Override
      public Void call() throws IOException {
        LinkedList<Entry> entries = logEntries.get(region);
        LOG.debug(this.getName() + " got " + entries.size() + " to process");
        long threadTime = System.currentTimeMillis();
        try {
          int editsCount = 0;
          WriterAndPath wap = logWriters.get(region);
          for (Entry logEntry: entries) {
            if (wap == null) {
              // Lazily create the recovered-edits writer for this region.
              Path regionedits = getRegionSplitEditsPath(fs, logEntry, rootDir);
              if (fs.exists(regionedits)) {
                LOG.warn("Found existing old edits file. It could be the " +
                  "result of a previous failed split attempt. Deleting " +
                  regionedits + ", length=" + fs.getFileStatus(regionedits).getLen());
                if (!fs.delete(regionedits, false)) {
                  LOG.warn("Failed delete of old " + regionedits);
                }
              }
              Writer w = createWriter(fs, regionedits, conf);
              wap = new WriterAndPath(regionedits, w);
              logWriters.put(region, wap);
              LOG.debug("Creating writer path=" + regionedits +
                " region=" + Bytes.toStringBinary(region));
            }
            wap.w.append(logEntry);
            editsCount++;
          }
          LOG.debug(this.getName() + " Applied " + editsCount +
            " total edits to " + Bytes.toStringBinary(region) +
            " in " + (System.currentTimeMillis() - threadTime) + "ms");
        } catch (IOException e) {
          e = RemoteExceptionHandler.checkIOException(e);
          LOG.fatal(this.getName() + " Exception while writing log entry to log", e);
          throw e;
        }
        return null;
      }
    };
  }

  /**
   * After a successful split, move processed logs to the archive directory
   * and corrupted logs to the ".corrupt" directory.
   */
  private static void archiveLogs(final List<Path> corruptedLogs,
    final List<Path> processedLogs, final Path oldLogDir,
    final FileSystem fs, final Configuration conf)
  throws IOException {
    final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR),
      conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));

    fs.mkdirs(corruptDir);
    fs.mkdirs(oldLogDir);

    for (Path corrupted: corruptedLogs) {
      Path p = new Path(corruptDir, corrupted.getName());
      LOG.info("Moving corrupted log " + corrupted + " to " + p);
      fs.rename(corrupted, p);
    }

    for (Path p: processedLogs) {
      Path newPath = getHLogArchivePath(oldLogDir, p);
      fs.rename(p, newPath);
      LOG.info("Archived processed log " + p + " to " + newPath);
    }
  }
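
  /**
   * Path to a new recovered-edits file, under the recovered.edits
   * directory of the region named in <code>logEntry</code>, named for the
   * sequence id of that entry. Creates the recovered.edits directory if
   * it does not yet exist.
   */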
  private static Path getRegionSplitEditsPath(final FileSystem fs,
    final Entry logEntry, final Path rootDir)
  throws IOException {
    Path tableDir = HTableDescriptor.getTableDir(rootDir,
      logEntry.getKey().getTablename());
    Path regiondir = HRegion.getRegionDir(tableDir,
      HRegionInfo.encodeRegionName(logEntry.getKey().getRegionName()));
    Path dir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(dir)) {
      if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir);
    }
    return new Path(dir,
      formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum()));
  }

  /** Zero-pad the sequence id to 19 digits so file names sort numerically. */
  static String formatRecoveredEditsFileName(final long seqid) {
    return String.format("%019d", seqid);
  }

  /**
   * Returns the sorted set of recovered-edits files under the region's
   * recovered.edits directory; an empty set if there are none.
   */
  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
    final Path regiondir)
  throws IOException {
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    FileStatus [] files = fs.listStatus(editsdir, new PathFilter () {
      @Override
      public boolean accept(Path p) {
        boolean result = false;
        try {
          // Accept only plain files whose names match the edit-file name
          // pattern; the directory may hold other files as well.
          Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
          result = fs.isFile(p) && m.matches();
        } catch (IOException e) {
          LOG.warn("Failed isFile check on " + p);
        }
        return result;
      }
    });
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    if (files == null) return filesSorted;
    for (FileStatus status: files) {
      filesSorted.add(status.getPath());
    }
    return filesSorted;
  }

  /**
   * Move aside a bad edits file by renaming it with a timestamp suffix.
   * @param fs filesystem handle
   * @param edits the edits file to move aside
   * @return the path the file was moved to
   */
  public static Path moveAsideBadEditsFile(final FileSystem fs,
    final Path edits)
  throws IOException {
    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
      System.currentTimeMillis());
    if (!fs.rename(edits, moveAsideName)) {
      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
    }
    return moveAsideName;
  }

  /**
   * @param regiondir the region directory
   * @return the recovered-edits directory under <code>regiondir</code>
   */
  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
    return new Path(regiondir, RECOVERED_EDITS_DIR);
  }

  /** Register a visitor to be called on every log entry before it is written. */
  public void addLogEntryVisitor(LogEntryVisitor visitor) {
    this.logEntryVisitors.add(visitor);
  }

  /** Unregister a previously added log entry visitor. */
  public void removeLogEntryVisitor(LogEntryVisitor visitor) {
    this.logEntryVisitors.remove(visitor);
  }

  public void addLogActionsListener(LogActionsListener list) {
    LOG.info("Adding a listener");
    this.actionListeners.add(list);
  }

  public boolean removeLogActionsListener(LogActionsListener list) {
    return this.actionListeners.remove(list);
  }

  // Heap size of a bare HLog instance, used in memory accounting.
  public static final long FIXED_OVERHEAD = ClassSize.align(
    ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
    ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));

  private static void usage() {
    System.err.println("Usage: HLog <ARGS>");
    System.err.println("Arguments:");
    System.err.println(" --dump  Dump a textual representation of one or more WAL files");
    System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
    System.err.println(" --split Split the passed directory of WAL logs");
    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }

  private static void dump(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    if (!fs.isFile(p)) {
      throw new IOException(p + " is not a file");
    }
    Reader log = getReader(fs, p, conf);
    try {
      int count = 0;
      HLog.Entry entry;
      while ((entry = log.next()) != null) {
        System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
          entry.toString());
        count++;
      }
    } finally {
      log.close();
    }
  }

  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }
    splitLog(baseDir, p, oldLogDir, fs, conf);
  }

  /**
   * Entry point: pass "--dump" with one or more WAL file names to print a
   * text version of each, or "--split" with one or more log directories
   * to split them.
   */
  public static void main(String[] args) throws IOException {
    if (args.length < 2) {
      usage();
      System.exit(-1);
    }
    boolean dump = true;
    if (!args[0].equals("--dump")) {
      if (args[0].equals("--split")) {
        dump = false;
      } else {
        usage();
        System.exit(-1);
      }
    }
    Configuration conf = HBaseConfiguration.create();
    for (int i = 1; i < args.length; i++) {
      Path logPath = new Path(args[i]);
      try {
        if (dump) {
          dump(conf, logPath);
        } else {
          split(conf, logPath);
        }
      } catch (Throwable t) {
        t.printStackTrace(System.err);
        System.exit(-1);
      }
    }
  }
}