/**
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;

/**
 * Testing {@link HLog} splitting code.
 */
public class TestHLogSplit {

  private final static Log LOG = LogFactory.getLog(TestHLogSplit.class);

  private Configuration conf;
  private FileSystem fs;

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static final Path hbaseDir = new Path("/hbase");
  private static final Path hlogDir = new Path(hbaseDir, "hlog");
  private static final Path oldLogDir = new Path(hbaseDir, "hlog.old");
  private static final Path corruptDir = new Path(hbaseDir, ".corrupt");

  private static final int NUM_WRITERS = 10;
  private static final int ENTRIES = 10; // entries per writer per region
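
  // One HLog.Writer per log file; generateHLogs() fills this array and may
  // deliberately leave one writer open to mimic a log still being written.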
  private HLog.Writer[] writer = new HLog.Writer[NUM_WRITERS];
  private long seq = 0;
  private static final byte[] TABLE_NAME = "t1".getBytes();
  private static final byte[] FAMILY = "f1".getBytes();
  private static final byte[] QUALIFIER = "q1".getBytes();
  private static final byte[] VALUE = "v1".getBytes();
  private static final String HLOG_FILE_PREFIX = "hlog.dat.";
  private static List<String> regions;
  private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
  private static final Path tabledir =
      new Path(hbaseDir, Bytes.toString(TABLE_NAME));
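
  /**
   * The ways in which corruptHLog() can damage a log file: append trailing
   * garbage, rewrite the file with a garbage first byte or a garbage byte in
   * the middle, or truncate the tail of the file.
   */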
  static enum Corruptions {
    INSERT_GARBAGE_ON_FIRST_LINE,
    INSERT_GARBAGE_IN_THE_MIDDLE,
    APPEND_GARBAGE,
    TRUNCATE,
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    TEST_UTIL.getConfiguration().setStrings("hbase.rootdir", hbaseDir.toString());
    TEST_UTIL.getConfiguration().setClass("hbase.regionserver.hlog.writer.impl",
        InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);

    TEST_UTIL.startMiniDFSCluster(2);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniDFSCluster();
  }

  @Before
  public void setUp() throws Exception {
    flushToConsole("Cleaning up cluster for new test\n"
        + "--------------------------");
    conf = TEST_UTIL.getConfiguration();
    fs = TEST_UTIL.getDFSCluster().getFileSystem();
    FileStatus[] entries = fs.listStatus(new Path("/"));
    flushToConsole("Num entries in /: " + entries.length);
    for (FileStatus dir : entries) {
      assertTrue("Deleting " + dir.getPath(),
          fs.delete(dir.getPath(), true));
    }
    // create the HLog directory because recursive log creates are not allowed
    fs.mkdirs(hlogDir);
    seq = 0;
    regions = new ArrayList<String>();
    Collections.addAll(regions, "bbb", "ccc");
    InstrumentedSequenceFileLogWriter.activateFailure = false;
  }

  @After
  public void tearDown() throws Exception {
  }

  /**
   * @throws IOException
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-3020">HBASE-3020</a>
   */
  @Test
  public void testRecoveredEditsPathForMeta() throws IOException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
    Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
    Path regiondir = new Path(tdir,
        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
    fs.mkdirs(regiondir);
    long now = System.currentTimeMillis();
    HLog.Entry entry =
        new HLog.Entry(new HLogKey(encoded,
            HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
        new WALEdit());
    Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
    String parentOfParent = p.getParent().getParent().getName();
    assertEquals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), parentOfParent);
  }

  @Test(expected = OrphanHLogAfterSplitException.class)
  public void testSplitFailsIfNewHLogGetsCreatedAfterSplitStarted()
  throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);

    assertFalse("Previous test should clean up table dir",
        fs.exists(new Path("/hbase/t1")));

    generateHLogs(-1);

    try {
      (new ZombieNewLogWriterRegionServer(stop)).start();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      stop.set(true);
    }
  }

  @Test
  public void testSplitPreservesEdits() throws IOException {
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    assertTrue("edits differ after split", logsAreEqual(originalLog, splitLog));
  }

  @Test
  public void testEmptyLogFiles() throws IOException {
    injectEmptyFile(".empty", true);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", true);

    // make fs act as a different client now
    // initialize will create a new DFSClient with a new client ID
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testEmptyOpenLogFiles() throws IOException {
    injectEmptyFile(".empty", false);
    generateHLogs(Integer.MAX_VALUE);
    injectEmptyFile("empty", false);

    // make fs act as a different client now
    // initialize will create a new DFSClient with a new client ID
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testOpenZeroLengthReportedFileButWithDataGetsSplit() throws IOException {
    // generate logs but leave hlog.dat.5 open.
    generateHLogs(5);

    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testTrailingGarbageCorruptionFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.APPEND_GARBAGE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testFirstLineCorruptionLogFileSkipErrorsPasses() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals((NUM_WRITERS - 1) * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(Integer.MAX_VALUE);
    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
        Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      // the entries in the original logs are alternating regions
      // considering the sequence file header, the middle corruption should
      // affect at least half of the entries
      int goodEntries = (NUM_WRITERS - 1) * ENTRIES;
      // divide as doubles; ENTRIES / 2 would truncate before ceil() is applied
      int firstHalfEntries = (int) Math.ceil(ENTRIES / 2.0) - 1;
      assertTrue("The file up to the corrupted area hasn't been parsed",
          goodEntries + firstHalfEntries <= countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      for (FaultySequenceFileLogReader.FailureType failureType : FaultySequenceFileLogReader.FailureType.values()) {
        conf.set("faultysequencefilelogreader.failuretype", failureType.name());
        generateHLogs(1, ENTRIES, -1);
        fs.initialize(fs.getUri(), conf);
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
        FileStatus[] archivedLogs = fs.listStatus(corruptDir);
        assertEquals("expected a different file", c1.getName(),
            archivedLogs[0].getPath().getName());
        assertEquals(1, archivedLogs.length);
        fs.delete(new Path(oldLogDir, HLOG_FILE_PREFIX + "0"), false);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test(expected = IOException.class)
  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(Integer.MAX_VALUE);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test
  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs()
      throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
        Reader.class);
    InstrumentedSequenceFileLogWriter.activateFailure = false;
    HLog.resetLogReaderClass();

    try {
      conf.setClass("hbase.regionserver.hlog.reader.impl",
          FaultySequenceFileLogReader.class, HLog.Reader.class);
      conf.set("faultysequencefilelogreader.failuretype",
          FaultySequenceFileLogReader.FailureType.BEGINNING.name());
      generateHLogs(-1);
      fs.initialize(fs.getUri(), conf);
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      try {
        logSplitter.splitLog();
      } catch (IOException e) {
        assertEquals(
            "if skip.errors is false all files should remain in place",
            NUM_WRITERS, fs.listStatus(hlogDir).length);
      }
    } finally {
      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
          Reader.class);
      HLog.resetLogReaderClass();
    }
  }

  @Test
  public void testEOFisIgnored() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    int entryCount = 10;
    Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
    generateHLogs(1, entryCount, -1);
    corruptHLog(c1, Corruptions.TRUNCATE, true, fs);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    int actualCount = 0;
    HLog.Reader in = HLog.getReader(fs, splitLog, conf);
    HLog.Entry entry;
    while ((entry = in.next()) != null) ++actualCount;
    assertEquals(entryCount - 1, actualCount);

    // should not have stored the EOF files as corrupt
    FileStatus[] archivedLogs = fs.listStatus(corruptDir);
    assertEquals(0, archivedLogs.length);
  }

  @Test
  public void testLogsGetArchivedAfterSplit() throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    FileStatus[] archivedLogs = fs.listStatus(oldLogDir);

    assertEquals("wrong number of files in the archive log", NUM_WRITERS, archivedLogs.length);
  }

  @Test
  public void testSplit() throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    for (String region : regions) {
      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(NUM_WRITERS * ENTRIES, countHLog(logfile, fs, conf));
    }
  }

  @Test
  public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit()
  throws IOException {
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    FileStatus[] statuses = null;
    try {
      statuses = fs.listStatus(hlogDir);
      if (statuses != null) {
        Assert.fail("Files left in log dir: " +
            Joiner.on(",").join(FileUtil.stat2Paths(statuses)));
      }
    } catch (FileNotFoundException e) {
      // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null
    }
  }

/* DISABLED for now.  TODO: HBASE-2645
  @Test
  public void testLogCannotBeWrittenOnceParsed() throws IOException {
    AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    generateHLogs(9);
    fs.initialize(fs.getUri(), conf);

    Thread zombie = new ZombieLastLogWriterRegionServer(writer[9], counter, stop);

    try {
      zombie.start();

      HLog.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);

      Path logfile = getLogForRegion(hbaseDir, TABLE_NAME, "juliet");

      // It's possible that the writer got an error while appending and didn't count it
      // however the entry will in fact be written to file and split with the rest
      long numberOfEditsInRegion = countHLog(logfile, fs, conf);
      assertTrue("The log file could have at most 1 extra log entry, but " +
          "can't have less. Zombie could write " + counter.get() +
          " and logfile had only " + numberOfEditsInRegion + " " + logfile,
          counter.get() == numberOfEditsInRegion ||
          counter.get() + 1 == numberOfEditsInRegion);
    } finally {
      stop.set(true);
    }
  }
*/

  @Test
  public void testSplitWillNotTouchLogsIfNewHLogGetsCreatedAfterSplitStarted()
  throws IOException {
    AtomicBoolean stop = new AtomicBoolean(false);
    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    Thread zombie = new ZombieNewLogWriterRegionServer(stop);

    try {
      zombie.start();
      try {
        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
            hbaseDir, hlogDir, oldLogDir, fs);
        logSplitter.splitLog();
      } catch (IOException ex) { /* expected */ }
      int logFilesNumber = fs.listStatus(hlogDir).length;

      assertEquals("Log files should not be archived if there's an extra file after split",
          NUM_WRITERS + 1, logFilesNumber);
    } finally {
      stop.set(true);
    }
  }

  @Test(expected = IOException.class)
  public void testSplitWillFailIfWritingToRegionFails() throws Exception {
    // leave the 5th log open so we can append the "trap"
    generateHLogs(4);

    fs.initialize(fs.getUri(), conf);

    String region = "break";
    Path regiondir = new Path(tabledir, region);
    fs.mkdirs(regiondir);

    InstrumentedSequenceFileLogWriter.activateFailure = false;
    appendEntry(writer[4], TABLE_NAME, Bytes.toBytes(region),
        ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
    writer[4].close();

    try {
      InstrumentedSequenceFileLogWriter.activateFailure = true;
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, hlogDir, oldLogDir, fs);
      logSplitter.splitLog();
    } catch (IOException e) {
      assertEquals("This exception is instrumented and should only be thrown for testing", e.getMessage());
      throw e;
    } finally {
      InstrumentedSequenceFileLogWriter.activateFailure = false;
    }
  }

  // @Test TODO this test has been disabled since it was created!
  // It currently fails because the second split doesn't output anything
  // -- because there are no region dirs after we move aside the first
  // split result
  public void testSplittingLargeNumberOfRegionsConsistency() throws IOException {
    regions.clear();
    for (int i = 0; i < 100; i++) {
      regions.add("region__" + i);
    }

    generateHLogs(1, 100, -1);
    fs.initialize(fs.getUri(), conf);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();
    fs.rename(oldLogDir, hlogDir);
    Path firstSplitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME) + ".first");
    Path splitPath = new Path(hbaseDir, Bytes.toString(TABLE_NAME));
    fs.rename(splitPath, firstSplitPath);

    fs.initialize(fs.getUri(), conf);
    logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertEquals(0, compareHLogSplitDirs(firstSplitPath, splitPath));
  }

  @Test
  public void testSplitDeletedRegion() throws IOException {
    regions.clear();
    String region = "region_that_splits";
    regions.add(region);

    generateHLogs(1);

    fs.initialize(fs.getUri(), conf);

    Path regiondir = new Path(tabledir, region);
    fs.delete(regiondir, true);

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
        hbaseDir, hlogDir, oldLogDir, fs);
    logSplitter.splitLog();

    assertFalse(fs.exists(regiondir));
  }

  @Test
  public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);
    // Set up a splitter that will throw an IOE on the output side
    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, fs) {
      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
      throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }
    };
    try {
      logSplitter.splitLog();
      fail("Didn't throw!");
    } catch (IOException ioe) {
      assertTrue(ioe.toString().contains("Injected"));
    }
  }

  // Test for HBASE-3412
  @Test
  public void testMovedHLogDuringRecovery() throws Exception {
    generateHLogs(-1);

    fs.initialize(fs.getUri(), conf);

    // This partial mock will throw LEE for every file simulating
    // files that were moved
    FileSystem spiedFs = Mockito.spy(fs);
    // The "File does not exist" part is very important,
    // that's how it comes out of HDFS
    Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).
        when(spiedFs).append(Mockito.<Path>any());

    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, spiedFs);

    try {
      logSplitter.splitLog();
      assertEquals(NUM_WRITERS, fs.listStatus(oldLogDir).length);
      assertFalse(fs.exists(hlogDir));
    } catch (IOException e) {
      fail("There shouldn't be any exception but: " + e.toString());
    }
  }

  /**
   * Test log split process with fake data and lots of edits to trigger threading
   * issues.
   */
  @Test
  public void testThreading() throws Exception {
    doTestThreading(20000, 128 * 1024 * 1024, 0);
  }

  /**
   * Test blocking behavior of the log split process if writers are writing slower
   * than the reader is reading.
   */
  @Test
  public void testThreadingSlowWriterSmallBuffer() throws Exception {
    doTestThreading(200, 1024, 50);
  }

  /**
   * Sets up a log splitter with a mock reader and writer. The mock reader generates
   * a specified number of edits spread across 5 regions. The mock writer optionally
   * sleeps for each edit it is fed.
   *
   * After the split is complete, verifies that the statistics show the correct number
   * of edits output into each region.
   *
   * @param numFakeEdits number of fake edits to push through pipeline
   * @param bufferSize size of in-memory buffer
   * @param writerSlowness writer threads will sleep this many ms per edit
   */
  private void doTestThreading(final int numFakeEdits,
      final int bufferSize,
      final int writerSlowness) throws Exception {

    Configuration localConf = new Configuration(conf);
    localConf.setInt("hbase.regionserver.hlog.splitlog.buffersize", bufferSize);

    // Create a fake log file (we'll override the reader to produce a stream of edits)
    FSDataOutputStream out = fs.create(new Path(hlogDir, HLOG_FILE_PREFIX + ".fake"));
    out.close();

    // Make region dirs for our destination regions so the output doesn't get skipped
    final List<String> regions = ImmutableList.of("r0", "r1", "r2", "r3", "r4");
    makeRegionDirs(fs, regions);

    // Create a splitter that reads and writes the data without touching disk
    HLogSplitter logSplitter = new HLogSplitter(
        localConf, hbaseDir, hlogDir, oldLogDir, fs) {

      /* Produce a mock writer that doesn't write anywhere */
      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
      throws IOException {
        HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
        Mockito.doAnswer(new Answer<Void>() {
          int expectedIndex = 0;

          @Override
          public Void answer(InvocationOnMock invocation) {
            if (writerSlowness > 0) {
              try {
                Thread.sleep(writerSlowness);
              } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
              }
            }
            HLog.Entry entry = (Entry) invocation.getArguments()[0];
            WALEdit edit = entry.getEdit();
            List<KeyValue> keyValues = edit.getKeyValues();
            assertEquals(1, keyValues.size());
            KeyValue kv = keyValues.get(0);

            // Check that the edits come in the right order.
            assertEquals(expectedIndex, Bytes.toInt(kv.getRow()));
            expectedIndex++;
            return null;
          }
        }).when(mockWriter).append(Mockito.<HLog.Entry>any());
        return mockWriter;
      }

      /* Produce a mock reader that generates fake entries */
      protected Reader getReader(FileSystem fs, Path curLogFile, Configuration conf)
      throws IOException {
        Reader mockReader = Mockito.mock(Reader.class);
        Mockito.doAnswer(new Answer<HLog.Entry>() {
          int index = 0;

          @Override
          public HLog.Entry answer(InvocationOnMock invocation) throws Throwable {
            if (index >= numFakeEdits) return null;

            // Generate r0 through r4 in round robin fashion
            int regionIdx = index % regions.size();
            byte[] region = new byte[] {(byte) 'r', (byte) (0x30 + regionIdx)};

            HLog.Entry ret = createTestEntry(TABLE_NAME, region,
                Bytes.toBytes((int) (index / regions.size())),
                FAMILY, QUALIFIER, VALUE, index);
            index++;
            return ret;
          }
        }).when(mockReader).next();
        return mockReader;
      }
    };

    logSplitter.splitLog();

    // Verify number of written edits per region
    Map<byte[], Long> outputCounts = logSplitter.getOutputCounts();
    for (Map.Entry<byte[], Long> entry : outputCounts.entrySet()) {
      LOG.info("Got " + entry.getValue() + " output edits for region " +
          Bytes.toString(entry.getKey()));
      assertEquals(numFakeEdits / regions.size(), (long) entry.getValue());
    }
    assertEquals(regions.size(), outputCounts.size());
  }

  // HBASE-2312: tests the case where a RegionServer enters a GC pause,
  // comes back online after the master declared it dead and started to split.
  // Want log rolling after a master split to fail
  @Test
  @Ignore("Need HADOOP-6886, HADOOP-6840, & HDFS-617 for this. HDFS 0.20.205.1+ should have this")
  public void testLogRollAfterSplitStart() throws IOException {
    // set flush interval to a large number so it doesn't interrupt us
    final String F_INTERVAL = "hbase.regionserver.optionallogflushinterval";
    long oldFlushInterval = conf.getLong(F_INTERVAL, 1000);
    conf.setLong(F_INTERVAL, 1000 * 1000 * 100);
    HLog log = null;
    Path thisTestsDir = new Path(hbaseDir, "testLogRollAfterSplitStart");

    try {
      // put some entries in an HLog
      byte[] tableName = Bytes.toBytes(this.getClass().getName());
      HRegionInfo regioninfo = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      log = new HLog(fs, thisTestsDir, oldLogDir, conf);
      final int total = 20;
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor("column"));
        log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
      }
      // Send the data to HDFS datanodes and close the HDFS writer
      log.sync();
      log.cleanupCurrentWriter(log.getFilenum());

      /* code taken from ProcessServerShutdown.process()
       * handles RS shutdowns (as observed by the Master)
       */
      // rename the directory so a rogue RS doesn't create more HLogs
      Path rsSplitDir = new Path(thisTestsDir.getParent(),
          thisTestsDir.getName() + "-splitting");
      fs.rename(thisTestsDir, rsSplitDir);
      LOG.debug("Renamed region directory: " + rsSplitDir);

      // Process the old log files
      HLogSplitter splitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, rsSplitDir, oldLogDir, fs);
      splitter.splitLog();

      // Now, try to roll the HLog and verify failure
      try {
        log.rollWriter();
        Assert.fail("rollWriter() did not throw any exception.");
      } catch (IOException ioe) {
        if (ioe.getCause().getMessage().contains("FileNotFound")) {
          LOG.info("Got the expected exception: ", ioe.getCause());
        } else {
          Assert.fail("Unexpected exception: " + ioe);
        }
      }
    } finally {
      conf.setLong(F_INTERVAL, oldFlushInterval);
      if (log != null) {
        log.close();
      }
      if (fs.exists(thisTestsDir)) {
        fs.delete(thisTestsDir, true);
      }
    }
  }

  /**
   * This thread keeps writing to the last log file after the split process has
   * started. It simulates a region server that was considered dead but woke up
   * and wrote some more entries to the log it had been writing.
   */
  class ZombieLastLogWriterRegionServer extends Thread {
    AtomicLong editsCount;
    AtomicBoolean stop;
    Path log;
    HLog.Writer lastLogWriter;

    public ZombieLastLogWriterRegionServer(HLog.Writer writer, AtomicLong counter, AtomicBoolean stop) {
      this.stop = stop;
      this.editsCount = counter;
      this.lastLogWriter = writer;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      flushToConsole("starting");
      while (true) {
        try {
          String region = "juliet";

          fs.mkdirs(new Path(new Path(hbaseDir, region), region));
          appendEntry(lastLogWriter, TABLE_NAME, region.getBytes(),
              ("r" + editsCount).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
          lastLogWriter.sync();
          editsCount.incrementAndGet();
          try {
            Thread.sleep(1);
          } catch (InterruptedException e) {
            // ignore and keep writing
          }
        } catch (IOException ex) {
          if (ex instanceof RemoteException) {
            flushToConsole("Juliet: got RemoteException " +
                ex.getMessage() + " while writing " + (editsCount.get() + 1));
            break;
          } else {
            fail("Failed to write " + editsCount.get());
          }
        }
      }
    }
  }

  /**
   * This thread keeps adding new log files. It simulates a region server that
   * was considered dead but woke up and wrote some more edits to a new hlog.
   */
  class ZombieNewLogWriterRegionServer extends Thread {
    AtomicBoolean stop;

    public ZombieNewLogWriterRegionServer(AtomicBoolean stop) {
      super("ZombieNewLogWriterRegionServer");
      this.stop = stop;
    }

    @Override
    public void run() {
      if (stop.get()) {
        return;
      }
      Path tableDir = new Path(hbaseDir, new String(TABLE_NAME));
      Path regionDir = new Path(tableDir, regions.get(0));
      Path recoveredEdits = new Path(regionDir, HLogSplitter.RECOVERED_EDITS);
      String region = "juliet";
      Path julietLog = new Path(hlogDir, HLOG_FILE_PREFIX + ".juliet");
      try {
        while (!fs.exists(recoveredEdits) && !stop.get()) {
          flushToConsole("Juliet: split not started, sleeping a bit...");
          Threads.sleep(10);
        }

        fs.mkdirs(new Path(tableDir, region));
        HLog.Writer writer = HLog.createWriter(fs, julietLog, conf);
        appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
            ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
        writer.close();
        flushToConsole("Juliet file creator: created file " + julietLog);
      } catch (IOException e1) {
        fail("Failed to create file " + julietLog);
      }
    }
  }
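
  // CancelableProgressable handed to splitLogFileToTemp(); it never cancels,
  // it just counts and logs each progress callback.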
  private CancelableProgressable reporter = new CancelableProgressable() {
    int count = 0;

    @Override
    public boolean progress() {
      count++;
      LOG.debug("progress = " + count);
      return true;
    }
  };

  @Test
  public void testSplitLogFileWithOneRegion() throws IOException {
    LOG.info("testSplitLogFileWithOneRegion");
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);
    HLogSplitter.splitLogFileToTemp(hbaseDir, "tmpdir", logfile, fs,
        conf, reporter);
    HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
        logfile.getPath().toString(), conf);

    Path originalLog = (fs.listStatus(oldLogDir))[0].getPath();
    Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);

    assertTrue(logsAreEqual(originalLog, splitLog));
  }

  @Test
  public void testSplitLogFileDeletedRegionDir()
  throws IOException {
    LOG.info("testSplitLogFileDeletedRegionDir");
    final String REGION = "region__1";
    regions.clear();
    regions.add(REGION);

    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);

    Path regiondir = new Path(tabledir, REGION);
    LOG.info("Region directory is " + regiondir);
    fs.delete(regiondir, true);

    HLogSplitter.splitLogFileToTemp(hbaseDir, "tmpdir", logfile, fs,
        conf, reporter);
    HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
        logfile.getPath().toString(), conf);

    assertFalse(fs.exists(regiondir));
  }

  @Test
  public void testSplitLogFileEmpty() throws IOException {
    LOG.info("testSplitLogFileEmpty");
    injectEmptyFile(".empty", true);
    FileStatus logfile = fs.listStatus(hlogDir)[0];

    fs.initialize(fs.getUri(), conf);

    HLogSplitter.splitLogFileToTemp(hbaseDir, "tmpdir", logfile, fs,
        conf, reporter);
    HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
        logfile.getPath().toString(), conf);
    Path tdir = HTableDescriptor.getTableDir(hbaseDir, TABLE_NAME);
    assertFalse(fs.exists(tdir));

    assertEquals(0, countHLog(fs.listStatus(oldLogDir)[0].getPath(), fs, conf));
  }

  @Test
  public void testSplitLogFileMultipleRegions() throws IOException {
    LOG.info("testSplitLogFileMultipleRegions");
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];
    fs.initialize(fs.getUri(), conf);

    HLogSplitter.splitLogFileToTemp(hbaseDir, "tmpdir", logfile, fs,
        conf, reporter);
    HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
        logfile.getPath().toString(), conf);
    for (String region : regions) {
      Path recovered = getLogForRegion(hbaseDir, TABLE_NAME, region);
      assertEquals(10, countHLog(recovered, fs, conf));
    }
  }

  @Test
  public void testSplitLogFileFirstLineCorruptionLog()
  throws IOException {
    conf.setBoolean(HBASE_SKIP_ERRORS, true);
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(hlogDir)[0];

    corruptHLog(logfile.getPath(),
        Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);

    fs.initialize(fs.getUri(), conf);
    HLogSplitter.splitLogFileToTemp(hbaseDir, "tmpdir", logfile, fs,
        conf, reporter);
    HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
        logfile.getPath().toString(), conf);

    final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get(
        "hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt"));
    assertEquals(1, fs.listStatus(corruptDir).length);
  }

  /**
   * @throws IOException
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-4862">HBASE-4862</a>
   */
  @Test
  public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException {
    LOG.info("testConcurrentSplitLogAndReplayRecoverEdit");
    // Generate hlogs for our destination region
    String regionName = "r0";
    final Path regiondir = new Path(tabledir, regionName);
    regions = new ArrayList<String>();
    regions.add(regionName);
    generateHLogs(-1);

    HLogSplitter logSplitter = new HLogSplitter(
        conf, hbaseDir, hlogDir, oldLogDir, fs) {
      protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
      throws IOException {
        HLog.Writer writer = HLog.createWriter(fs, logfile, conf);
        // After creating the writer, simulate the region's
        // replayRecoveredEditsIfAny(), which gets the SplitEditFiles of this
        // region and deletes them, excluding files with a '.temp' suffix.
        NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs,
            regiondir);
        if (files != null && !files.isEmpty()) {
          for (Path file : files) {
            if (!this.fs.delete(file, false)) {
              LOG.error("Failed delete of " + file);
            } else {
              LOG.debug("Deleted recovered.edits file=" + file);
            }
          }
        }
        return writer;
      }
    };
    try {
      logSplitter.splitLog();
    } catch (IOException e) {
      LOG.info(e);
      Assert.fail("Threw IOException when splitting "
          + "log, most likely because the file being written does not "
          + "exist, which is caused by concurrent replayRecoveredEditsIfAny()");
    }
    if (fs.exists(corruptDir)) {
      if (fs.listStatus(corruptDir).length > 0) {
        Assert.fail("There are some corrupt logs, "
            + "most likely caused by concurrent replayRecoveredEditsIfAny()");
      }
    }
  }

  private void flushToConsole(String s) {
    System.out.println(s);
    System.out.flush();
  }

  private void generateHLogs(int leaveOpen) throws IOException {
    generateHLogs(NUM_WRITERS, ENTRIES, leaveOpen);
  }

  private void makeRegionDirs(FileSystem fs, List<String> regions) throws IOException {
    for (String region : regions) {
      flushToConsole("Creating dir for region " + region);
      fs.mkdirs(new Path(tabledir, region));
    }
  }
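
  /**
   * Writes {@code entries} edits per region into each of {@code writers} log
   * files. If {@code leaveOpen} equals a writer's index, that writer is left
   * open to simulate a log that is still being written; pass -1 to close all.
   */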
  private void generateHLogs(int writers, int entries, int leaveOpen) throws IOException {
    makeRegionDirs(fs, regions);
    fs.mkdirs(hlogDir);
    for (int i = 0; i < writers; i++) {
      writer[i] = HLog.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), conf);
      for (int j = 0; j < entries; j++) {
        int prefix = 0;
        for (String region : regions) {
          String rowKey = region + prefix++ + i + j;
          appendEntry(writer[i], TABLE_NAME, region.getBytes(),
              rowKey.getBytes(), FAMILY, QUALIFIER, VALUE, seq);
        }
      }
      if (i != leaveOpen) {
        writer[i].close();
        flushToConsole("Closing writer " + i);
      }
    }
  }
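
  /**
   * Returns the single recovered.edits file produced for the given region,
   * asserting that exactly one such file exists.
   */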
  private Path getLogForRegion(Path rootdir, byte[] table, String region)
  throws IOException {
    Path tdir = HTableDescriptor.getTableDir(rootdir, table);
    Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
        Bytes.toString(region.getBytes())));
    FileStatus[] files = this.fs.listStatus(editsdir);
    assertEquals(1, files.length);
    return files[0].getPath();
  }
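
  /**
   * Damages the log file at {@code path} according to {@code corruption}:
   * appends trailing garbage, rewrites the file with a garbage first byte or a
   * garbage byte in the middle, or truncates the last 32 bytes. If
   * {@code close} is false the stream is only synced and left open.
   */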
  private void corruptHLog(Path path, Corruptions corruption, boolean close,
                           FileSystem fs) throws IOException {
    FSDataOutputStream out;
    int fileSize = (int) fs.listStatus(path)[0].getLen();

    FSDataInputStream in = fs.open(path);
    byte[] corruptedBytes = new byte[fileSize];
    in.readFully(0, corruptedBytes, 0, fileSize);
    in.close();

    switch (corruption) {
      case APPEND_GARBAGE:
        out = fs.append(path);
        out.write("-----".getBytes());
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_ON_FIRST_LINE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(0);
        out.write(corruptedBytes);
        closeOrFlush(close, out);
        break;

      case INSERT_GARBAGE_IN_THE_MIDDLE:
        fs.delete(path, false);
        out = fs.create(path);
        int middle = corruptedBytes.length / 2;
        out.write(corruptedBytes, 0, middle);
        out.write(0);
        out.write(corruptedBytes, middle, corruptedBytes.length - middle);
        closeOrFlush(close, out);
        break;

      case TRUNCATE:
        fs.delete(path, false);
        out = fs.create(path);
        out.write(corruptedBytes, 0, fileSize - 32);
        closeOrFlush(close, out);
        break;
    }
  }

  private void closeOrFlush(boolean close, FSDataOutputStream out)
  throws IOException {
    if (close) {
      out.close();
    } else {
      out.sync();
      // hflush() is not available in hadoop 0.20; sync() serves the same purpose here
    }
  }

  @SuppressWarnings("unused")
  private void dumpHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    HLog.Entry entry;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while ((entry = in.next()) != null) {
      System.out.println(entry);
    }
  }
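
  /** Returns the number of entries that can be read back from the given log. */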
  private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
    int count = 0;
    HLog.Reader in = HLog.getReader(fs, log, conf);
    while (in.next() != null) {
      count++;
    }
    return count;
  }
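
  /**
   * Appends a single test edit to the given writer and syncs it; returns the
   * sequence number that was passed in.
   */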
  public long appendEntry(HLog.Writer writer, byte[] table, byte[] region,
                          byte[] row, byte[] family, byte[] qualifier,
                          byte[] value, long seq)
          throws IOException {
    writer.append(createTestEntry(table, region, row, family, qualifier, value, seq));
    writer.sync();
    return seq;
  }
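
  /** Builds an HLog.Entry holding one Put KeyValue for the given coordinates. */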
  private HLog.Entry createTestEntry(
      byte[] table, byte[] region,
      byte[] row, byte[] family, byte[] qualifier,
      byte[] value, long seq) {
    long time = System.nanoTime();
    WALEdit edit = new WALEdit();
    seq++;
    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
    return new HLog.Entry(new HLogKey(region, table, seq, time,
        HConstants.DEFAULT_CLUSTER_ID), edit);
  }
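
  /**
   * Creates an empty log file named with the given suffix; when
   * {@code closeFile} is false the file is left open, mimicking a log a
   * region server has not yet closed.
   */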
  private void injectEmptyFile(String suffix, boolean closeFile)
          throws IOException {
    HLog.Writer writer = HLog.createWriter(
            fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
    if (closeFile) writer.close();
  }

  @SuppressWarnings("unused")
  private void listLogs(FileSystem fs, Path dir) throws IOException {
    for (FileStatus file : fs.listStatus(dir)) {
      System.out.println(file.getPath());
    }
  }
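
  /**
   * Returns 0 if the recovered-edits files under the two split directories are
   * pairwise equal, -1 otherwise; assumes exactly one edits file per region.
   */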
  private int compareHLogSplitDirs(Path p1, Path p2) throws IOException {
    FileStatus[] f1 = fs.listStatus(p1);
    FileStatus[] f2 = fs.listStatus(p2);
    assertNotNull("Path " + p1 + " doesn't exist", f1);
    assertNotNull("Path " + p2 + " doesn't exist", f2);

    System.out.println("Files in " + p1 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f1)));
    System.out.println("Files in " + p2 + ": " +
        Joiner.on(",").join(FileUtil.stat2Paths(f2)));
    assertEquals(f1.length, f2.length);

    for (int i = 0; i < f1.length; i++) {
      // Regions now have a directory named RECOVERED_EDITS_DIR and in here
      // are split edit files. In below presume only 1.
      Path rd1 = HLog.getRegionDirRecoveredEditsDir(f1[i].getPath());
      FileStatus[] rd1fs = fs.listStatus(rd1);
      assertEquals(1, rd1fs.length);
      Path rd2 = HLog.getRegionDirRecoveredEditsDir(f2[i].getPath());
      FileStatus[] rd2fs = fs.listStatus(rd2);
      assertEquals(1, rd2fs.length);
      if (!logsAreEqual(rd1fs[0].getPath(), rd2fs[0].getPath())) {
        return -1;
      }
    }
    return 0;
  }
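
  /**
   * Returns true if the two logs contain the same entries, compared key by
   * key and edit by edit in order.
   */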
  private boolean logsAreEqual(Path p1, Path p2) throws IOException {
    HLog.Reader in1, in2;
    in1 = HLog.getReader(fs, p1, conf);
    in2 = HLog.getReader(fs, p2, conf);
    HLog.Entry entry1;
    HLog.Entry entry2;
    while ((entry1 = in1.next()) != null) {
      entry2 = in2.next();
      // guard against the second log being shorter than the first
      if (entry2 == null ||
          (entry1.getKey().compareTo(entry2.getKey()) != 0) ||
          (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) {
        return false;
      }
    }
    return true;
  }
}