/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
import org.mockito.Mockito;

public class TestCatalogJanitor {
  /**
   * Pseudo server for the tests below.
   * Be sure to call stop() on the way out, else it could leave a mess behind.
   */
  class MockServer implements Server {
    private final HConnection connection;
    private final Configuration c;
    private final CatalogTracker ct;

    MockServer(final HBaseTestingUtility htu)
    throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
      this.c = htu.getConfiguration();
      // Mock an HConnection and an HRegionInterface implementation.  Have the
      // HConnection return the HRI.  Have the HRI return a few mocked-up
      // responses to make our tests work.
      this.connection = HConnectionTestingUtility.getMockedConnection(this.c);
      // Fully qualify hbase.rootdir against the filesystem and set it back into the conf.
      FileSystem fs = FileSystem.get(this.c);
      Path rootdir = fs.makeQualified(new Path(this.c.get(HConstants.HBASE_DIR)));
      this.c.set(HConstants.HBASE_DIR, rootdir.toString());
      this.ct = Mockito.mock(CatalogTracker.class);
      HRegionInterface hri = Mockito.mock(HRegionInterface.class);
      Mockito.when(this.ct.getConnection()).thenReturn(this.connection);
      Mockito.when(ct.waitForMetaServerConnectionDefault()).thenReturn(hri);
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return this.ct;
    }

    @Override
    public Configuration getConfiguration() {
      return this.c;
    }

    @Override
    public ServerName getServerName() {
      return new ServerName("mockserver.example.org", 1234, -1L);
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      return null;
    }

    @Override
    public void abort(String why, Throwable e) {
      //no-op
    }

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public boolean isStopped() {
      return false;
    }

    @Override
    public void stop(String why) {
      if (this.ct != null) {
        this.ct.stop();
      }
      if (this.connection != null) {
        HConnectionManager.deleteConnection(this.connection.getConfiguration(), true);
      }
    }
  }

  /**
   * Mock MasterServices for the tests below.
   */
  class MockMasterServices implements MasterServices {
    private final MasterFileSystem mfs;
    private final AssignmentManager asm;

    MockMasterServices(final Server server) throws IOException {
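      // Use a real MasterFileSystem so the tests can write reference files
      // under the root dir; the AssignmentManager is never exercised here,
      // so a Mockito mock suffices.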
      this.mfs = new MasterFileSystem(server, this, null);
      this.asm = Mockito.mock(AssignmentManager.class);
    }

    @Override
    public void checkTableModifiable(byte[] tableName) throws IOException {
      //no-op
    }

    @Override
    public void createTable(HTableDescriptor desc, byte[][] splitKeys)
        throws IOException {
      // no-op
    }

    @Override
    public AssignmentManager getAssignmentManager() {
      return this.asm;
    }

    @Override
    public ExecutorService getExecutorService() {
      return null;
    }

    @Override
    public MasterFileSystem getMasterFileSystem() {
      return this.mfs;
    }

    @Override
    public ServerManager getServerManager() {
      return null;
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      return null;
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return null;
    }

    @Override
    public Configuration getConfiguration() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return null;
    }

    @Override
    public void abort(String why, Throwable e) {
      //no-op
    }

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {
      //no-op
    }

    @Override
    public boolean isStopped() {
      return false;
    }

    @Override
    public TableDescriptors getTableDescriptors() {
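      // Return a stub whose get() always hands back a fresh one-column-family
      // descriptor; the remaining methods are not used by these tests.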
      return new TableDescriptors() {
        @Override
        public HTableDescriptor remove(String tablename) throws IOException {
          // Not used by these tests.
          return null;
        }

        @Override
        public Map<String, HTableDescriptor> getAll() throws IOException {
          // Not used by these tests.
          return null;
        }

        @Override
        public HTableDescriptor get(byte[] tablename)
        throws TableExistsException, FileNotFoundException, IOException {
          return get(Bytes.toString(tablename));
        }

        @Override
        public HTableDescriptor get(String tablename)
        throws TableExistsException, FileNotFoundException, IOException {
          return createHTableDescriptor();
        }

        @Override
        public void add(HTableDescriptor htd) throws IOException {
          // Not used by these tests.
        }
      };
    }
  }

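  /**
   * Verify that CatalogJanitor.getHRegionInfo handles an empty Result, a
   * Result missing the regioninfo qualifier or holding an unparseable value,
   * and a Result carrying a properly serialized HRegionInfo.
   */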
  @Test
  public void testGetHRegionInfo() throws IOException {
    assertNull(CatalogJanitor.getHRegionInfo(new Result()));
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    Result r = new Result(kvs);
    assertNull(CatalogJanitor.getHRegionInfo(r));
    byte [] f = HConstants.CATALOG_FAMILY;
    // Make a KeyValue that doesn't have the expected qualifier.
    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
      HConstants.SERVER_QUALIFIER, f));
    r = new Result(kvs);
    assertNull(CatalogJanitor.getHRegionInfo(r));
    // Make a KeyValue with the regioninfo qualifier whose value is not a
    // valid HRegionInfo.
    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
      HConstants.REGIONINFO_QUALIFIER, f));
    HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
    assertNull(hri);
    // OK, give it what it expects.
    kvs.clear();
    kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
      HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
    hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
    assertNotNull(hri);
    assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
  }

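  /**
   * Verify that cleanParent leaves a split parent in place while a daughter
   * still holds a reference file, and removes the parent once the reference
   * is gone.
   */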
  @Test
  public void testCleanParent() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    try {
      MasterServices services = new MockMasterServices(server);
      CatalogJanitor janitor = new CatalogJanitor(server, services);
      // Create regions.
      HTableDescriptor htd = new HTableDescriptor("table");
      htd.addFamily(new HColumnDescriptor("f"));
      HRegionInfo parent =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("eee"));
      HRegionInfo splita =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("ccc"));
      HRegionInfo splitb =
        new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
            Bytes.toBytes("eee"));
      // Test that when both daughter regions are in place, we do not
      // remove the parent.
      List<KeyValue> kvs = new ArrayList<KeyValue>();
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
      Result r = new Result(kvs);
      // Add a reference under the splita directory so we don't clear out the parent.
      Path rootdir = services.getMasterFileSystem().getRootDir();
      Path tabledir =
        HTableDescriptor.getTableDir(rootdir, htd.getName());
      Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
          htd.getColumnFamilies()[0].getName());
      Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top);
      long now = System.currentTimeMillis();
      // Reference name has this format: StoreFile#REF_NAME_PARSER
      Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
      FileSystem fs = services.getMasterFileSystem().getFileSystem();
      Path path = ref.write(fs, p);
      assertTrue(fs.exists(path));
      assertFalse(janitor.cleanParent(parent, r));
      // Remove the reference file and try again.
      assertTrue(fs.delete(p, true));
      assertTrue(janitor.cleanParent(parent, r));
    } finally {
      server.stop("shutdown");
    }
  }

  /**
   * Make sure parent gets cleaned up even if daughter is cleaned up before it.
   * @throws IOException
   * @throws InterruptedException
   */
  @Test
  public void testParentCleanedEvenIfDaughterGoneFirst()
  throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testParentCleanedEvenIfDaughterGoneFirst");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->eee, aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("eee"));
    // Sleep a second, else regions with the same start key created within the
    // same second end up with the same encoded name.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("ccc"));
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"));
    HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"));

    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
      Bytes.toBytes("eee"));
    Thread.sleep(1001);
    // Make daughters of daughter b; splitba and splitbb.
    HRegionInfo splitba = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
      Bytes.toBytes("ddd"));
    HRegionInfo splitbb = new HRegionInfo(htd.getName(), Bytes.toBytes("ddd"),
      Bytes.toBytes("eee"));

    // First, test that our Comparator up in CatalogJanitor works right.
    // Just for kicks.
    SortedMap<HRegionInfo, Result> regions =
      new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
    // Now make sure that this map of regions sorts as we expect it to.
    regions.put(parent, createResult(parent, splita, splitb));
    regions.put(splitb, createResult(splitb, splitba, splitbb));
    regions.put(splita, createResult(splita, splitaa, splitab));
    // Assert it's properly sorted.
    int index = 0;
    for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
      if (index == 0) {
        assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
      } else if (index == 1) {
        assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
      } else if (index == 2) {
        assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
      }
      index++;
    }

    // Now play around with the cleanParent function.  Create a ref from splita
    // up to the parent.
    Path splitaRef =
      createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
    // Make sure the actual super parent sticks around because splita has a ref.
    assertFalse(janitor.cleanParent(parent, regions.get(parent)));

    // splitba and splitbb do not have dirs in the fs.  That means that if
    // we test splitb, it should get cleaned up.
    assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));

    // Now remove the ref from splita up to the parent... so the parent can be
    // let go and so the daughter splita can itself be split (a region can't
    // split while it still has references).  BUT make the timing such that the
    // daughter gets cleaned up before we get a chance to let go of the parent.
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));
    // Create the refs from daughters of splita.
    Path splitaaRef =
      createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
    Path splitabRef =
      createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);

    // Test splita.  It should stick around because of the references from
    // splitaa and splitab.
    assertFalse(janitor.cleanParent(splita, regions.get(splita)));

    // Now clean up the daughter splita first.  Remove the references from its
    // daughters splitaa and splitab.
    assertTrue(fs.delete(splitaaRef, true));
    assertTrue(fs.delete(splitabRef, true));
    assertTrue(janitor.cleanParent(splita, regions.get(splita)));

    // The super parent should get cleaned up now that both splita and splitb
    // are gone.
    assertTrue(janitor.cleanParent(parent, regions.get(parent)));
  }

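  /**
   * Point hbase.rootdir at a per-test subdirectory, deleting any contents
   * left over from a previous run.
   * @param htu Testing utility whose configuration is updated.
   * @param subdir Name of the per-test subdirectory.
   * @return The new value of hbase.rootdir.
   * @throws IOException
   */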
  private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
      final String subdir)
  throws IOException {
    Path testdir = htu.getDataTestDir(subdir);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
    htu.getConfiguration().set(HConstants.HBASE_DIR, testdir.toString());
    return htu.getConfiguration().get(HConstants.HBASE_DIR);
  }

  /**
   * Write a reference file under the daughter's store directory that points
   * back up at the parent region.
   * @param services Master services instance.
   * @param htd Descriptor of the table the regions belong to.
   * @param parent Parent region the reference points at.
   * @param daughter Daughter region under whose store dir the reference is written.
   * @param midkey Split key recorded in the reference.
   * @param top True if we are to write a 'top' reference.
   * @return Path to reference we created.
   * @throws IOException
   */
  private Path createReferences(final MasterServices services,
      final HTableDescriptor htd, final HRegionInfo parent,
      final HRegionInfo daughter, final byte [] midkey, final boolean top)
  throws IOException {
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
    Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
      htd.getColumnFamilies()[0].getName());
    Reference ref = new Reference(midkey,
      top? Reference.Range.top: Reference.Range.bottom);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    ref.write(fs, p);
    return p;
  }

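  /**
   * Build a catalog Result for the parent region carrying daughters
   * <code>a</code> and <code>b</code> serialized under the splitA and splitB
   * qualifiers.
   */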
  private Result createResult(final HRegionInfo parent, final HRegionInfo a,
      final HRegionInfo b)
  throws IOException {
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITA_QUALIFIER, Writables.getBytes(a)));
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITB_QUALIFIER, Writables.getBytes(b)));
    return new Result(kvs);
  }

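  /**
   * @return A single-column-family table descriptor, as handed out by the
   * mocked TableDescriptors above.
   */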
  private HTableDescriptor createHTableDescriptor() {
    HTableDescriptor htd = new HTableDescriptor("t");
    htd.addFamily(new HColumnDescriptor("f"));
    return htd;
  }
}