/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.RegionTransitionData;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Like {@link TestSplitTransaction} in that we're testing {@link SplitTransaction},
 * only the tests below run against a live cluster whereas
 * {@link TestSplitTransaction} tests against a bare {@link HRegion}.
 */
public class TestSplitTransactionOnCluster {
  private static final Log LOG =
    LogFactory.getLog(TestSplitTransactionOnCluster.class);
  private HBaseAdmin admin = null;
  private MiniHBaseCluster cluster = null;
  private static final int NB_SERVERS = 2;

  private static final HBaseTestingUtility TESTING_UTIL =
    new HBaseTestingUtility();

  @BeforeClass public static void before() throws Exception {
    TESTING_UTIL.getConfiguration().setInt("hbase.balancer.period", 60000);
    // Needed because some tests have splits happening on regionservers that are
    // killed; we don't want to wait three minutes for the master to figure it out.
    TESTING_UTIL.getConfiguration().setInt(
        "hbase.master.assignment.timeoutmonitor.timeout", 4000);
    TESTING_UTIL.startMiniCluster(NB_SERVERS);
  }

  @AfterClass public static void after() throws Exception {
    TESTING_UTIL.shutdownMiniCluster();
  }

  @Before public void setup() throws IOException {
    TESTING_UTIL.ensureSomeRegionServersAvailable(NB_SERVERS);
    this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
    this.cluster = TESTING_UTIL.getMiniHBaseCluster();
  }

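  /**
   * Assert the passed table has exactly one region and return that region's
   * {@link HRegionInfo}.
   */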
  private HRegionInfo getAndCheckSingleTableRegion(final List<HRegion> regions) {
    assertEquals(1, regions.size());
    return regions.get(0).getRegionInfo();
  }

  /**
   * A test that intentionally has the master fail processing of the split message.
   * Tests that the regionserver's ephemeral split znode gets cleaned up if the
   * regionserver crashes, and that after server shutdown is processed the
   * daughters are brought online.
   * @throws IOException
   * @throws InterruptedException
   * @throws NodeExistsException
   * @throws KeeperException
   */
  @Test (timeout = 300000) public void testRSSplitEphemeralsDisappearButDaughtersAreOnlinedAfterShutdownHandling()
  throws IOException, InterruptedException, NodeExistsException, KeeperException {
    final byte [] tableName =
      Bytes.toBytes("ephemeral");

    // Create table then get the single region for our new table.
    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);

    List<HRegion> regions = cluster.getRegions(tableName);
    HRegionInfo hri = getAndCheckSingleTableRegion(regions);

    int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

    // Turn off balancer so it doesn't cut in and mess up our placements.
    this.admin.balanceSwitch(false);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    try {
      // Add a bit of load to the table so it is splittable.
      TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
      // Get region pre-split.
      HRegionServer server = cluster.getRegionServer(tableRegionIndex);
      printOutRegions(server, "Initial regions: ");
      int regionCount = server.getOnlineRegions().size();
      // Now, before we split, set a special flag in the master that makes it
      // FAIL the processing of the split.
      SplitRegionHandler.TEST_SKIP = true;
      // Now try splitting; the regionserver-side split should succeed.
      split(hri, server, regionCount);
      // Get daughters
      List<HRegion> daughters = cluster.getRegions(tableName);
      assertTrue(daughters.size() >= 2);
      // Assert the ephemeral node is up in zk.
      String path = ZKAssign.getNodeName(t.getConnection().getZooKeeperWatcher(),
        hri.getEncodedName());
      Stat stats =
        t.getConnection().getZooKeeperWatcher().getRecoverableZooKeeper().exists(path, false);
      LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats=" + stats);
      RegionTransitionData rtd =
        ZKAssign.getData(t.getConnection().getZooKeeperWatcher(),
          hri.getEncodedName());
      // State could be SPLIT or SPLITTING.
      assertTrue(rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLIT) ||
        rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLITTING));
      // Now crash the server
      cluster.abortRegionServer(tableRegionIndex);
      waitUntilRegionServerDead();

      // Wait till regions are back on line again.
      while(cluster.getRegions(tableName).size() < daughters.size()) {
        LOG.info("Waiting for repair to happen");
        Thread.sleep(1000);
      }
      // Assert daughters are online.
      regions = cluster.getRegions(tableName);
      for (HRegion r: regions) {
        assertTrue(daughters.contains(r));
      }
      // Finally assert that the ephemeral SPLIT znode was cleaned up.
      stats = t.getConnection().getZooKeeperWatcher().getRecoverableZooKeeper().exists(path, false);
      LOG.info("EPHEMERAL NODE AFTER SERVER ABORT, path=" + path + ", stats=" + stats);
      assertTrue(stats == null);
    } finally {
      // Set this flag back.
      SplitRegionHandler.TEST_SKIP = false;
      admin.balanceSwitch(true);
      cluster.getMaster().setCatalogJanitorEnabled(true);
    }
  }

  @Test (timeout = 300000) public void testExistingZnodeBlocksSplitAndWeRollback()
  throws IOException, InterruptedException, NodeExistsException, KeeperException {
    final byte [] tableName =
      Bytes.toBytes("testExistingZnodeBlocksSplitAndWeRollback");

    // Create table then get the single region for our new table.
    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);

    List<HRegion> regions = cluster.getRegions(tableName);
    HRegionInfo hri = getAndCheckSingleTableRegion(regions);

    int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

    // Turn off balancer so it doesn't cut in and mess up our placements.
    this.admin.balanceSwitch(false);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    try {
      // Add a bit of load to the table so it is splittable.
      TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
      // Get region pre-split.
      HRegionServer server = cluster.getRegionServer(tableRegionIndex);
      printOutRegions(server, "Initial regions: ");
      int regionCount = server.getOnlineRegions().size();
      // Insert into zk a blocking znode, a znode with the same name as the
      // region, so it gets in the way of our splitting.
      ZKAssign.createNodeClosing(t.getConnection().getZooKeeperWatcher(),
        hri, new ServerName("any.old.server", 1234, -1));
      // Now try splitting.... should fail.  And each attempt should roll back
      // successfully.
      this.admin.split(hri.getRegionNameAsString());
      this.admin.split(hri.getRegionNameAsString());
      this.admin.split(hri.getRegionNameAsString());
      // Wait around a while and assert count of regions remains constant.
      for (int i = 0; i < 10; i++) {
        Thread.sleep(100);
        assertEquals(regionCount, server.getOnlineRegions().size());
      }
      // Now clear the zknode
      ZKAssign.deleteClosingNode(t.getConnection().getZooKeeperWatcher(), hri);
      // Now try splitting and it should work.
      split(hri, server, regionCount);
      // Get daughters
      List<HRegion> daughters = cluster.getRegions(tableName);
      assertTrue(daughters.size() >= 2);
      // OK, so split happened after we cleared the blocking node.
    } finally {
      admin.balanceSwitch(true);
      cluster.getMaster().setCatalogJanitorEnabled(true);
    }
  }

  /**
   * Messy test that simulates the case where SplitTransaction fails to add one
   * of the daughters to the .META. table before the crash.  We're testing that
   * the shutdown handler fixes up the missing daughter region by adding it back
   * into .META.
   * @throws IOException
   * @throws InterruptedException
   */
  @Test (timeout = 300000) public void testShutdownSimpleFixup()
  throws IOException, InterruptedException {
    final byte [] tableName = Bytes.toBytes("testShutdownSimpleFixup");

    // Create table then get the single region for our new table.
    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);

    List<HRegion> regions = cluster.getRegions(tableName);
    HRegionInfo hri = getAndCheckSingleTableRegion(regions);

    int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

    // Turn off balancer so it doesn't cut in and mess up our placements.
    this.admin.balanceSwitch(false);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    try {
      // Add a bit of load to the table so it is splittable.
      TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
      // Get region pre-split.
      HRegionServer server = cluster.getRegionServer(tableRegionIndex);
      printOutRegions(server, "Initial regions: ");
      int regionCount = server.getOnlineRegions().size();
      // Now split.
      split(hri, server, regionCount);
      // Get daughters
      List<HRegion> daughters = cluster.getRegions(tableName);
      assertTrue(daughters.size() >= 2);
      // Remove one of the daughters from .META. to simulate failed insert of
      // daughter region up into .META.
      removeDaughterFromMeta(daughters.get(0).getRegionName());
      // Now crash the server
      cluster.abortRegionServer(tableRegionIndex);
      waitUntilRegionServerDead();
      // Wait till regions are back on line again.
      while(cluster.getRegions(tableName).size() < daughters.size()) {
        LOG.info("Waiting for repair to happen");
        Thread.sleep(1000);
      }
      // Assert daughters are online.
      regions = cluster.getRegions(tableName);
      for (HRegion r: regions) {
        assertTrue(daughters.contains(r));
      }
    } finally {
      admin.balanceSwitch(true);
      cluster.getMaster().setCatalogJanitorEnabled(true);
    }
  }

  /**
   * Test that if a daughter splits on us, we won't do the shutdown handler fixup
   * just because we can't find the immediate daughter of an offlined parent.
   * @throws IOException
   * @throws InterruptedException
   */
  @Test (timeout=300000) public void testShutdownFixupWhenDaughterHasSplit()
  throws IOException, InterruptedException {
    final byte [] tableName =
      Bytes.toBytes("testShutdownFixupWhenDaughterHasSplit");

    // Create table then get the single region for our new table.
    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);

    List<HRegion> regions = cluster.getRegions(tableName);
    HRegionInfo hri = getAndCheckSingleTableRegion(regions);

    int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

    // Turn off balancer so it doesn't cut in and mess up our placements.
    this.admin.balanceSwitch(false);
    // Turn off the meta scanner so it doesn't remove the parent on us.
    cluster.getMaster().setCatalogJanitorEnabled(false);
    try {
      // Add a bit of load to the table so it is splittable.
      TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
      // Get region pre-split.
      HRegionServer server = cluster.getRegionServer(tableRegionIndex);
      printOutRegions(server, "Initial regions: ");
      int regionCount = server.getOnlineRegions().size();
      // Now split.
      split(hri, server, regionCount);
      // Get daughters
      List<HRegion> daughters = cluster.getRegions(tableName);
      assertTrue(daughters.size() >= 2);
      // Now split one of the daughters.
      regionCount = server.getOnlineRegions().size();
      HRegionInfo daughter = daughters.get(0).getRegionInfo();
      // Compact first to ensure we have cleaned up references -- else the split
      // will fail.
      this.admin.compact(daughter.getRegionName());
      daughters = cluster.getRegions(tableName);
      HRegion daughterRegion = null;
      for (HRegion r: daughters) {
        if (r.getRegionInfo().equals(daughter)) daughterRegion = r;
      }
      assertTrue(daughterRegion != null);
      while (true) {
        if (!daughterRegion.hasReferences()) break;
        Threads.sleep(100);
      }
      split(daughter, server, regionCount);
      // Get list of daughters
      daughters = cluster.getRegions(tableName);
      // Now crash the server
      cluster.abortRegionServer(tableRegionIndex);
      waitUntilRegionServerDead();
      // Wait till regions are back on line again.
      while(cluster.getRegions(tableName).size() < daughters.size()) {
        LOG.info("Waiting for repair to happen");
        Thread.sleep(1000);
      }
      // Assert daughters are online and ONLY the original daughters -- that
      // fixup didn't insert one during server shutdown recovery.
      regions = cluster.getRegions(tableName);
      assertEquals(daughters.size(), regions.size());
      for (HRegion r: regions) {
        assertTrue(daughters.contains(r));
      }
    } finally {
      admin.balanceSwitch(true);
      cluster.getMaster().setCatalogJanitorEnabled(true);
    }
  }

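  /**
   * Request a split of the passed region via the admin, then block until the
   * hosting regionserver reports more online regions than <code>regionCount</code>.
   */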
  private void split(final HRegionInfo hri, final HRegionServer server,
      final int regionCount)
  throws IOException, InterruptedException {
    this.admin.split(hri.getRegionNameAsString());
    while (server.getOnlineRegions().size() <= regionCount) {
      LOG.debug("Waiting on region to split");
      Thread.sleep(100);
    }
  }

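  /**
   * Delete the row for the passed region name from the .META. table,
   * simulating a daughter region that never made it into .META.
   */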
  private void removeDaughterFromMeta(final byte [] regionName) throws IOException {
    HTable metaTable =
      new HTable(TESTING_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
    Delete d = new Delete(regionName);
    metaTable.delete(d);
    LOG.info("Deleted " + Bytes.toString(regionName));
  }

  /**
   * Ensure the single table region is not on the same server as the single
   * .META. table region.
   * @param admin Admin to use for moving the region if needed.
   * @param hri The single region of the table under test.
   * @return Index of the server hosting the single table region.
   * @throws UnknownRegionException
   * @throws MasterNotRunningException
   * @throws ZooKeeperConnectionException
   * @throws InterruptedException
   */
  private int ensureTableRegionNotOnSameServerAsMeta(final HBaseAdmin admin,
      final HRegionInfo hri)
  throws UnknownRegionException, MasterNotRunningException,
  ZooKeeperConnectionException, InterruptedException {
    MiniHBaseCluster cluster = TESTING_UTIL.getMiniHBaseCluster();
    // Now make sure that the table region is not on same server as that hosting
    // .META.  We don't want .META. replay polluting our test when we later crash
    // the table region serving server.
    int metaServerIndex = cluster.getServerWithMeta();
    assertTrue(metaServerIndex != -1);
    HRegionServer metaRegionServer = cluster.getRegionServer(metaServerIndex);
    int tableRegionIndex = cluster.getServerWith(hri.getRegionName());
    assertTrue(tableRegionIndex != -1);
    HRegionServer tableRegionServer = cluster.getRegionServer(tableRegionIndex);
    if (metaRegionServer.getServerName().equals(tableRegionServer.getServerName())) {
      HRegionServer hrs = getOtherRegionServer(cluster, metaRegionServer);
      LOG.info("Moving " + hri.getRegionNameAsString() + " to " +
        hrs.getServerName() + "; metaServerIndex=" + metaServerIndex);
      admin.move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(hrs.getServerName().toString()));
    }
    // Wait till table region is up on the server that is NOT carrying .META..
    while (true) {
      tableRegionIndex = cluster.getServerWith(hri.getRegionName());
      if (tableRegionIndex != -1 && tableRegionIndex != metaServerIndex) break;
      LOG.debug("Waiting on region move off the .META. server; current index " +
        tableRegionIndex + " and metaServerIndex=" + metaServerIndex);
      Thread.sleep(100);
    }
    // Verify for sure table region is not on same server as .META.
    tableRegionIndex = cluster.getServerWith(hri.getRegionName());
    assertTrue(tableRegionIndex != -1);
    assertNotSame(metaServerIndex, tableRegionIndex);
    return tableRegionIndex;
  }

  /**
   * Find a regionserver other than the one passed.
   * Can't rely on indexes into the list of regionservers since crashed servers
   * still occupy an index.
   * @param cluster The cluster to search.
   * @param notThisOne The regionserver to skip.
   * @return A regionserver that is not <code>notThisOne</code> or null if none
   * found.
   */
  private HRegionServer getOtherRegionServer(final MiniHBaseCluster cluster,
      final HRegionServer notThisOne) {
    for (RegionServerThread rst: cluster.getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.getServerName().equals(notThisOne.getServerName())) continue;
      if (hrs.isStopping() || hrs.isStopped()) continue;
      return hrs;
    }
    return null;
  }

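  /**
   * Log each region currently online on the passed regionserver, prefixed with
   * <code>prefix</code>.
   */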
  private void printOutRegions(final HRegionServer hrs, final String prefix)
      throws IOException {
    List<HRegionInfo> regions = hrs.getOnlineRegions();
    for (HRegionInfo region: regions) {
      LOG.info(prefix + region.getRegionNameAsString());
    }
  }

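  /**
   * Block until the master's cluster status reports fewer live servers than
   * <code>NB_SERVERS</code>, i.e. the aborted regionserver has been noticed.
   */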
  private void waitUntilRegionServerDead() throws InterruptedException {
    // Wait until the master processes the RS shutdown
    while (cluster.getMaster().getClusterStatus().
        getServers().size() == NB_SERVERS) {
      LOG.info("Waiting on server to go down");
      Thread.sleep(100);
    }
  }
}