/*
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import javax.tools.*;
import java.io.*;
import java.util.*;
import java.util.jar.*;

import org.junit.*;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;

/**
 * Test coprocessor class loading.
 */
public class TestClassLoading {
  private static final Log LOG = LogFactory.getLog(TestClassLoading.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static Configuration conf;
  private static MiniDFSCluster cluster;

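  // I/O buffer size used when copying compiled class files into the test jars.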
  static final int BUFFER_SIZE = 4096;
  static final String tableName = "TestClassLoading";
  static final String cpName1 = "TestCP1";
  static final String cpName2 = "TestCP2";
  static final String cpName3 = "TestCP3";
  static final String cpName4 = "TestCP4";
  static final String cpName5 = "TestCP5";

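  // sample coprocessor implementations from this package, loaded as system
  // coprocessors through the configuration keys set in setUpBeforeClass().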
  private static Class<?> regionCoprocessor1 = ColumnAggregationEndpoint.class;
  private static Class<?> regionCoprocessor2 = GenericEndpoint.class;
  private static Class<?> regionServerCoprocessor = SampleRegionWALObserver.class;
  private static Class<?> masterCoprocessor = BaseMasterObserver.class;

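  // coprocessor names every regionserver should report while only system
  // (configuration-loaded) coprocessors are active.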
  private static final String[] regionServerSystemCoprocessors =
      new String[]{
      regionCoprocessor1.getSimpleName(),
      regionServerCoprocessor.getSimpleName()
  };

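  // coprocessor names a regionserver should report once it also hosts a user
  // table region, which pulls in the user-region coprocessor as well.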
  private static final String[] regionServerSystemAndUserCoprocessors =
      new String[] {
      regionCoprocessor1.getSimpleName(),
      regionCoprocessor2.getSimpleName(),
      regionServerCoprocessor.getSimpleName()
  };

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf = TEST_UTIL.getConfiguration();

    // regionCoprocessor1 will be loaded on all regionservers, since it is
    // loaded for all tables (user and meta).
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        regionCoprocessor1.getName());

    // regionCoprocessor2 will be loaded only on regionservers that serve a
    // user table region. Therefore, if there are no user tables loaded,
    // this coprocessor will not be loaded on any regionserver.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        regionCoprocessor2.getName());

    conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
        regionServerCoprocessor.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        masterCoprocessor.getName());
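
    // start a single-regionserver cluster, so the tests below can inspect
    // regionserver 0 and know that all regions are hosted there.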
    TEST_UTIL.startMiniCluster(1);
    cluster = TEST_UTIL.getDFSCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  // package the given class files into a jar archive
  private boolean createJarArchive(File archiveFile, File[] tobeJared) {
    try {
      byte[] buffer = new byte[BUFFER_SIZE];
      // Open archive file
      FileOutputStream stream = new FileOutputStream(archiveFile);
      JarOutputStream out = new JarOutputStream(stream, new Manifest());

      for (int i = 0; i < tobeJared.length; i++) {
        if (tobeJared[i] == null || !tobeJared[i].exists()
            || tobeJared[i].isDirectory()) {
          continue;
        }

        // Add archive entry
        JarEntry jarAdd = new JarEntry(tobeJared[i].getName());
        jarAdd.setTime(tobeJared[i].lastModified());
        out.putNextEntry(jarAdd);

        // Write file to archive
        FileInputStream in = new FileInputStream(tobeJared[i]);
        while (true) {
          int nRead = in.read(buffer, 0, buffer.length);
          if (nRead <= 0) {
            break;
          }
          out.write(buffer, 0, nRead);
        }
        in.close();
      }
      out.close();
      stream.close();
      LOG.info("Adding classes to jar file completed");
      return true;
    } catch (Exception ex) {
      LOG.error("Error: " + ex.getMessage(), ex);
      return false;
    }
  }

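  /**
   * Generate, compile and package a trivial coprocessor: a subclass of
   * BaseRegionObserver with the given class name, compiled against the test
   * classpath and jarred up under the test data directory.
   */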
  private File buildCoprocessorJar(String className) throws Exception {
    // compose a Java source file.
    String javaCode = "import org.apache.hadoop.hbase.coprocessor.*;" +
      "public class " + className + " extends BaseRegionObserver {}";
    Path baseDir = TEST_UTIL.getDataTestDir();
    Path srcDir = new Path(baseDir, "src");
    File srcDirPath = new File(srcDir.toString());
    srcDirPath.mkdirs();
    File sourceCodeFile = new File(srcDir.toString(), className + ".java");
    BufferedWriter bw = new BufferedWriter(new FileWriter(sourceCodeFile));
    bw.write(javaCode);
    bw.close();

    // compile it with the JDK's in-process compiler
    JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
    StandardJavaFileManager fm = compiler.getStandardFileManager(null, null,
      null);
    Iterable<? extends JavaFileObject> cu =
      fm.getJavaFileObjects(sourceCodeFile);
    List<String> options = new ArrayList<String>();
    options.add("-classpath");
    // only add HBase classes to the classpath. This is a little bit tricky:
    // we assume the compiled classes are in {hbaseSrc}/target/classes.
    String currentDir = new File(".").getAbsolutePath();
    String classpath =
        currentDir + Path.SEPARATOR + "target" + Path.SEPARATOR + "classes" +
        System.getProperty("path.separator") +
        System.getProperty("surefire.test.class.path");
    options.add(classpath);
    LOG.debug("Setting classpath to: " + classpath);

    JavaCompiler.CompilationTask task = compiler.getTask(null, fm, null,
      options, null, cu);
    assertTrue("Compile file " + sourceCodeFile + " failed.", task.call());

    // build a jar file from the compiled class files
    String jarFileName = className + ".jar";
    File jarFile = new File(baseDir.toString(), jarFileName);
    if (!createJarArchive(jarFile,
        new File[]{new File(srcDir.toString(), className + ".class")})) {
      fail("Build jar file failed.");
    }

    return jarFile;
  }

  @Test
  // HBASE-3516: Test CP Class loading from HDFS
  public void testClassLoadingFromHDFS() throws Exception {
    FileSystem fs = cluster.getFileSystem();

    File jarFile1 = buildCoprocessorJar(cpName1);
    File jarFile2 = buildCoprocessorJar(cpName2);

    // copy the jars into dfs
    fs.copyFromLocalFile(new Path(jarFile1.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR +
      jarFile1.getName();
    assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS1)));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1);

    fs.copyFromLocalFile(new Path(jarFile2.getPath()),
        new Path(fs.getUri().toString() + Path.SEPARATOR));
    String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR +
      jarFile2.getName();
    assertTrue("Copy jar file to HDFS failed.",
      fs.exists(new Path(jarFileOnHDFS2)));
    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);

    // create a table that references the coprocessors
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("test"));
    // without configuration values
    htd.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 +
      "|" + Coprocessor.PRIORITY_USER);
    // with configuration values
    htd.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 +
      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    admin.createTable(htd);
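    // opening the table's regions on the regionserver fetches the jars from
    // HDFS and loads the coprocessor classes named in the table attributes.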

    // verify that the coprocessors were loaded
    boolean found1 = false, found2 = false, found2_k1 = false,
        found2_k2 = false, found2_k3 = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(tableName)) {
        CoprocessorEnvironment env;
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
        if (env != null) {
          found1 = true;
        }
        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
        if (env != null) {
          found2 = true;
          Configuration conf = env.getConfiguration();
          found2_k1 = conf.get("k1") != null;
          found2_k2 = conf.get("k2") != null;
          found2_k3 = conf.get("k3") != null;
        }
      }
    }
    assertTrue("Class " + cpName1 + " was missing on a region", found1);
    assertTrue("Class " + cpName2 + " was missing on a region", found2);
    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
  }

  @Test
  // HBASE-3516: Test CP Class loading from local file system
  public void testClassLoadingFromLocalFS() throws Exception {
    File jarFile = buildCoprocessorJar(cpName3);

    // create a table that references the jar
    HTableDescriptor htd = new HTableDescriptor(cpName3);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", jarFile.toString() + "|" + cpName3 + "|" +
      Coprocessor.PRIORITY_USER);
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(htd);

    // verify that the coprocessor was loaded
    boolean found = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(cpName3)) {
        found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
      }
    }
    assertTrue("Class " + cpName3 + " was missing on a region", found);
  }

  @Test
  // HBASE-3810: Registering a Coprocessor at HTableDescriptor should be
  // less strict
  public void testHBase3810() throws Exception {
    // allowed value pattern: [path] | class name | [priority] | [key values]

    File jarFile1 = buildCoprocessorJar(cpName1);
    File jarFile2 = buildCoprocessorJar(cpName2);
    File jarFile4 = buildCoprocessorJar(cpName4);
    File jarFile5 = buildCoprocessorJar(cpName5);

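    // the attribute keys and values below deliberately vary in case and carry
    // surrounding whitespace, to exercise the lenient parsing this test covers.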
    String cpKey1 = "COPROCESSOR$1";
    String cpKey2 = " Coprocessor$2 ";
    String cpKey3 = " coprocessor$03 ";

    String cpValue1 = jarFile1.toString() + "|" + cpName1 + "|" +
        Coprocessor.PRIORITY_USER;
    String cpValue2 = jarFile2.toString() + " | " + cpName2 + " | ";
    // load from default class loader
    String cpValue3 =
        " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";

    // create a table that references the jar
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("test"));

    // add 3 coprocessors by setting htd attributes directly.
    htd.setValue(cpKey1, cpValue1);
    htd.setValue(cpKey2, cpValue2);
    htd.setValue(cpKey3, cpValue3);

    // add 2 coprocessors using the new htd.addCoprocessor() API
    htd.addCoprocessor(cpName4, new Path(jarFile4.getPath()),
        Coprocessor.PRIORITY_USER, null);
    Map<String, String> kvs = new HashMap<String, String>();
    kvs.put("k1", "v1");
    kvs.put("k2", "v2");
    kvs.put("k3", "v3");
    htd.addCoprocessor(cpName5, new Path(jarFile5.getPath()),
        Coprocessor.PRIORITY_USER, kvs);

    HBaseAdmin admin = new HBaseAdmin(conf);
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    admin.createTable(htd);

    // verify that the coprocessors were loaded
    boolean found_2 = false, found_1 = false, found_3 = false,
        found_4 = false, found_5 = false;
    boolean found5_k1 = false, found5_k2 = false, found5_k3 = false,
        found5_k4 = false;

    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region:
        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
      if (region.getRegionNameAsString().startsWith(tableName)) {
        found_1 = found_1 ||
            (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
        found_2 = found_2 ||
            (region.getCoprocessorHost().findCoprocessor(cpName2) != null);
        found_3 = found_3 ||
            (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver")
                != null);
        found_4 = found_4 ||
            (region.getCoprocessorHost().findCoprocessor(cpName4) != null);

        CoprocessorEnvironment env =
            region.getCoprocessorHost().findCoprocessorEnvironment(cpName5);
        if (env != null) {
          found_5 = true;
          Configuration conf = env.getConfiguration();
          found5_k1 = conf.get("k1") != null;
          found5_k2 = conf.get("k2") != null;
          found5_k3 = conf.get("k3") != null;
          // 'k4' was never specified, so it should not show up here.
          found5_k4 = conf.get("k4") != null;
        }
      }
    }

    assertTrue("Class " + cpName1 + " was missing on a region", found_1);
    assertTrue("Class " + cpName2 + " was missing on a region", found_2);
    assertTrue("Class SimpleRegionObserver was missing on a region", found_3);
    assertTrue("Class " + cpName4 + " was missing on a region", found_4);
    assertTrue("Class " + cpName5 + " was missing on a region", found_5);

    assertTrue("Configuration key 'k1' was missing on a region", found5_k1);
    assertTrue("Configuration key 'k2' was missing on a region", found5_k2);
    assertTrue("Configuration key 'k3' was missing on a region", found5_k3);
    assertFalse("Configuration key 'k4' should not have been configured",
        found5_k4);
  }

  @Test
  public void testRegionServerCoprocessorsReported() throws Exception {
    // HBASE-4070: Improve region server metrics to report loaded coprocessors
    // to master: verify that each regionserver is reporting the correct set of
    // loaded coprocessors.

    // We rely on the fact that getCoprocessors() will return a sorted
    // display of the coprocessors' names, so for example, regionCoprocessor1's
    // name "ColumnAggregationEndpoint" will appear before regionCoprocessor2's
    // name "GenericEndpoint" because "C" is before "G" lexicographically.

    HBaseAdmin admin = new HBaseAdmin(conf);

    // disable all user tables, if any are loaded.
    for (HTableDescriptor htd: admin.listTables()) {
      if (!htd.isMetaTable()) {
        String tableName = htd.getNameAsString();
        if (admin.isTableEnabled(tableName)) {
          try {
            admin.disableTable(tableName);
          } catch (TableNotEnabledException e) {
            // ignoring this exception for now: not sure why it's happening.
          }
        }
      }
    }

    // only system coprocessors should be loaded at this point.
    assertAllRegionServers(regionServerSystemCoprocessors, null);

    // The next two steps enable and disable user tables to see if coprocessor
    // load reporting changes as coprocessors are loaded and unloaded.

    // Create a table. This should cause regionCoprocessor2 to be loaded, since
    // we've specified it for loading on any user table with
    // USER_REGION_COPROCESSOR_CONF_KEY in setUpBeforeClass().
    String userTable1 = "userTable1";
    HTableDescriptor userTD1 = new HTableDescriptor(userTable1);
    admin.createTable(userTD1);
    // table should be enabled now.
    assertTrue(admin.isTableEnabled(userTable1));
    assertAllRegionServers(regionServerSystemAndUserCoprocessors, userTable1);

    // unload and make sure we're back to only system coprocessors again.
    admin.disableTable(userTable1);
    assertAllRegionServers(regionServerSystemCoprocessors, null);

    // create another table, with its own specified coprocessor.
    String userTable2 = "userTable2";
    HTableDescriptor htd2 = new HTableDescriptor(userTable2);

    String userTableCP = "userTableCP";
    File jarFile1 = buildCoprocessorJar(userTableCP);
    htd2.addFamily(new HColumnDescriptor("myfamily"));
    htd2.setValue("COPROCESSOR$1", jarFile1.toString() + "|" + userTableCP +
      "|" + Coprocessor.PRIORITY_USER);
    admin.createTable(htd2);
    // table should be enabled now.
    assertTrue(admin.isTableEnabled(userTable2));

    ArrayList<String> existingCPsPlusNew =
        new ArrayList<String>(Arrays.asList(regionServerSystemAndUserCoprocessors));
    existingCPsPlusNew.add(userTableCP);
    String[] existingCPsPlusNewArray = new String[existingCPsPlusNew.size()];
    assertAllRegionServers(existingCPsPlusNew.toArray(existingCPsPlusNewArray),
        userTable2);

    admin.disableTable(userTable2);
    assertTrue(admin.isTableDisabled(userTable2));

    // we should be back to only system coprocessors again.
    assertAllRegionServers(regionServerSystemCoprocessors, null);
  }

  /**
   * Returns the subset of regionservers (as a map of ServerName to
   * HServerLoad) which host some region of the given table. Used by
   * assertAllRegionServers() below to test reporting of loaded coprocessors.
   * @param tableName the table whose hosting regionservers we want.
   * @return subset of all online servers.
   */
  Map<ServerName, HServerLoad> serversForTable(String tableName) {
    Map<ServerName, HServerLoad> serverLoadHashMap =
        new HashMap<ServerName, HServerLoad>();
    for (Map.Entry<ServerName, HServerLoad> server:
        TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
            getOnlineServers().entrySet()) {
      for (Map.Entry<byte[], HServerLoad.RegionLoad> region:
          server.getValue().getRegionsLoad().entrySet()) {
        if (region.getValue().getNameAsString().startsWith(tableName)) {
          // this server hosts a region of tableName: add this server..
          serverLoadHashMap.put(server.getKey(), server.getValue());
          // .. and skip the rest of the regions that it hosts.
          break;
        }
      }
    }
    return serverLoadHashMap;
  }

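  /**
   * Assert that the regionservers report exactly the expected set of loaded
   * coprocessors, retrying up to five times (one second apart) to give the
   * load reports time to reach the master.
   * @param expectedCoprocessors the expected coprocessor names, in the sorted
   *        order returned by HServerLoad.getCoprocessors().
   * @param tableName restrict the check to servers hosting a region of this
   *        table, or check all online regionservers if null.
   */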
  void assertAllRegionServers(String[] expectedCoprocessors, String tableName)
      throws InterruptedException {
    Map<ServerName, HServerLoad> servers;
    String[] actualCoprocessors = null;
    boolean success = false;
    for (int i = 0; i < 5; i++) {
      if (tableName == null) {
        // if no tableName specified, use all servers.
        servers =
            TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
                getOnlineServers();
      } else {
        servers = serversForTable(tableName);
      }
      boolean anyFailed = false;
      for (Map.Entry<ServerName, HServerLoad> server: servers.entrySet()) {
        actualCoprocessors = server.getValue().getCoprocessors();
        if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
          LOG.debug("failed comparison: actual: " +
              Arrays.toString(actualCoprocessors) +
              " ; expected: " + Arrays.toString(expectedCoprocessors));
          anyFailed = true;
          break;
        }
      }
      if (!anyFailed) {
        success = true;
        break;
      }
      LOG.debug("retrying after failed comparison: " + i);
      Thread.sleep(1000);
    }
    assertTrue(success);
  }

  @Test
  public void testMasterCoprocessorsReported() {
    // HBASE-4070: Improve region server metrics to report loaded coprocessors
    // to master: verify that the master is reporting the correct set of
    // loaded coprocessors.
    final String loadedMasterCoprocessorsVerify =
        "[" + masterCoprocessor.getSimpleName() + "]";
    String loadedMasterCoprocessors =
        Arrays.toString(
            TEST_UTIL.getHBaseCluster().getMaster().getCoprocessors());
    assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors);
  }
}