/*
 * Copyright The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.BeforeClass;
import org.junit.Test;

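/**
 * Verifies that edits written to a master cluster are replicated to multiple
 * slave clusters, including a peer that is added after edits have already
 * been written and the logs rolled.
 */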
public class TestMultiSlaveReplication {

  private static final Log LOG = LogFactory.getLog(TestMultiSlaveReplication.class);

  private static Configuration conf1;
  private static Configuration conf2;
  private static Configuration conf3;

  private static String clusterKey2;
  private static String clusterKey3;

  private static HBaseTestingUtility utility1;
  private static HBaseTestingUtility utility2;
  private static HBaseTestingUtility utility3;
  private static final long SLEEP_TIME = 500;
  private static final int NB_RETRIES = 10;

  private static final byte[] tableName = Bytes.toBytes("test");
  private static final byte[] famName = Bytes.toBytes("f");
  private static final byte[] row = Bytes.toBytes("row");
  private static final byte[] row1 = Bytes.toBytes("row1");
  private static final byte[] row2 = Bytes.toBytes("row2");
  private static final byte[] row3 = Bytes.toBytes("row3");
  private static final byte[] noRepfamName = Bytes.toBytes("norep");

  private static HTableDescriptor table;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller block size and capacity to trigger more operations
    // and test them
    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    conf1.setInt("replication.source.size.capacity", 1024);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    new ZooKeeperWatcher(conf1, "cluster1", null, true);

    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");

    conf3 = new Configuration(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf2, "cluster2", null, true);

    utility3 = new HBaseTestingUtility(conf3);
    utility3.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf3, "cluster3", null, true);

    clusterKey2 = conf2.get(HConstants.ZOOKEEPER_QUORUM) + ":" +
        conf2.get("hbase.zookeeper.property.clientPort") + ":/2";

    clusterKey3 = conf3.get(HConstants.ZOOKEEPER_QUORUM) + ":" +
        conf3.get("hbase.zookeeper.property.clientPort") + ":/3";

    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
  }

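  /**
   * Adds cluster 2 as a peer and verifies puts and deletes replicate there
   * but not to cluster 3; then adds cluster 3 and verifies it catches up
   * from the latest log only: edits written after the last roll are
   * replicated, while older edits are not.
   */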
  @Test(timeout=300000)
  public void testMultiSlaveReplication() throws Exception {
    LOG.info("testMultiSlaveReplication");
    MiniHBaseCluster master = utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", clusterKey2);

    // put "row" and wait until it is replicated, then delete it
    putAndWait(row, famName, htable1, htable2);
    deleteAndWait(row, htable1, htable2);
    // check it wasn't replicated to cluster 3
    checkRow(row, 0, htable3);

    putAndWait(row2, famName, htable1, htable2);

    // now roll the region server's logs
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());
    // after the log was rolled, put a new row
    putAndWait(row3, famName, htable1, htable2);

    admin1.addPeer("2", clusterKey3);

    // put a row, check it was replicated to all clusters
    putAndWait(row1, famName, htable1, htable2, htable3);
    // delete and verify
    deleteAndWait(row1, htable1, htable2, htable3);

    // make sure row2 did not get replicated after
    // cluster 3 was added
    checkRow(row2, 0, htable3);

    // row3 will get replicated, because it was in the
    // latest log
    checkRow(row3, 1, htable3);

    Put p = new Put(row);
    p.add(famName, row, row);
    htable1.put(p);
    // now roll the logs again
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0)
        .getServerName().toString());

    // cleanup "row2", also conveniently use this to wait for replication
    // to finish
    deleteAndWait(row2, htable1, htable2, htable3);
    // even if the log was rolled in the middle of the replication,
    // "row" is still replicated
    checkRow(row, 1, htable2, htable3);

    // cleanup the rest
    deleteAndWait(row, htable1, htable2, htable3);
    deleteAndWait(row3, htable1, htable2, htable3);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

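  /**
   * Asserts that a Get of the given row returns a result of the expected
   * size on every given table.
   */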
  private void checkRow(byte[] row, int count, HTable... tables) throws IOException {
    Get get = new Get(row);
    for (HTable table : tables) {
      Result res = table.get(get);
      assertEquals(count, res.size());
    }
  }

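  /**
   * Deletes the row on the source table, then polls the target tables until
   * the delete has been replicated everywhere or the retries run out.
   */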
  private void deleteAndWait(byte[] row, HTable source, HTable... targets)
  throws Exception {
    Delete del = new Delete(row);
    source.delete(del);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too long for delete replication");
      }
      boolean removedFromAll = true;
      for (HTable target : targets) {
        Result res = target.get(get);
        if (res.size() >= 1) {
          LOG.info("Row not deleted");
          removedFromAll = false;
          break;
        }
      }
      if (removedFromAll) {
        break;
      } else {
        Thread.sleep(SLEEP_TIME);
      }
    }
  }

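  /**
   * Puts the row on the source table, then polls the target tables until the
   * value shows up everywhere or the retries run out.
   */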
  private void putAndWait(byte[] row, byte[] fam, HTable source, HTable... targets)
  throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too long for put replication");
      }
      boolean replicatedToAll = true;
      for (HTable target : targets) {
        Result res = target.get(get);
        if (res.size() == 0) {
          LOG.info("Row not available");
          replicatedToAll = false;
          break;
        } else {
          assertArrayEquals(row, res.value());
        }
      }
      if (replicatedToAll) {
        break;
      } else {
        Thread.sleep(SLEEP_TIME);
      }
    }
  }
}