/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

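/**
 * Tests master/master (cyclic) replication across two and three clusters.
 * All clusters share a single mini ZooKeeper cluster under separate znode
 * parents, and a region observer counts puts and deletes so the tests can
 * verify that edits are not replicated back to their cluster of origin.
 */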
public class TestMasterReplication {

  private static final Log LOG = LogFactory.getLog(TestMasterReplication.class);

  private static Configuration conf1;
  private static Configuration conf2;
  private static Configuration conf3;

  private static String clusterKey1;
  private static String clusterKey2;
  private static String clusterKey3;

  private static HBaseTestingUtility utility1;
  private static HBaseTestingUtility utility2;
  private static HBaseTestingUtility utility3;
  private static final long SLEEP_TIME = 500;
  private static final int NB_RETRIES = 10;

  private static final byte[] tableName = Bytes.toBytes("test");
  private static final byte[] famName = Bytes.toBytes("f");
  private static final byte[] row = Bytes.toBytes("row");
  private static final byte[] row1 = Bytes.toBytes("row1");
  private static final byte[] row2 = Bytes.toBytes("row2");
  private static final byte[] noRepfamName = Bytes.toBytes("norep");

  private static final byte[] count = Bytes.toBytes("count");
  private static final byte[] put = Bytes.toBytes("put");
  private static final byte[] delete = Bytes.toBytes("delete");

  private static HTableDescriptor table;

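  // One mini ZooKeeper cluster is shared by all three HBase clusters; each
  // cluster gets its own znode parent (/1, /2, /3) and its own cluster key.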
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");

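    // Small WAL block size, replication batch size and wait intervals so that
    // replication kicks in quickly and log rolling/cleaning gets exercised.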
    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    conf1.setInt("replication.source.size.capacity", 1024);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");

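    // Start the mini ZooKeeper cluster once; the other clusters reuse it.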
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    new ZooKeeperWatcher(conf1, "cluster1", null, true);

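    // conf2 and conf3 are copies of conf1, so they share the ZK quorum and
    // tuning settings but live under their own znode parents.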
    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");

    conf3 = new Configuration(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf2, "cluster2", null, true);

    utility3 = new HBaseTestingUtility(conf3);
    utility3.setZkCluster(miniZK);
    new ZooKeeperWatcher(conf3, "cluster3", null, true);
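
    // Cluster keys have the form quorum:clientPort:znodeParent; they are used
    // below when adding replication peers.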
    clusterKey1 = conf1.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf1.get("hbase.zookeeper.property.clientPort") + ":/1";

    clusterKey2 = conf2.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf2.get("hbase.zookeeper.property.clientPort") + ":/2";

    clusterKey3 = conf3.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf3.get("hbase.zookeeper.property.clientPort") + ":/3";

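    // Table with one family that is replicated (GLOBAL scope) and one that is
    // not, so only edits to 'f' should travel between clusters.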
    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
  }

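  /**
   * Wires three clusters into a replication ring (1 -> 2 -> 3 -> 1), checks
   * that puts and deletes travel all the way around, and uses the
   * CoprocessorCounter to verify that nothing loops back to its cluster of
   * origin: each cluster must see exactly three puts and three deletes.
   */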
  @Test(timeout=300000)
  public void testCyclicReplication() throws Exception {
    LOG.info("testCyclicReplication");
    utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
    ReplicationAdmin admin3 = new ReplicationAdmin(conf3);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

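    // Wire the ring: cluster1 replicates to 2, 2 to 3, and 3 back to 1.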
    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey3);
    admin3.addPeer("1", clusterKey1);

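    // Each put is made on one cluster and waited for on the last cluster in
    // the ring; the intermediate cluster is then checked as well.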
    putAndWait(row, famName, htable1, htable3);
    check(row, famName, htable2);

    putAndWait(row1, famName, htable2, htable1);
    check(row1, famName, htable3);
    putAndWait(row2, famName, htable3, htable2);
    check(row2, famName, htable1);

    deleteAndWait(row, htable1, htable3);
    deleteAndWait(row1, htable2, htable1);
    deleteAndWait(row2, htable3, htable2);

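    // Every cluster should have seen exactly three puts and three deletes:
    // its own edit plus one replicated from each of the other two clusters.
    // A higher count would mean an edit was replicated back to its origin.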
182 assertEquals("Puts were replicated back ", 3, getCount(htable1, put));
183 assertEquals("Puts were replicated back ", 3, getCount(htable2, put));
184 assertEquals("Puts were replicated back ", 3, getCount(htable3, put));
185 assertEquals("Deletes were replicated back ", 3, getCount(htable1, delete));
186 assertEquals("Deletes were replicated back ", 3, getCount(htable2, delete));
187 assertEquals("Deletes were replicated back ", 3, getCount(htable3, delete));
188 utility3.shutdownMiniCluster();
189 utility2.shutdownMiniCluster();
190 utility1.shutdownMiniCluster();
191 }
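  /**
   * Master/master replication between two clusters: a put and a delete made
   * on either cluster show up on the other, and the counters confirm that
   * each cluster applies exactly two puts and two deletes (its own plus the
   * replicated one) rather than having edits bounce back and forth.
   */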
  @Test(timeout=300000)
  public void testSimplePutDelete() throws Exception {
    LOG.info("testSimplePutDelete");
    utility1.startMiniCluster();
    utility2.startMiniCluster();

    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);

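    // Make the two clusters peers of each other (master/master).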
    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey1);

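    // Put a row on each cluster, wait for it on the peer, then delete the
    // rows and wait for the deletes to replicate as well.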
    putAndWait(row, famName, htable1, htable2);
    putAndWait(row1, famName, htable2, htable1);

    assertEquals("Puts were replicated back ", 2, getCount(htable1, put));

    deleteAndWait(row, htable1, htable2);

    assertEquals("Puts were replicated back ", 2, getCount(htable2, put));

    deleteAndWait(row1, htable2, htable1);

    assertEquals("Deletes were replicated back ", 2, getCount(htable1, delete));
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }

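  /**
   * Reads the put or delete counter that the CoprocessorCounter serves when a
   * Get carries the "count" attribute.
   */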
  private int getCount(HTable t, byte[] type) throws IOException {
    Get test = new Get(row);
    test.setAttribute("count", new byte[]{});
    Result res = t.get(test);
    return Bytes.toInt(res.getValue(count, type));
  }

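  /**
   * Deletes a row on the source cluster and waits until it disappears from
   * the target cluster, failing after NB_RETRIES attempts.
   */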
  private void deleteAndWait(byte[] row, HTable source, HTable target)
      throws Exception {
    Delete del = new Delete(row);
    source.delete(del);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for del replication");
      }
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        Thread.sleep(SLEEP_TIME);
      } else {
        break;
      }
    }
  }

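  /**
   * Asserts that the given row is already present on the given table.
   */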
  private void check(byte[] row, byte[] fam, HTable t) throws IOException {
    Get get = new Get(row);
    Result res = t.get(get);
    if (res.size() == 0) {
      fail("Row is missing");
    }
  }

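  /**
   * Puts a row on the source cluster and waits until it shows up on the
   * target cluster, failing after NB_RETRIES attempts.
   */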
  private void putAndWait(byte[] row, byte[] fam, HTable source, HTable target)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);

    Get get = new Get(row);
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(res.value(), row);
        break;
      }
    }
  }
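  /**
   * Region observer that counts the puts and deletes applied to a region.
   * When a Get carries the "count" attribute, it bypasses the normal read
   * and returns the current counters instead, keyed by the 'put' and
   * 'delete' qualifiers that getCount() looks up.
   */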
  public static class CoprocessorCounter extends BaseRegionObserver {
    private int nCount = 0;
    private int nDelete = 0;

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
        final Put put, final WALEdit edit, final boolean writeToWAL)
        throws IOException {
      nCount++;
    }

    @Override
    public void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Delete delete, final WALEdit edit, final boolean writeToWAL)
        throws IOException {
      nDelete++;
    }

    @Override
    public void preGet(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Get get, final List<KeyValue> result) throws IOException {
      if (get.getAttribute("count") != null) {
        result.clear();
        result.add(new KeyValue(count, count, delete, Bytes.toBytes(nDelete)));
        result.add(new KeyValue(count, count, put, Bytes.toBytes(nCount)));
        c.bypass();
      }
    }
  }
}