/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

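/**
 * Test {@link FSUtils}.
 */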
public class TestFSUtils {
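  /**
   * isHDFS() should report false while the default filesystem is still the
   * local one, and true once a mini DFS cluster is up; with dfs.support.append
   * enabled, isAppendSupported() should report true as well.
   */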
  @Test public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }

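  /**
   * Writes {@code dataSize} zero bytes to the given path, creating the file
   * if necessary.
   */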
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
      throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

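  /**
   * Verifies that FSUtils.computeHDFSBlocksDistribution() reports per-host
   * block weights consistent with how a MiniDFSCluster places two-, three-
   * and one-block files across three or four datanodes.
   */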
  @Test public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
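      // Scenario 1: start a mini DFS cluster with three datanodes.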
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

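      // Create a file that spans two blocks.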
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

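      // With the default replication factor of 3 and exactly three datanodes,
      // every host should hold a replica of every block, so each host's
      // weight should equal the total weight of the unique blocks.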
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      long uniqueBlocksTotalWeight =
        blocksDistribution.getUniqueBlocksTotalWeight();
      for (String host : hosts) {
        long weight = blocksDistribution.getWeight(host);
        assertEquals(uniqueBlocksTotalWeight, weight);
      }
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
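      // Scenario 2: a cluster with four datanodes and a three-block file.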
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

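      // With replication factor 3, each of the three blocks is placed on
      // three of the four datanodes, so at least one host carries a replica
      // of every block; that top host's weight should equal the total weight
      // of the unique blocks.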
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      long uniqueBlocksTotalWeight =
        blocksDistribution.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution.getTopHosts().get(0);
      long weight = blocksDistribution.getWeight(topHost);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
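      // Scenario 3: a cluster with four datanodes and a single-block file.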
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

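      // With replication factor 3, the single block lives on exactly three of
      // the four datanodes, so only three hosts should show up as top hosts.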
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      assertEquals(3, blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }

}