/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
/**
 * Test {@link FSUtils}.
 */
public class TestFSUtils {
  @Test public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    // no mini cluster is running yet, so the configured filesystem is the
    // local one, not HDFS
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }
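
  /*
   * A rough sketch of what the two checks above presumably reduce to; this is
   * an assumption for illustration, see FSUtils for the actual logic:
   *
   *   isHDFS(conf)            ~= "hdfs".equalsIgnoreCase(
   *                                  FileSystem.get(conf).getUri().getScheme())
   *   isAppendSupported(conf) ~= conf.getBoolean("dfs.support.append", false)
   *
   * This is why the test flips "dfs.support.append" and only expects isHDFS
   * to return true once a mini DFS cluster is running.
   */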

  /**
   * Writes {@code dataSize} zero-filled bytes to {@code file} on the given
   * filesystem, creating the file first.
   */
  private void writeDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

  @Test public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // set up a cluster with 3 nodes
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with two blocks
      testFile = new Path("/test1.txt");
      writeDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // given that the default replication factor is 3, the same as the
      // number of datanodes, every host stores a replica of every block;
      // the locality index for each host should be 100%, i.e. getWeight for
      // each host should equal getUniqueBlocksTotalWeight
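      // Worked numbers for the check below (assuming the default replication
      // factor of 3 is in effect on the mini cluster): the file has 2 blocks
      // of DEFAULT_BLOCK_SIZE == 1024 bytes, so uniqueBlocksTotalWeight is
      // 2 * 1024 == 2048, and each of the 3 hosts should report that weight.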
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      long uniqueBlocksTotalWeight =
        blocksDistribution.getUniqueBlocksTotalWeight();
      for (String host : hosts) {
        long weight = blocksDistribution.getWeight(host);
        assertEquals(uniqueBlocksTotalWeight, weight);
      }
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");
      writeDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // given that the default replication factor is 3, we will have a total
      // of 9 block replicas; each of the 3 blocks misses exactly one of the
      // 4 hosts, so by the pigeonhole principle at least one host stores a
      // replica of every block, and the host with the highest weight should
      // have weight == 3 * DEFAULT_BLOCK_SIZE
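      // Worked numbers for the check below: uniqueBlocksTotalWeight is
      // 3 * 1024 == 3072, and the top host, storing all 3 blocks, should
      // report exactly that weight.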
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      long uniqueBlocksTotalWeight =
        blocksDistribution.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution.getTopHosts().get(0);
      long weight = blocksDistribution.getWeight(topHost);
      assertEquals(uniqueBlocksTotalWeight, weight);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");
      writeDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // given that the default replication factor is 3, we will have a total
      // of 3 block replicas spread across the 4 hosts; thus exactly one host
      // stores no replica and carries no weight
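      // Worked numbers for the check below: the single 1024-byte block is
      // replicated to 3 of the 4 hosts, so getTopHosts should list exactly
      // 3 hosts, each with weight 1024.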
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      assertEquals(3, blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }
}