1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.util;
21
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.net.URI;
27 import java.net.URISyntaxException;
28 import java.util.ArrayList;
29 import java.util.HashMap;
30 import java.util.List;
31 import java.util.Map;
32
33 import org.apache.commons.logging.Log;
34 import org.apache.commons.logging.LogFactory;
35 import org.apache.hadoop.conf.Configuration;
36 import org.apache.hadoop.fs.BlockLocation;
37 import org.apache.hadoop.fs.FSDataInputStream;
38 import org.apache.hadoop.fs.FSDataOutputStream;
39 import org.apache.hadoop.fs.FileStatus;
40 import org.apache.hadoop.fs.FileSystem;
41 import org.apache.hadoop.fs.Path;
42 import org.apache.hadoop.fs.PathFilter;
43 import org.apache.hadoop.hbase.HConstants;
44 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
45 import org.apache.hadoop.hbase.HRegionInfo;
46 import org.apache.hadoop.hbase.RemoteExceptionHandler;
47 import org.apache.hadoop.hbase.master.HMaster;
48 import org.apache.hadoop.hbase.regionserver.HRegion;
49 import org.apache.hadoop.hdfs.DistributedFileSystem;
50 import org.apache.hadoop.io.SequenceFile;
51 import org.apache.hadoop.util.ReflectionUtils;
52 import org.apache.hadoop.util.StringUtils;
53
54
55
56
57 public abstract class FSUtils {
  // Shared logger for FSUtils and its concrete subclasses.
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  // Not directly instantiable; concrete instances come from getInstance().
  protected FSUtils() {
    super();
  }
63
64 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
65 String scheme = fs.getUri().getScheme();
66 if (scheme == null) {
67 LOG.warn("Could not find scheme for uri " +
68 fs.getUri() + ", default to hdfs");
69 scheme = "hdfs";
70 }
71 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
72 scheme + ".impl", FSHDFSUtils.class);
73 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
74 return fsUtils;
75 }
76
77
78
79
80
81
82
83
84 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
85 throws IOException {
86 return fs.exists(dir) && fs.delete(dir, true);
87 }
88
89
90
91
92
93
94
95
96 public Path checkdir(final FileSystem fs, final Path dir) throws IOException {
97 if (!fs.exists(dir)) {
98 fs.mkdirs(dir);
99 }
100 return dir;
101 }
102
103
104
105
106
107
108
109
110 public static Path create(final FileSystem fs, final Path p)
111 throws IOException {
112 if (fs.exists(p)) {
113 throw new IOException("File already exists " + p.toString());
114 }
115 if (!fs.createNewFile(p)) {
116 throw new IOException("Failed create of " + p);
117 }
118 return p;
119 }
120
121
122
123
124
125
126
127 public static void checkFileSystemAvailable(final FileSystem fs)
128 throws IOException {
129 if (!(fs instanceof DistributedFileSystem)) {
130 return;
131 }
132 IOException exception = null;
133 DistributedFileSystem dfs = (DistributedFileSystem) fs;
134 try {
135 if (dfs.exists(new Path("/"))) {
136 return;
137 }
138 } catch (IOException e) {
139 exception = RemoteExceptionHandler.checkIOException(e);
140 }
141 try {
142 fs.close();
143 } catch (Exception e) {
144 LOG.error("file system close failed: ", e);
145 }
146 IOException io = new IOException("File system is not available");
147 io.initCause(exception);
148 throw io;
149 }
150
151
152
153
154
155
156 public static void checkDfsSafeMode(final Configuration conf)
157 throws IOException {
158 boolean isInSafeMode = false;
159 FileSystem fs = FileSystem.get(conf);
160 if (fs instanceof DistributedFileSystem) {
161 DistributedFileSystem dfs = (DistributedFileSystem)fs;
162
163 isInSafeMode = dfs.setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
164 }
165 if (isInSafeMode) {
166 throw new IOException("File system is in safemode, it can't be written now");
167 }
168 }
169
170
171
172
173
174
175
176
177
178 public static String getVersion(FileSystem fs, Path rootdir)
179 throws IOException {
180 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
181 String version = null;
182 if (fs.exists(versionFile)) {
183 FSDataInputStream s =
184 fs.open(versionFile);
185 try {
186 version = DataInputStream.readUTF(s);
187 } catch (EOFException eof) {
188 LOG.warn("Version file was empty, odd, will try to set it.");
189 } finally {
190 s.close();
191 }
192 }
193 return version;
194 }
195
196
197
198
199
200
201
202
203
204
205 public static void checkVersion(FileSystem fs, Path rootdir,
206 boolean message) throws IOException {
207 checkVersion(fs, rootdir, message, 0);
208 }
209
210
211
212
213
214
215
216
217
218
219
220 public static void checkVersion(FileSystem fs, Path rootdir,
221 boolean message, int wait) throws IOException {
222 String version = getVersion(fs, rootdir);
223
224 if (version == null) {
225 if (!rootRegionExists(fs, rootdir)) {
226
227
228 FSUtils.setVersion(fs, rootdir, wait);
229 return;
230 }
231 } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0)
232 return;
233
234
235
236 String msg = "File system needs to be upgraded."
237 + " You have version " + version
238 + " and I want version " + HConstants.FILE_SYSTEM_VERSION
239 + ". Run the '${HBASE_HOME}/bin/hbase migrate' script.";
240 if (message) {
241 System.out.println("WARNING! " + msg);
242 }
243 throw new FileSystemVersionException(msg);
244 }
245
246
247
248
249
250
251
252
253 public static void setVersion(FileSystem fs, Path rootdir)
254 throws IOException {
255 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0);
256 }
257
258
259
260
261
262
263
264
265
266 public static void setVersion(FileSystem fs, Path rootdir, int wait)
267 throws IOException {
268 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait);
269 }
270
271
272
273
274
275
276
277
278
279
280 public static void setVersion(FileSystem fs, Path rootdir, String version,
281 int wait) throws IOException {
282 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
283 while (true) {
284 try {
285 FSDataOutputStream s = fs.create(versionFile);
286 s.writeUTF(version);
287 LOG.debug("Created version file at " + rootdir.toString() +
288 " set its version at:" + version);
289 s.close();
290 return;
291 } catch (IOException e) {
292 if (wait > 0) {
293 LOG.warn("Unable to create version file at " + rootdir.toString() +
294 ", retrying: " + e.getMessage());
295 fs.delete(versionFile, false);
296 try {
297 Thread.sleep(wait);
298 } catch (InterruptedException ex) {
299
300 }
301 }
302 }
303 }
304 }
305
306
307
308
309
310
311
312
313
314 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
315 int wait) throws IOException {
316 while (true) {
317 try {
318 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
319 return fs.exists(filePath);
320 } catch (IOException ioe) {
321 if (wait > 0) {
322 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
323 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
324 try {
325 Thread.sleep(wait);
326 } catch (InterruptedException ie) {
327 Thread.interrupted();
328 break;
329 }
330 } else {
331 throw ioe;
332 }
333 }
334 }
335 return false;
336 }
337
338
339
340
341
342
343
344
345 public static String getClusterId(FileSystem fs, Path rootdir)
346 throws IOException {
347 Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
348 String clusterId = null;
349 if (fs.exists(idPath)) {
350 FSDataInputStream in = fs.open(idPath);
351 try {
352 clusterId = in.readUTF();
353 } catch (EOFException eof) {
354 LOG.warn("Cluster ID file "+idPath.toString()+" was empty");
355 } finally{
356 in.close();
357 }
358 } else {
359 LOG.warn("Cluster ID file does not exist at " + idPath.toString());
360 }
361 return clusterId;
362 }
363
364
365
366
367
368
369
370
371
372
373 public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
374 int wait) throws IOException {
375 while (true) {
376 try {
377 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
378 FSDataOutputStream s = fs.create(filePath);
379 s.writeUTF(clusterId);
380 s.close();
381 if (LOG.isDebugEnabled()) {
382 LOG.debug("Created cluster ID file at " + filePath.toString() +
383 " with ID: " + clusterId);
384 }
385 return;
386 } catch (IOException ioe) {
387 if (wait > 0) {
388 LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
389 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
390 try {
391 Thread.sleep(wait);
392 } catch (InterruptedException ie) {
393 Thread.interrupted();
394 break;
395 }
396 } else {
397 throw ioe;
398 }
399 }
400 }
401 }
402
403
404
405
406
407
408
409
410 public static Path validateRootPath(Path root) throws IOException {
411 try {
412 URI rootURI = new URI(root.toString());
413 String scheme = rootURI.getScheme();
414 if (scheme == null) {
415 throw new IOException("Root directory does not have a scheme");
416 }
417 return root;
418 } catch (URISyntaxException e) {
419 IOException io = new IOException("Root directory path is not a valid " +
420 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
421 io.initCause(e);
422 throw io;
423 }
424 }
425
426
427
428
429
430
431
432 public static void waitOnSafeMode(final Configuration conf,
433 final long wait)
434 throws IOException {
435 FileSystem fs = FileSystem.get(conf);
436 if (!(fs instanceof DistributedFileSystem)) return;
437 DistributedFileSystem dfs = (DistributedFileSystem)fs;
438
439 while (dfs.setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET)) {
440 LOG.info("Waiting for dfs to exit safe mode...");
441 try {
442 Thread.sleep(wait);
443 } catch (InterruptedException e) {
444
445 }
446 }
447 }
448
449
450
451
452
453
454
455
456
457
458
459 public static String getPath(Path p) {
460 return p.toUri().getPath();
461 }
462
463
464
465
466
467
468
469 public static Path getRootDir(final Configuration c) throws IOException {
470 Path p = new Path(c.get(HConstants.HBASE_DIR));
471 FileSystem fs = p.getFileSystem(c);
472 return p.makeQualified(fs);
473 }
474
475
476
477
478
479
480
481
482
483 public static boolean rootRegionExists(FileSystem fs, Path rootdir)
484 throws IOException {
485 Path rootRegionDir =
486 HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
487 return fs.exists(rootRegionDir);
488 }
489
490
491
492
493
494
495
496
497
498 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
499 final FileSystem fs, FileStatus status, long start, long length)
500 throws IOException {
501 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
502 BlockLocation [] blockLocations =
503 fs.getFileBlockLocations(status, start, length);
504 for(BlockLocation bl : blockLocations) {
505 String [] hosts = bl.getHosts();
506 long len = bl.getLength();
507 blocksDistribution.addHostsAndBlockWeight(hosts, len);
508 }
509
510 return blocksDistribution;
511 }
512
513
514
515
516
517
518
519
520
521
522
523
524 public static boolean isMajorCompacted(final FileSystem fs,
525 final Path hbaseRootDir)
526 throws IOException {
527
528 FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
529 for (FileStatus tableDir : tableDirs) {
530
531
532
533
534 Path d = tableDir.getPath();
535 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
536 continue;
537 }
538 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
539 for (FileStatus regionDir : regionDirs) {
540 Path dd = regionDir.getPath();
541 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
542 continue;
543 }
544
545 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
546 for (FileStatus familyDir : familyDirs) {
547 Path family = familyDir.getPath();
548
549 FileStatus[] familyStatus = fs.listStatus(family);
550 if (familyStatus.length > 1) {
551 LOG.debug(family.toString() + " has " + familyStatus.length +
552 " files.");
553 return false;
554 }
555 }
556 }
557 }
558 return true;
559 }
560
561
562
563
564
565
566
567
568
569
570 public static int getTotalTableFragmentation(final HMaster master)
571 throws IOException {
572 Map<String, Integer> map = getTableFragmentation(master);
573 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
574 }
575
576
577
578
579
580
581
582
583
584
585 public static Map<String, Integer> getTableFragmentation(
586 final HMaster master)
587 throws IOException {
588 Path path = getRootDir(master.getConfiguration());
589
590 FileSystem fs = path.getFileSystem(master.getConfiguration());
591 return getTableFragmentation(fs, path);
592 }
593
594
595
596
597
598
599
600
601
602
603
604 public static Map<String, Integer> getTableFragmentation(
605 final FileSystem fs, final Path hbaseRootDir)
606 throws IOException {
607 Map<String, Integer> frags = new HashMap<String, Integer>();
608 int cfCountTotal = 0;
609 int cfFragTotal = 0;
610 DirFilter df = new DirFilter(fs);
611
612 FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
613 for (FileStatus tableDir : tableDirs) {
614
615
616
617
618 Path d = tableDir.getPath();
619 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
620 continue;
621 }
622 int cfCount = 0;
623 int cfFrag = 0;
624 FileStatus[] regionDirs = fs.listStatus(d, df);
625 for (FileStatus regionDir : regionDirs) {
626 Path dd = regionDir.getPath();
627 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
628 continue;
629 }
630
631 FileStatus[] familyDirs = fs.listStatus(dd, df);
632 for (FileStatus familyDir : familyDirs) {
633 cfCount++;
634 cfCountTotal++;
635 Path family = familyDir.getPath();
636
637 FileStatus[] familyStatus = fs.listStatus(family);
638 if (familyStatus.length > 1) {
639 cfFrag++;
640 cfFragTotal++;
641 }
642 }
643 }
644
645 frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100));
646 }
647
648 frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
649 return frags;
650 }
651
652
653
654
655
656
657
658
659 public static boolean isPre020FileLayout(final FileSystem fs,
660 final Path hbaseRootDir)
661 throws IOException {
662 Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
663 "70236052"), "info"), "mapfiles");
664 return fs.exists(mapfiles);
665 }
666
667
668
669
670
671
672
673
674
675
676
677
678 public static boolean isMajorCompactedPre020(final FileSystem fs,
679 final Path hbaseRootDir)
680 throws IOException {
681
682 FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs));
683 for (FileStatus tableDir : tableDirs) {
684
685
686
687
688 Path d = tableDir.getPath();
689 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
690 continue;
691 }
692 FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
693 for (FileStatus regionDir : regionDirs) {
694 Path dd = regionDir.getPath();
695 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
696 continue;
697 }
698
699 FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
700 for (FileStatus familyDir : familyDirs) {
701 Path family = familyDir.getPath();
702 FileStatus[] infoAndMapfile = fs.listStatus(family);
703
704 if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
705 LOG.debug(family.toString() +
706 " has more than just info and mapfile: " + infoAndMapfile.length);
707 return false;
708 }
709
710 for (int ll = 0; ll < 2; ll++) {
711 if (infoAndMapfile[ll].getPath().getName().equals("info") ||
712 infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
713 continue;
714 LOG.debug("Unexpected directory name: " +
715 infoAndMapfile[ll].getPath());
716 return false;
717 }
718
719
720 FileStatus[] familyStatus =
721 fs.listStatus(new Path(family, "mapfiles"));
722 if (familyStatus.length > 1) {
723 LOG.debug(family.toString() + " has " + familyStatus.length +
724 " files.");
725 return false;
726 }
727 }
728 }
729 }
730 return true;
731 }
732
733
734
735
736 public static class DirFilter implements PathFilter {
737 private final FileSystem fs;
738
739 public DirFilter(final FileSystem fs) {
740 this.fs = fs;
741 }
742
743 public boolean accept(Path p) {
744 boolean isValid = false;
745 try {
746 if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
747 isValid = false;
748 } else {
749 isValid = this.fs.getFileStatus(p).isDir();
750 }
751 } catch (IOException e) {
752 e.printStackTrace();
753 }
754 return isValid;
755 }
756 }
757
758
759
760
761
762
763
764
765 public static boolean isAppendSupported(final Configuration conf) {
766 boolean append = conf.getBoolean("dfs.support.append", false);
767 if (append) {
768 try {
769
770
771
772 SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
773 append = true;
774 } catch (SecurityException e) {
775 } catch (NoSuchMethodException e) {
776 append = false;
777 }
778 }
779 if (!append) {
780
781 try {
782 FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
783 append = true;
784 } catch (NoSuchMethodException e) {
785 append = false;
786 }
787 }
788 return append;
789 }
790
791
792
793
794
795
796 public static boolean isHDFS(final Configuration conf) throws IOException {
797 FileSystem fs = FileSystem.get(conf);
798 String scheme = fs.getUri().getScheme();
799 return scheme.equalsIgnoreCase("hdfs");
800 }
801
802
803
804
805
806
807
808
809
  /**
   * Recover the lease on the file at {@code p} so it can be opened for write
   * by this process. Filesystem-specific; see the concrete subclass selected
   * by {@link #getInstance(FileSystem, Configuration)}.
   * NOTE(review): implementations presumably block until recovery completes —
   * confirm against the concrete subclass.
   * @param fs filesystem holding the file
   * @param p path of the file whose lease should be recovered
   * @param conf configuration for the recovery
   * @throws IOException on recovery failure
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
    Configuration conf) throws IOException;
812
813
814
815
816
817
818
819
820 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
821 throws IOException {
822
823 FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs));
824 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
825 for (FileStatus dir: dirs) {
826 Path p = dir.getPath();
827 String tableName = p.getName();
828 if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) {
829 tabledirs.add(p);
830 }
831 }
832 return tabledirs;
833 }
834
835 public static Path getTablePath(Path rootdir, byte [] tableName) {
836 return getTablePath(rootdir, Bytes.toString(tableName));
837 }
838
839 public static Path getTablePath(Path rootdir, final String tableName) {
840 return new Path(rootdir, tableName);
841 }
842
843
844
845
846
847
848 public static FileSystem getCurrentFileSystem(Configuration conf)
849 throws IOException {
850 return getRootDir(conf).getFileSystem(conf);
851 }
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866 public static Map<String, Path> getTableStoreFilePathMap(
867 final FileSystem fs, final Path hbaseRootDir)
868 throws IOException {
869 Map<String, Path> map = new HashMap<String, Path>();
870
871
872
873
874 DirFilter df = new DirFilter(fs);
875
876 FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
877 for (FileStatus tableDir : tableDirs) {
878
879
880
881 Path d = tableDir.getPath();
882 if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
883 continue;
884 }
885 FileStatus[] regionDirs = fs.listStatus(d, df);
886 for (FileStatus regionDir : regionDirs) {
887 Path dd = regionDir.getPath();
888 if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
889 continue;
890 }
891
892 FileStatus[] familyDirs = fs.listStatus(dd, df);
893 for (FileStatus familyDir : familyDirs) {
894 Path family = familyDir.getPath();
895
896
897 FileStatus[] familyStatus = fs.listStatus(family);
898 for (FileStatus sfStatus : familyStatus) {
899 Path sf = sfStatus.getPath();
900 map.put( sf.getName(), sf);
901 }
902
903 }
904 }
905 }
906 return map;
907 }
908
909
910
911
912
913
914
915
916
917
918 public static FileStatus [] listStatus(final FileSystem fs,
919 final Path dir, final PathFilter filter) throws IOException {
920 FileStatus [] status = null;
921 try {
922 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
923 } catch (FileNotFoundException fnfe) {
924
925 LOG.info(dir + " doesn't exist");
926 }
927 if (status == null || status.length < 1) return null;
928 return status;
929 }
930 }