/*
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.util.Bytes;

/**
 * Various types of {@link HFile} blocks. Do not rely on the ordinal values of
 * these enum constants: they are declared in the order in which the
 * corresponding blocks appear in a version 2 {@link HFile}.
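 *
 * <p>Each block type is identified on disk by an 8-byte magic record. A
 * minimal round-trip sketch using the methods below (IOException handling
 * omitted):
 *
 * <pre>{@code
 * byte[] b = new byte[BlockType.MAGIC_LENGTH];
 * BlockType.DATA.put(b, 0);                      // write DATA's magic
 * BlockType t = BlockType.parse(b, 0, b.length); // parses back to DATA
 * }</pre>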
 */
public enum BlockType {

  // Scanned block section

  /** Data block, both versions */
  DATA("DATABLK*"),

  /** Version 2 leaf index block. Appears in the data block section */
  LEAF_INDEX("IDXLEAF2"),

  /** Bloom filter block, version 2 */
  BLOOM_CHUNK("BLMFBLK2"),

  // Non-scanned block section

  /** Meta blocks */
  META("METABLKc"),

  /** Intermediate-level version 2 index in the non-data block section */
  INTERMEDIATE_INDEX("IDXINTE2"),

  // Load-on-open section

  /** Root index block, also used for the single-level meta index, version 2 */
  ROOT_INDEX("IDXROOT2"),

  /** File info, version 2 */
  FILE_INFO("FILEINF2"),

  /** Bloom filter metadata, version 2 */
  BLOOM_META("BLMFMET2"),

  // Trailer

  /** Fixed file trailer, both versions (always just a magic string) */
  TRAILER("TRABLK\"$"),

  // Legacy blocks

  /** Block index magic string in version 1 */
  INDEX_V1("IDXBLK)+");

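  /** Length in bytes of every magic record; each magic string above is exactly this long. */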
  public static final int MAGIC_LENGTH = 8;

  /** The on-disk magic record for this block type, {@link #MAGIC_LENGTH} bytes long. */
  private final byte[] magic;

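  /**
   * @param magicStr the 8-character magic string for this block type; the
   *     assertion below enforces the expected length
   */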
  private BlockType(String magicStr) {
    magic = Bytes.toBytes(magicStr);
    assert magic.length == MAGIC_LENGTH;
  }

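  /** Writes this block type's magic record to the given output stream. */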
  public void writeToStream(OutputStream out) throws IOException {
    out.write(magic);
  }

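  /** Writes this block type's magic record to the given {@link DataOutput}. */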
  public void write(DataOutput out) throws IOException {
    out.write(magic);
  }

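  /** Writes this block type's magic record into the buffer, advancing its position. */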
  public void write(ByteBuffer buf) {
    buf.put(magic);
  }

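  /**
   * Looks up the block type whose magic record matches {@code length} bytes
   * of {@code buf} starting at {@code offset}.
   *
   * @return the matching block type
   * @throws IOException if {@code length} is not {@link #MAGIC_LENGTH} or no
   *     known magic matches
   */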
  public static BlockType parse(byte[] buf, int offset, int length)
      throws IOException {
    if (length != MAGIC_LENGTH) {
      throw new IOException("Magic record of invalid length: "
          + Bytes.toStringBinary(buf, offset, length));
    }

    for (BlockType blockType : values()) {
      if (Bytes.compareTo(blockType.magic, 0, MAGIC_LENGTH, buf, offset,
          MAGIC_LENGTH) == 0) {
        return blockType;
      }
    }

    throw new IOException("Invalid HFile block magic: "
        + Bytes.toStringBinary(buf, offset, MAGIC_LENGTH));
  }

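  /**
   * Reads a magic record from the stream and returns the matching block type.
   * A minimal round-trip sketch, using in-memory streams purely for
   * illustration (IOException handling omitted):
   *
   * <pre>{@code
   * ByteArrayOutputStream baos = new ByteArrayOutputStream();
   * BlockType.META.write(new DataOutputStream(baos));
   * BlockType t = BlockType.read(new DataInputStream(
   *     new ByteArrayInputStream(baos.toByteArray()))); // BlockType.META
   * }</pre>
   */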
  public static BlockType read(DataInputStream in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.readFully(buf);
    return parse(buf, 0, buf.length);
  }

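  /**
   * Reads a magic record from the buffer and returns the matching block type,
   * advancing the buffer's position past the magic. The buffer must be
   * array-backed: {@code buf.array()} throws
   * {@link UnsupportedOperationException} for a direct buffer. A minimal
   * sketch:
   *
   * <pre>{@code
   * ByteBuffer bb = ByteBuffer.allocate(BlockType.MAGIC_LENGTH);
   * BlockType.ROOT_INDEX.write(bb);
   * bb.flip();
   * BlockType t = BlockType.read(bb); // ROOT_INDEX; position is now 8
   * }</pre>
   */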
  public static BlockType read(ByteBuffer buf) throws IOException {
    BlockType blockType = parse(buf.array(),
        buf.arrayOffset() + buf.position(),
        Math.min(buf.limit() - buf.position(), MAGIC_LENGTH));

    // parse() succeeded, so MAGIC_LENGTH bytes were available; skip past them.
    buf.position(buf.position() + MAGIC_LENGTH);
    return blockType;
  }

  /**
   * Writes the magic record to the given position in the byte array.
   *
   * @param bytes the byte array to write to
   * @param offset the position in the array at which to write
   * @return the offset just past the written magic record
   */
  public int put(byte[] bytes, int offset) {
    System.arraycopy(magic, 0, bytes, offset, MAGIC_LENGTH);
    return offset + MAGIC_LENGTH;
  }

  /**
   * Reads a magic record of length {@link #MAGIC_LENGTH} from the given
   * stream and expects it to match this block type.
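   *
   * A minimal sketch, given a {@code DataInputStream in} positioned at the
   * start of a magic record:
   *
   * <pre>{@code
   * BlockType.DATA.readAndCheck(in); // throws IOException on a mismatch
   * }</pre>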
   */
  public void readAndCheck(DataInputStream in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.readFully(buf);
    if (Bytes.compareTo(buf, magic) != 0) {
      throw new IOException("Invalid magic: expected "
          + Bytes.toStringBinary(magic) + ", got " + Bytes.toStringBinary(buf));
    }
  }

  /**
   * Reads a magic record of length {@link #MAGIC_LENGTH} from the given
   * byte buffer and expects it to match this block type.
   */
  public void readAndCheck(ByteBuffer in) throws IOException {
    byte[] buf = new byte[MAGIC_LENGTH];
    in.get(buf);
    if (Bytes.compareTo(buf, magic) != 0) {
      throw new IOException("Invalid magic: expected "
          + Bytes.toStringBinary(magic) + ", got " + Bytes.toStringBinary(buf));
    }
  }

}