1 /**
2 * Copyright 2010 The Apache Software Foundation
3 *
4 * Licensed to the Apache Software Foundation (ASF) under one
5 * or more contributor license agreements. See the NOTICE file
6 * distributed with this work for additional information
7 * regarding copyright ownership. The ASF licenses this file
8 * to you under the Apache License, Version 2.0 (the
9 * "License"); you may not use this file except in compliance
10 * with the License. You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20 package org.apache.hadoop.hbase.mapred;
21
22 import java.io.IOException;
23
24 import org.apache.hadoop.hbase.client.HTable;
25 import org.apache.hadoop.hbase.client.Result;
26 import org.apache.hadoop.hbase.filter.Filter;
27 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
28 import org.apache.hadoop.mapred.RecordReader;
29
30
/**
 * Iterate over an HBase table's data, returning (ImmutableBytesWritable, Result)
 * key/value pairs to old-API (org.apache.hadoop.mapred) map tasks.
 */
34 public class TableRecordReader
35 implements RecordReader<ImmutableBytesWritable, Result> {
36
37 private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
38
39 /**
40 * Restart from survivable exceptions by creating a new scanner.
41 *
42 * @param firstRow
43 * @throws IOException
44 */
45 public void restart(byte[] firstRow) throws IOException {
46 this.recordReaderImpl.restart(firstRow);
47 }
48
49 /**
50 * Build the scanner. Not done in constructor to allow for extension.
51 *
52 * @throws IOException
53 */
54 public void init() throws IOException {
55 this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
56 }
57
58 /**
59 * @param htable the {@link HTable} to scan.
60 */
61 public void setHTable(HTable htable) {
62 this.recordReaderImpl.setHTable(htable);
63 }
64
65 /**
66 * @param inputColumns the columns to be placed in {@link Result}.
67 */
68 public void setInputColumns(final byte [][] inputColumns) {
69 this.recordReaderImpl.setInputColumns(inputColumns);
70 }
71
72 /**
73 * @param startRow the first row in the split
74 */
75 public void setStartRow(final byte [] startRow) {
76 this.recordReaderImpl.setStartRow(startRow);
77 }
78
79 /**
80 *
81 * @param endRow the last row in the split
82 */
83 public void setEndRow(final byte [] endRow) {
84 this.recordReaderImpl.setEndRow(endRow);
85 }
86
87 /**
88 * @param rowFilter the {@link Filter} to be used.
89 */
90 public void setRowFilter(Filter rowFilter) {
91 this.recordReaderImpl.setRowFilter(rowFilter);
92 }
93
94 public void close() {
95 this.recordReaderImpl.close();
96 }
97
98 /**
99 * @return ImmutableBytesWritable
100 *
101 * @see org.apache.hadoop.mapred.RecordReader#createKey()
102 */
103 public ImmutableBytesWritable createKey() {
104 return this.recordReaderImpl.createKey();
105 }
106
107 /**
108 * @return RowResult
109 *
110 * @see org.apache.hadoop.mapred.RecordReader#createValue()
111 */
112 public Result createValue() {
113 return this.recordReaderImpl.createValue();
114 }
115
116 public long getPos() {
117
118 // This should be the ordinal tuple in the range;
119 // not clear how to calculate...
120 return this.recordReaderImpl.getPos();
121 }
122
123 public float getProgress() {
124 // Depends on the total number of tuples and getPos
125 return this.recordReaderImpl.getPos();
126 }
127
128 /**
129 * @param key HStoreKey as input key.
130 * @param value MapWritable as input value
131 * @return true if there was more data
132 * @throws IOException
133 */
134 public boolean next(ImmutableBytesWritable key, Result value)
135 throws IOException {
136 return this.recordReaderImpl.next(key, value);
137 }
138 }