The following document contains the results of RAT (Release Audit Tool).
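A report in this form can be regenerated by running Apache RAT over the unpacked source tree; one common way, assuming Maven is available, is the RAT plugin goal mvn org.apache.rat:apache-rat-plugin:check. The plugin version and any exclusion list used for this particular run are not recorded in the report below.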
*****************************************************
Summary
-------
Notes: 5
Binaries: 8
Archives: 0
Standards: 751

Apache Licensed: 628
Generated Documents: 0

JavaDocs are generated, so a license header is optional
Generated files do not require license headers

123 Unknown Licenses

*******************************

Unapproved licenses:

bin/local-regionservers.sh bin/set_meta_block_caching.rb bin/local-master-backup.sh src/examples/mapreduce/index-builder-setup.rb src/main/java/org/apache/hadoop/hbase/executor/HBaseExecutorService.java src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionEventData.java src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/MasterCloseRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java 
src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java src/main/java/org/apache/hadoop/hbase/regionserver/RSZookeeperUpdater.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/LogEntryVisitor.java src/main/resources/hbase-webapps/master/zk.jsp src/main/resources/hbase-webapps/master/table.jsp src/main/resources/hbase-webapps/master/index.html src/main/resources/hbase-webapps/master/master.jsp src/main/resources/hbase-webapps/regionserver/regionserver.jsp src/main/resources/hbase-webapps/regionserver/index.html src/main/resources/hbase-webapps/static/hbase.css src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html src/docbkx/sample_article.xml src/docbkx/book.xml src/site/site.vm src/site/site.xml src/assembly/bin.xml src/assembly/src.xml src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java src/test/ruby/test_helper.rb src/test/resources/log4j.properties src/test/resources/mapred-queues.xml .git/config .git/info/exclude .git/hooks/commit-msg.sample .git/hooks/applypatch-msg.sample .git/hooks/post-update.sample .git/hooks/pre-commit.sample .git/hooks/pre-rebase.sample .git/hooks/pre-applypatch.sample .git/hooks/update.sample .git/hooks/post-commit.sample .git/hooks/prepare-commit-msg.sample .git/hooks/post-receive.sample .git/description .git/HEAD CHANGES.txt cloudera/do-release-build cloudera/apply-patches cloudera/install_hbase.sh cloudera/patches/0019-CLOUDERA-BUILD.-Fix-copy-of-bin-to-be-cp-a.patch cloudera/patches/0021-HBASE-3008-Memstore.updateColumnValue-passes-wrong-f.patch cloudera/patches/0003-CLOUDERA-BUILD.-Switch-to-CDH3b3-snapshot-in-todd-s-.patch cloudera/patches/0018-CLOUDERA-BUILD.-Build-site-as-part-of-release-build.patch cloudera/patches/0008-CLOUDERA-BUILD.-hbase-config.sh-should-set-HBASE_PID.patch cloudera/patches/0005-Re-enable-log-split-test.patch cloudera/patches/0016-CLOUDERA-BUILD.-HBase-running-on-secure-hadoop-tempo.patch cloudera/patches/0020-Fix-src-assembly-to-make-java-libs-644-and-not-inclu.patch cloudera/patches/0014-HBASE-3000.-Add-hbase-classpath-command.patch cloudera/patches/0015-HBASE-3001.-TableMapReduceUtil-should-always-add-dep.patch cloudera/patches/0025-HBASE-3096.-TestCompaction-timing-out.patch cloudera/patches/0006-HBASE-2773.-Check-for-null-values-in-meta-in-test-ut.patch cloudera/patches/0028-CLOUDERA-BUILD.-Fix-versionless-jar-naming-symlinks-.patch cloudera/patches/0002-CLOUDERA-BUILD.-Add-build-infrastructure.patch cloudera/patches/0017-HBASE-2782.-QoS-for-META-table-access.patch cloudera/patches/0007-CLOUDERA-BUILD.-Include-cloudera-dir-in-src-assembly.patch cloudera/patches/0023-CLOUDERA-BUILD.-cloudera-directory-should-get-instal.patch cloudera/patches/0010-HBASE-2467.-Concurrent-flushers-in-HLog-sync-using-H.patch cloudera/patches/0001-Updating-the-site-for-0.89.20100924.patch cloudera/patches/0009-CLOUDERA-BUILD.-rsync-all-of-lib-into-target-directo.patch 
cloudera/patches/0022-CLOUDERA-BUILD.-Change-wrapper-scripts-to-not-be-dep.patch cloudera/patches/0026-CLOUDERA-BUILD.-Update-hadoop-version.patch cloudera/patches/0027-HBASE-3101.-bin-assembly-should-include-tests-and-so.patch cloudera/patches/0011-SequenceFileLogWriter-doesn-t-need-to-actually-call-.patch cloudera/CHANGES.cloudera.txt cloudera/build.properties cloudera/README.cloudera conf/log4j.properties conf/regionservers conf/hadoop-metrics.properties ******************************* Archives: ***************************************************** Files with Apache License headers will be marked AL Binary files (which do not require AL headers) will be marked B Compressed archives will be marked A Notices, licenses etc will be marked N AL bin/replication/add_peer.rb AL bin/replication/copy_tables_desc.rb AL bin/rename_table.rb AL bin/add_table.rb AL bin/start-hbase.sh AL bin/hbase-daemon.sh AL bin/regionservers.sh AL bin/check_meta.rb AL bin/stop-hbase.sh AL bin/hirb.rb AL bin/hbase-config.sh !????? bin/local-regionservers.sh AL bin/hbase-daemons.sh !????? bin/set_meta_block_caching.rb AL bin/rolling-restart.sh !????? bin/local-master-backup.sh AL bin/zookeepers.sh AL bin/copy_table.rb AL bin/loadtable.rb AL bin/hbase AL bin/master-backup.sh AL src/saveVersion.sh AL src/examples/mapreduce/org/apache/hadoop/hbase/mapreduce/SampleUploader.java AL src/examples/mapreduce/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java !????? src/examples/mapreduce/index-builder-setup.rb AL src/examples/thrift/DemoClient.cpp AL src/examples/thrift/DemoClient.py AL src/examples/thrift/DemoClient.rb AL src/examples/thrift/DemoClient.java N src/examples/thrift/README.txt AL src/examples/thrift/DemoClient.php AL src/examples/thrift/Makefile N src/examples/README.txt AL src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java AL src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java AL src/main/java/org/apache/hadoop/hbase/ClusterStatus.java !????? src/main/java/org/apache/hadoop/hbase/executor/HBaseExecutorService.java !????? src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionEventData.java AL src/main/java/org/apache/hadoop/hbase/executor/NamedThreadFactory.java !????? 
src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java AL src/main/java/org/apache/hadoop/hbase/ColumnNameParseException.java AL src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceMetrics.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatistics.java AL src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkMetrics.java AL src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java AL src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java AL src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java AL src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/WritableByteArrayComparable.java AL src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java AL src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java AL src/main/java/org/apache/hadoop/hbase/filter/FilterList.java AL src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java AL src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java AL src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java !????? 
src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/Filter.java AL src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java AL src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/package-info.java AL src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java AL src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java AL src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java AL src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java AL src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java AL src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java AL src/main/java/org/apache/hadoop/hbase/Leases.java AL src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java AL src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java AL src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java AL src/main/java/org/apache/hadoop/hbase/LeaseListener.java AL src/main/java/org/apache/hadoop/hbase/avro/package.html AL 
src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java !????? src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java AL src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java AL src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java !????? 
src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr AL src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java AL src/main/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java AL src/main/java/org/apache/hadoop/hbase/HMsg.java AL src/main/java/org/apache/hadoop/hbase/master/HMaster.java AL src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java AL src/main/java/org/apache/hadoop/hbase/master/InvalidColumnNameException.java AL src/main/java/org/apache/hadoop/hbase/master/MetaScanner.java AL src/main/java/org/apache/hadoop/hbase/master/TableDelete.java AL src/main/java/org/apache/hadoop/hbase/master/LogsCleaner.java AL src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java AL src/main/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java AL src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java AL src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java AL src/main/java/org/apache/hadoop/hbase/master/RootScanner.java AL src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java AL src/main/java/org/apache/hadoop/hbase/master/LogCleanerDelegate.java AL src/main/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java AL src/main/java/org/apache/hadoop/hbase/master/MetaRegion.java AL src/main/java/org/apache/hadoop/hbase/master/AddColumn.java AL src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationListener.java AL src/main/java/org/apache/hadoop/hbase/master/RegionManager.java AL src/main/java/org/apache/hadoop/hbase/master/NotAllMetaRegionsOnlineException.java AL src/main/java/org/apache/hadoop/hbase/master/TableOperation.java AL src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java AL src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java !????? src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java !????? src/main/java/org/apache/hadoop/hbase/master/handler/MasterCloseRegionHandler.java AL src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java AL src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java AL src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java AL src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java !????? 
src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java AL src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java AL src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java AL src/main/java/org/apache/hadoop/hbase/master/ModifyColumn.java AL src/main/java/org/apache/hadoop/hbase/master/RegionServerOperation.java AL src/main/java/org/apache/hadoop/hbase/master/ServerManager.java AL src/main/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java AL src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java AL src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java AL src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java AL src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java AL src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java AL src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java AL src/main/java/org/apache/hadoop/hbase/client/Result.java AL src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java AL src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java AL src/main/java/org/apache/hadoop/hbase/client/ServerConnectionManager.java AL src/main/java/org/apache/hadoop/hbase/client/MultiPut.java AL src/main/java/org/apache/hadoop/hbase/client/HConnection.java AL src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java AL src/main/java/org/apache/hadoop/hbase/client/Put.java AL src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java AL src/main/java/org/apache/hadoop/hbase/client/Delete.java AL src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java AL src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java AL src/main/java/org/apache/hadoop/hbase/client/Get.java AL src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java AL src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java AL src/main/java/org/apache/hadoop/hbase/client/Scan.java AL src/main/java/org/apache/hadoop/hbase/client/RowLock.java AL src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java AL src/main/java/org/apache/hadoop/hbase/client/HTable.java AL src/main/java/org/apache/hadoop/hbase/client/HTablePool.java AL src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java AL src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java AL src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java AL src/main/java/org/apache/hadoop/hbase/client/package-info.java AL src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java AL src/main/java/org/apache/hadoop/hbase/client/Row.java AL src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java AL src/main/java/org/apache/hadoop/hbase/client/ServerConnection.java AL src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java 
AL src/main/java/org/apache/hadoop/hbase/io/Reference.java AL src/main/java/org/apache/hadoop/hbase/io/HeapSize.java AL src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java AL src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java AL src/main/java/org/apache/hadoop/hbase/io/TimeRange.java AL src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java AL src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java AL src/main/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/BoundedRangeFileInputStream.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java AL src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java AL src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java AL src/main/java/org/apache/hadoop/hbase/TableExistsException.java AL src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java AL src/main/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java AL src/main/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java AL src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java AL src/main/java/org/apache/hadoop/hbase/RegionException.java AL src/main/java/org/apache/hadoop/hbase/HMerge.java AL src/main/java/org/apache/hadoop/hbase/HRegionInfo.java AL src/main/java/org/apache/hadoop/hbase/KeyValue.java AL src/main/java/org/apache/hadoop/hbase/WritableComparator.java AL src/main/java/org/apache/hadoop/hbase/MiniZooKeeperCluster.java AL src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java AL src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java AL src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java AL src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java AL src/main/java/org/apache/hadoop/hbase/HConstants.java AL src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java AL src/main/java/org/apache/hadoop/hbase/Chore.java AL src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java AL src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java AL src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java AL src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java AL src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java AL src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java AL src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java AL src/main/java/org/apache/hadoop/hbase/rest/RowResource.java AL src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java AL src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java AL src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java AL 
src/main/java/org/apache/hadoop/hbase/rest/client/Client.java AL src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java AL src/main/java/org/apache/hadoop/hbase/rest/client/Response.java AL src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java AL src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java AL src/main/java/org/apache/hadoop/hbase/rest/TableResource.java AL src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java AL src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTMetrics.java AL src/main/java/org/apache/hadoop/hbase/rest/metrics/RESTStatistics.java AL src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java AL src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java AL src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java AL src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java AL src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java AL src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java AL src/main/java/org/apache/hadoop/hbase/rest/RootResource.java AL src/main/java/org/apache/hadoop/hbase/rest/Constants.java AL src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java AL src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java AL src/main/java/org/apache/hadoop/hbase/rest/Main.java AL src/main/java/org/apache/hadoop/hbase/rest/ResourceConfig.java AL src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java AL src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java AL src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java AL src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java AL src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java !????? src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java !????? 
src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java AL src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java AL src/main/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java !????? src/main/java/org/apache/hadoop/hbase/regionserver/RSZookeeperUpdater.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java AL src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java AL src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java AL src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java AL src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java AL src/main/java/org/apache/hadoop/hbase/regionserver/Store.java AL src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java AL src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java AL src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java AL src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java AL src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java AL src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java AL src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java AL src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java AL src/main/java/org/apache/hadoop/hbase/regionserver/Stoppable.java AL src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java AL src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java AL src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java AL src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java AL src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java AL src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java AL src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java AL src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java AL src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java AL src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java AL 
src/main/java/org/apache/hadoop/hbase/regionserver/wal/LogActionsListener.java !????? src/main/java/org/apache/hadoop/hbase/regionserver/wal/LogEntryVisitor.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java AL src/main/java/org/apache/hadoop/hbase/regionserver/wal/LogRollListener.java AL src/main/java/org/apache/hadoop/hbase/LeaseException.java AL src/main/java/org/apache/hadoop/hbase/HServerInfo.java AL src/main/java/org/apache/hadoop/hbase/HBaseConfTool.java AL src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java AL src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java AL src/main/java/org/apache/hadoop/hbase/mapred/Driver.java AL src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java AL src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java AL src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java AL src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java AL src/main/java/org/apache/hadoop/hbase/mapred/package-info.java AL src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java AL src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java AL src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java AL src/main/java/org/apache/hadoop/hbase/util/Merge.java AL src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java AL src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java AL src/main/java/org/apache/hadoop/hbase/util/Bytes.java AL src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java AL src/main/java/org/apache/hadoop/hbase/util/Pair.java AL src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java AL src/main/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java AL src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java AL src/main/java/org/apache/hadoop/hbase/util/InfoServer.java AL src/main/java/org/apache/hadoop/hbase/util/DynamicByteBloomFilter.java AL src/main/java/org/apache/hadoop/hbase/util/FSUtils.java AL src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java AL src/main/java/org/apache/hadoop/hbase/util/Writables.java AL src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java AL src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java AL src/main/java/org/apache/hadoop/hbase/util/Keying.java AL src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java AL src/main/java/org/apache/hadoop/hbase/util/Base64.java AL src/main/java/org/apache/hadoop/hbase/util/Strings.java AL src/main/java/org/apache/hadoop/hbase/util/Hash.java AL src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java AL src/main/java/org/apache/hadoop/hbase/util/ClassSize.java AL src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java AL src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java AL src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java AL 
src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java AL src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java AL src/main/java/org/apache/hadoop/hbase/util/Threads.java AL src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java AL src/main/java/org/apache/hadoop/hbase/util/Sleeper.java AL src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java AL src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java AL src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java AL src/main/java/org/apache/hadoop/hbase/HServerAddress.java AL src/main/java/org/apache/hadoop/hbase/HServerLoad.java AL src/main/java/org/apache/hadoop/hbase/HRegionLocation.java AL src/main/ruby/hbase.rb AL src/main/ruby/shell.rb AL src/main/ruby/irb/hirb.rb AL src/main/ruby/shell/commands/major_compact.rb AL src/main/ruby/shell/commands/version.rb AL src/main/ruby/shell/commands/truncate.rb AL src/main/ruby/shell/commands/status.rb AL src/main/ruby/shell/commands/enable_region.rb AL src/main/ruby/shell/commands/count.rb AL src/main/ruby/shell/commands/zk.rb AL src/main/ruby/shell/commands/create.rb AL src/main/ruby/shell/commands/incr.rb AL src/main/ruby/shell/commands/split.rb AL src/main/ruby/shell/commands/enable.rb AL src/main/ruby/shell/commands/get_counter.rb AL src/main/ruby/shell/commands/drop.rb AL src/main/ruby/shell/commands/describe.rb AL src/main/ruby/shell/commands/delete.rb AL src/main/ruby/shell/commands/shutdown.rb AL src/main/ruby/shell/commands/deleteall.rb AL src/main/ruby/shell/commands/exists.rb AL src/main/ruby/shell/commands/scan.rb AL src/main/ruby/shell/commands/compact.rb AL src/main/ruby/shell/commands/disable.rb AL src/main/ruby/shell/commands/close_region.rb AL src/main/ruby/shell/commands/list.rb AL src/main/ruby/shell/commands/put.rb AL src/main/ruby/shell/commands/disable_region.rb AL src/main/ruby/shell/commands/flush.rb AL src/main/ruby/shell/commands/get.rb AL src/main/ruby/shell/commands/zk_dump.rb AL src/main/ruby/shell/commands/alter.rb AL src/main/ruby/shell/commands.rb AL src/main/ruby/shell/formatter.rb AL src/main/ruby/hbase/hbase.rb AL src/main/ruby/hbase/table.rb AL src/main/ruby/hbase/admin.rb !????? src/main/resources/hbase-webapps/master/zk.jsp !????? src/main/resources/hbase-webapps/master/table.jsp !????? src/main/resources/hbase-webapps/master/index.html !????? src/main/resources/hbase-webapps/master/master.jsp !????? src/main/resources/hbase-webapps/regionserver/regionserver.jsp !????? src/main/resources/hbase-webapps/regionserver/index.html !????? src/main/resources/hbase-webapps/static/hbase.css B src/main/resources/hbase-webapps/static/hbase_logo_med.gif AL src/main/resources/hbase-default.xml !????? src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties AL src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift !????? 
src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableSchemaMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellSetMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableListMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ScannerMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/ColumnSchemaMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/VersionMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/TableInfoMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/CellMessage.proto AL src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto !????? src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties AL src/main/javadoc/org/apache/hadoop/hbase/replication/package.html AL src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html !????? src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html AL src/main/javadoc/org/apache/hadoop/hbase/io/hfile/package.html AL src/main/javadoc/org/apache/hadoop/hbase/ipc/package.html AL src/main/javadoc/overview.html !????? src/docbkx/sample_article.xml !????? src/docbkx/book.xml !????? src/site/site.vm AL src/site/fml/faq.fml !????? src/site/site.xml AL src/site/xdoc/old_news.xml AL src/site/xdoc/index.xml AL src/site/xdoc/metrics.xml AL src/site/xdoc/bulk-loads.xml AL src/site/xdoc/cygwin.xml AL src/site/xdoc/replication.xml AL src/site/xdoc/acid-semantics.xml AL src/site/xdoc/pseudo-distributed.xml B src/site/resources/images/asf_logo_wide.png B src/site/resources/images/replication_overview.png B src/site/resources/images/architecture.gif B src/site/resources/images/hadoop-logo.jpg B src/site/resources/images/hbase_logo_med.gif B src/site/resources/images/favicon.ico B src/site/resources/images/hbase_small.gif AL src/site/resources/css/site.css !????? src/assembly/bin.xml !????? src/assembly/src.xml AL src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java AL src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java AL src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java AL src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java AL src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java AL src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java AL src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java AL src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java AL src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java AL src/test/java/org/apache/hadoop/hbase/TestMultiClusters.java AL src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java AL src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java AL src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java !????? 
src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java AL src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java AL src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java AL src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java AL src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java AL src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java AL src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java AL src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java AL src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java AL src/test/java/org/apache/hadoop/hbase/TestHMsg.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java AL src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java AL src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java AL src/test/java/org/apache/hadoop/hbase/TestCompare.java AL src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java AL src/test/java/org/apache/hadoop/hbase/TestEmptyMetaInfo.java AL src/test/java/org/apache/hadoop/hbase/master/TestMaster.java AL src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java AL src/test/java/org/apache/hadoop/hbase/master/TestMasterWithDisabling.java AL src/test/java/org/apache/hadoop/hbase/master/TestKillingServersFromMaster.java AL src/test/java/org/apache/hadoop/hbase/master/OOMEHMaster.java AL src/test/java/org/apache/hadoop/hbase/master/TestMinimumServerCount.java !????? src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java AL src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java !????? src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java AL src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java AL src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java AL src/test/java/org/apache/hadoop/hbase/master/TestRegionManager.java AL src/test/java/org/apache/hadoop/hbase/master/TestServerManager.java !????? 
src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java AL src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java AL src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java AL src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java AL src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java AL src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java AL src/test/java/org/apache/hadoop/hbase/client/TestHCM.java AL src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java AL src/test/java/org/apache/hadoop/hbase/client/TestShell.java AL src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java AL src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java AL src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java AL src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java AL src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java AL src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java AL src/test/java/org/apache/hadoop/hbase/io/TestHbaseObjectWritable.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java AL src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java AL src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java AL src/test/java/org/apache/hadoop/hbase/metrics/TestMetricsMBeanBase.java AL src/test/java/org/apache/hadoop/hbase/TestMergeTable.java AL src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java AL src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java AL src/test/java/org/apache/hadoop/hbase/TestInfoServers.java AL src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java AL src/test/java/org/apache/hadoop/hbase/KeyValueTestUtil.java AL src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java AL src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java AL src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java AL src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java AL src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java AL src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java AL 
src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java AL src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java AL src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java AL src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java AL src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java AL src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java AL src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java AL src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java AL src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompactingStoreScanner.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java AL src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java AL src/test/java/org/apache/hadoop/hbase/regionserver/DisabledTestRegionServerExit.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueSkipListSet.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java AL src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java AL src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java AL src/test/java/org/apache/hadoop/hbase/TestSerialization.java AL src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java AL src/test/java/org/apache/hadoop/hbase/VerifiableEditor.java AL src/test/java/org/apache/hadoop/hbase/TestKeyValue.java AL src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java AL 
src/test/java/org/apache/hadoop/hbase/util/TestBase64.java AL src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java AL src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java AL src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java AL src/test/java/org/apache/hadoop/hbase/util/SoftValueSortedMapTest.java AL src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java AL src/test/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManagerTestHelper.java AL src/test/java/org/apache/hadoop/hbase/util/TestBytes.java AL src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java AL src/test/java/org/apache/hadoop/hbase/util/DisabledTestMetaUtils.java AL src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java AL src/test/java/org/apache/hadoop/hbase/util/TestKeying.java AL src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java AL src/test/java/org/apache/hadoop/hbase/EmptyWatcher.java AL src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java !????? src/test/ruby/test_helper.rb AL src/test/ruby/tests_runner.rb AL src/test/ruby/shell/shell_test.rb AL src/test/ruby/shell/commands_test.rb AL src/test/ruby/shell/formatter_test.rb AL src/test/ruby/hbase/table_test.rb AL src/test/ruby/hbase/admin_test.rb AL src/test/ruby/hbase/hbase_test.rb !????? src/test/resources/log4j.properties AL src/test/resources/hbase-site.xml AL src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties !????? src/test/resources/mapred-queues.xml AL pom.xml N README.txt N LICENSE.txt !????? .git/config !????? .git/info/exclude !????? .git/hooks/commit-msg.sample !????? .git/hooks/applypatch-msg.sample !????? .git/hooks/post-update.sample !????? .git/hooks/pre-commit.sample !????? .git/hooks/pre-rebase.sample !????? .git/hooks/pre-applypatch.sample !????? .git/hooks/update.sample !????? .git/hooks/post-commit.sample !????? .git/hooks/prepare-commit-msg.sample !????? .git/hooks/post-receive.sample !????? .git/description !????? .git/HEAD !????? CHANGES.txt N NOTICE.txt !????? cloudera/do-release-build !????? cloudera/apply-patches !????? cloudera/install_hbase.sh !????? cloudera/patches/0019-CLOUDERA-BUILD.-Fix-copy-of-bin-to-be-cp-a.patch AL cloudera/patches/0024-HBASE-2799.-Append-not-enabled-warning-should-not-sh.patch !????? cloudera/patches/0021-HBASE-3008-Memstore.updateColumnValue-passes-wrong-f.patch !????? cloudera/patches/0003-CLOUDERA-BUILD.-Switch-to-CDH3b3-snapshot-in-todd-s-.patch !????? cloudera/patches/0018-CLOUDERA-BUILD.-Build-site-as-part-of-release-build.patch !????? cloudera/patches/0008-CLOUDERA-BUILD.-hbase-config.sh-should-set-HBASE_PID.patch AL cloudera/patches/0013-HBASE-2980.-Refactor-region-server-command-line-to-a.patch !????? cloudera/patches/0005-Re-enable-log-split-test.patch !????? cloudera/patches/0016-CLOUDERA-BUILD.-HBase-running-on-secure-hadoop-tempo.patch !????? cloudera/patches/0020-Fix-src-assembly-to-make-java-libs-644-and-not-inclu.patch !????? cloudera/patches/0014-HBASE-3000.-Add-hbase-classpath-command.patch !????? cloudera/patches/0015-HBASE-3001.-TableMapReduceUtil-should-always-add-dep.patch !????? cloudera/patches/0025-HBASE-3096.-TestCompaction-timing-out.patch !????? cloudera/patches/0006-HBASE-2773.-Check-for-null-values-in-meta-in-test-ut.patch !????? cloudera/patches/0028-CLOUDERA-BUILD.-Fix-versionless-jar-naming-symlinks-.patch !????? cloudera/patches/0002-CLOUDERA-BUILD.-Add-build-infrastructure.patch !????? 
cloudera/patches/0017-HBASE-2782.-QoS-for-META-table-access.patch !????? cloudera/patches/0007-CLOUDERA-BUILD.-Include-cloudera-dir-in-src-assembly.patch !????? cloudera/patches/0023-CLOUDERA-BUILD.-cloudera-directory-should-get-instal.patch !????? cloudera/patches/0010-HBASE-2467.-Concurrent-flushers-in-HLog-sync-using-H.patch !????? cloudera/patches/0001-Updating-the-site-for-0.89.20100924.patch AL cloudera/patches/0012-HBASE-2977.-Refactor-master-command-line-to-a-new-cl.patch AL cloudera/patches/0004-Add-VerifiableEditor.patch !????? cloudera/patches/0009-CLOUDERA-BUILD.-rsync-all-of-lib-into-target-directo.patch !????? cloudera/patches/0022-CLOUDERA-BUILD.-Change-wrapper-scripts-to-not-be-dep.patch !????? cloudera/patches/0026-CLOUDERA-BUILD.-Update-hadoop-version.patch !????? cloudera/patches/0027-HBASE-3101.-bin-assembly-should-include-tests-and-so.patch !????? cloudera/patches/0011-SequenceFileLogWriter-doesn-t-need-to-actually-call-.patch !????? cloudera/CHANGES.cloudera.txt !????? cloudera/build.properties !????? cloudera/README.cloudera AL conf/hbase-site.xml.psuedo-distributed.template !????? conf/log4j.properties !????? conf/regionservers AL conf/tohtml.xsl AL conf/hbase-site.xml AL conf/hbase-env.sh !????? conf/hadoop-metrics.properties ***************************************************** Printing headers for files without AL header... ======================================================================= ==bin/local-regionservers.sh ======================================================================= #!/bin/sh # This is used for starting multiple regionservers on the same machine. # run it from hbase-dir/ just like 'bin/hbase' # Supports up to 100 regionservers (limitation = overlapping ports) bin=`dirname "${BASH_SOURCE-$0}"` bin=`cd "$bin" >/dev/null && pwd` if [ $# -lt 2 ]; then S=`basename "${BASH_SOURCE-$0}"` echo "Usage: $S [start|stop] offset(s)" echo "" echo " e.g. $S start 1 2" exit fi # sanity check: make sure your regionserver opts don't use ports [i.e. JMX/DBG] export HBASE_REGIONSERVER_OPTS=" " run_regionserver () { DN=$2 export HBASE_IDENT_STRING="$USER-$DN" HBASE_REGIONSERVER_ARGS="\ -D hbase.regionserver.port=`expr 60200 + $DN` \ -D hbase.regionserver.info.port=`expr 60300 + $DN`" "$bin"/hbase-daemon.sh $1 regionserver $HBASE_REGIONSERVER_ARGS } cmd=$1 shift; for i in $* do run_regionserver $cmd $i done ======================================================================= ==bin/set_meta_block_caching.rb ======================================================================= # Set in_memory=true and blockcache=true on catalog tables. # The .META. and -ROOT- tables can be created with caching and # in_memory set to false. You want them set to true so that # these hot tables make it into cache. To see if the # .META. table has BLOCKCACHE set, in the shell do the following: # # hbase> scan '-ROOT-' # # Look for the 'info' column family. See if BLOCKCACHE => 'true'? # If not, run this script and it will set the value to true. # Setting cache to 'true' will only take effect on region restart # of if you close the .META. region -- *disruptive* -- and have # it deploy elsewhere. This script runs against an up and running # hbase instance. 
=======================================================================
==bin/set_meta_block_caching.rb
=======================================================================
# Set in_memory=true and blockcache=true on catalog tables.
# The .META. and -ROOT- tables can be created with caching and
# in_memory set to false. You want them set to true so that
# these hot tables make it into cache. To see if the
# .META. table has BLOCKCACHE set, in the shell do the following:
#
#   hbase> scan '-ROOT-'
#
# Look for the 'info' column family. See if BLOCKCACHE => 'true'?
# If not, run this script and it will set the value to true.
# Setting cache to 'true' will only take effect on region restart
# of if you close the .META. region -- *disruptive* -- and have
# it deploy elsewhere. This script runs against an up and running
# hbase instance.
#
# To see usage for this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main set_meta_block_caching.rb
#
include Java
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.LogFactory

# Name of this script
NAME = "set_meta_block_caching.rb"

# Print usage for this script
def usage
  puts 'Usage: %s.rb]' % NAME
  exit!
end

# Get configuration to use.
c = HBaseConfiguration.new()

# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost though the hbase.rootdir
=======================================================================
==bin/local-master-backup.sh
=======================================================================
#!/bin/sh
# This is used for starting multiple masters on the same machine.
# run it from hbase-dir/ just like 'bin/hbase'
# Supports up to 10 masters (limitation = overlapping ports)
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin" >/dev/null && pwd`

if [ $# -lt 2 ]; then
  S=`basename "${BASH_SOURCE-$0}"`
  echo "Usage: $S [start|stop] offset(s)"
  echo ""
  echo " e.g. $S start 1"
  exit
fi

# sanity check: make sure your master opts don't use ports [i.e. JMX/DBG]
export HBASE_MASTER_OPTS=" "

run_master () {
  DN=$2
  export HBASE_IDENT_STRING="$USER-$DN"
  HBASE_MASTER_ARGS="\
    --backup \
    -D hbase.master.port=`expr 60000 + $DN` \
    -D hbase.master.info.port=`expr 60010 + $DN`"
  "$bin"/hbase-daemon.sh $1 master $HBASE_MASTER_ARGS
}

cmd=$1
shift;

for i in $*
do
  run_master $cmd $i
done
=======================================================================
==src/examples/mapreduce/index-builder-setup.rb
=======================================================================
# Set up sample data for IndexBuilder example
create "people", "attributes"
create "people-email", "INDEX"
create "people-phone", "INDEX"
create "people-name", "INDEX"

[["1", "jenny", "jenny@example.com", "867-5309"],
 ["2", "alice", "alice@example.com", "555-1234"],
 ["3", "kevin", "kevinpet@example.com", "555-1212"]].each do |fields|
  (id, name, email, phone) = *fields
  put "people", id, "attributes:name", name
  put "people", id, "attributes:email", email
  put "people", id, "attributes:phone", phone
end
=======================================================================
==src/main/java/org/apache/hadoop/hbase/executor/HBaseExecutorService.java
=======================================================================
=======================================================================
==src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionEventData.java
=======================================================================
=======================================================================
==src/main/java/org/apache/hadoop/hbase/executor/HBaseEventHandler.java
=======================================================================
=======================================================================
==src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
=======================================================================
package org.apache.hadoop.hbase.filter;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.TreeSet;

import org.apache.hadoop.hbase.KeyValue;

/**
 * Filter that returns only cells whose timestamp (version) is
 * in the specified list of timestamps (versions).
 * <p>
 * Note: Use of this filter overrides any time range/time stamp
 * options specified using {@link Get#setTimeRange(long, long)},
 * {@link Scan#setTimeRange(long, long)}, {@link Get#setTimeStamp(long)},
 * or {@link Scan#setTimeStamp(long)}.
 */
public class TimestampsFilter extends FilterBase {

  TreeSet<Long> timestamps;

  // Used during scans to hint the scan to stop early
  // once the timestamps fall below the minTimeStamp.
  long minTimeStamp = Long.MAX_VALUE;

  /**
   * Used during deserialization. Do not use otherwise.
   */
  public TimestampsFilter() {
    super();
  }

  /**
   * Constructor for filter that retains only those
   * cells whose timestamp (version) is in the specified
   * list of timestamps.
   *
   * @param timestamps
   */
  public TimestampsFilter(List<Long> timestamps) {
    this.timestamps = new TreeSet<Long>(timestamps);
    init();
  }

  private void init() {
    if (this.timestamps.size() > 0) {
      minTimeStamp = this.timestamps.first();
    }
=======================================================================
==src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
=======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AIllegalArgument extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
    org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
  public org.apache.avro.util.Utf8 message;
  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
  public java.lang.Object get(int field$) {
    switch (field$) {
    case 0: return message;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
  @SuppressWarnings(value="unchecked")
  public void put(int field$, java.lang.Object value$) {
    switch (field$) {
    case 0: message = (org.apache.avro.util.Utf8)value$; break;
    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
    }
  }
}
=======================================================================
==src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
=======================================================================
package org.apache.hadoop.hbase.avro.generated;

@SuppressWarnings("all")
public class AScan extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
  public static final org.apache.avro.Schema SCHEMA$ =
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AScan\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}"); public java.nio.ByteBuffer startRow; public java.nio.ByteBuffer stopRow; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns; public java.lang.Long timestamp; public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange; public java.lang.Integer maxVersions; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return startRow; case 1: return stopRow; case 2: return columns; case 3: return timestamp; case 4: return timerange; case 5: return maxVersions; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: startRow = (java.nio.ByteBuffer)value$; break; case 1: stopRow = (java.nio.ByteBuffer)value$; break; case 2: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break; case 3: timestamp = (java.lang.Long)value$; break; case 4: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break; case 5: maxVersions = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class ATableDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]}"); public java.nio.ByteBuffer name; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor> families; public java.lang.Long maxFileSize; public java.lang.Long memStoreFlushSize; public java.lang.Boolean rootRegion; public java.lang.Boolean metaRegion; public java.lang.Boolean metaTable; public java.lang.Boolean readOnly; public java.lang.Boolean deferredLogFlush; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return name; case 1: return families; case 2: return maxFileSize; case 3: return memStoreFlushSize; case 4: return rootRegion; case 5: return metaRegion; case 6: return metaTable; case 7: return readOnly; case 8: return deferredLogFlush; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: name = (java.nio.ByteBuffer)value$; break; case 1: families = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor>)value$; break; case 2: maxFileSize = (java.lang.Long)value$; break; case 3: memStoreFlushSize = (java.lang.Long)value$; break; case 4: rootRegion = (java.lang.Boolean)value$; break; case 5: metaRegion = (java.lang.Boolean)value$; break; case 6: metaTable = (java.lang.Boolean)value$; break; case 7: readOnly = (java.lang.Boolean)value$; break; case 8: deferredLogFlush = (java.lang.Boolean)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AColumnValue extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnValue\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}"); public java.nio.ByteBuffer family; public java.nio.ByteBuffer qualifier; public java.nio.ByteBuffer value; public java.lang.Long timestamp; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return family; case 1: return qualifier; case 2: return value; case 3: return timestamp; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: family = (java.nio.ByteBuffer)value$; break; case 1: qualifier = (java.nio.ByteBuffer)value$; break; case 2: value = (java.nio.ByteBuffer)value$; break; case 3: timestamp = (java.lang.Long)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}"); public java.nio.ByteBuffer name; public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression; public java.lang.Integer maxVersions; public java.lang.Integer blocksize; public java.lang.Boolean inMemory; public java.lang.Integer timeToLive; public java.lang.Boolean blockCacheEnabled; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return name; case 1: return compression; case 2: return maxVersions; case 3: return blocksize; case 4: return inMemory; case 5: return timeToLive; case 6: return blockCacheEnabled; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: name = (java.nio.ByteBuffer)value$; break; case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break; case 2: maxVersions = (java.lang.Integer)value$; break; case 3: blocksize = (java.lang.Integer)value$; break; case 4: inMemory = (java.lang.Boolean)value$; break; case 5: timeToLive = (java.lang.Integer)value$; break; case 6: blockCacheEnabled = (java.lang.Boolean)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad 
index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AAlreadyExists.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AAlreadyExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AAlreadyExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); public org.apache.avro.util.Utf8 message; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return message; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: message = (org.apache.avro.util.Utf8)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public interface HBase { public static final org.apache.avro.Protocol PROTOCOL = org.apache.avro.Protocol.parse("{\"protocol\":\"HBase\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"types\":[{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":\"ARegionLoad\"}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":\"AServerLoad\"},{\"name\":\"serverAddress\",\"type\":\"AServerAddress\"},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AClusterStatus\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":\"AServerInfo\"}},{\"name\":\"servers\",\"type\":\"in
t\"}]},{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[\"ACompressionAlgorithm\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":\"AFamilyDescriptor\"},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AGet\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AResult\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":\"AResultEntry\"}}]},{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]},{\"type\":\"record\",\"name\":\"APut\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":\"AColumnValue\"}}]},{\"type\":\"record\",\"name\":\"ADelete\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]}]},{\"type\":\"record\",\"name\":\"AScan\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"error\",\"name\":\"AIOError\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"ATableExists\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error
\",\"name\":\"AMasterNotRunning\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}],\"messages\":{\"getHBaseVersion\":{\"request\":[],\"response\":\"string\",\"errors\":[\"AIOError\"]},\"getClusterStatus\":{\"request\":[],\"response\":\"AClusterStatus\",\"errors\":[\"AIOError\"]},\"listTables\":{\"request\":[],\"response\":{\"type\":\"array\",\"items\":\"ATableDescriptor\"},\"errors\":[\"AIOError\"]},\"describeTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"ATableDescriptor\",\"errors\":[\"AIOError\"]},\"isTableEnabled\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"tableExists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"describeFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"AFamilyDescriptor\",\"errors\":[\"AIOError\"]},\"createTable\":{\"request\":[{\"name\":\"table\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\",\"ATableExists\",\"AMasterNotRunning\"]},\"deleteTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"tableDescriptor\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"enableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"disableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"flush\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"split\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"addFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"deleteFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"familyName\",\"type\":\"bytes\"},{\"name\":\"familyDescriptor\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"get\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"AResult\",\"errors\":[\"AIOError\"]},\"exists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"put\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"put\",\"type\":\"APut\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"delete\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"delete\",\"type\":\"ADelete\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"incrementColumnValue\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"amount\",\"type\":\"long\"},{\"name\":\"writeToWAL\",\"type\":\"boolean\"}],\"response\":\"long\",\"errors\":[\"AIOError\"]},\"scannerOpen\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"scan\",\"type\":\"AScan\"
}],\"response\":\"int\",\"errors\":[\"AIOError\"]},\"scannerClose\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\"]},\"scannerGetRows\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"},{\"name\":\"numberOfRows\",\"type\":\"int\"}],\"response\":{\"type\":\"array\",\"items\":\"AResult\"},\"errors\":[\"AIOError\",\"AIllegalArgument\"]}}}"); org.apache.avro.util.Utf8 getHBaseVersion() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; org.apache.hadoop.hbase.avro.generated.AClusterStatus getClusterStatus() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ATableDescriptor> listTables() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; org.apache.hadoop.hbase.avro.generated.ATableDescriptor describeTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; boolean isTableEnabled(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; boolean tableExists(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor describeFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void createTable(org.apache.hadoop.hbase.avro.generated.ATableDescriptor table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument, org.apache.hadoop.hbase.avro.generated.ATableExists, org.apache.hadoop.hbase.avro.generated.AMasterNotRunning; java.lang.Void deleteTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void modifyTable(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ATableDescriptor tableDescriptor) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void enableTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void disableTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void flush(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void split(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void addFamily(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void deleteFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void modifyFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer familyName, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor familyDescriptor) throws 
org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; org.apache.hadoop.hbase.avro.generated.AResult get(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; boolean exists(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void put(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.APut put) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; java.lang.Void delete(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ADelete delete) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; long incrementColumnValue(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, long amount, boolean writeToWAL) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError; int scannerOpen(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AScan scan) ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnFamilyDescriptor.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AColumnFamilyDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumnFamilyDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]}},{\"name\":\"maxVersions\",\"type\":\"int\"},{\"name\":\"blocksize\",\"type\":\"int\"},{\"name\":\"inMemory\",\"type\":\"boolean\"},{\"name\":\"timeToLive\",\"type\":\"int\"},{\"name\":\"blockCacheEnabled\",\"type\":\"boolean\"},{\"name\":\"bloomfilterEnabled\",\"type\":\"boolean\"}]}"); public java.nio.ByteBuffer name; public org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm compression; public int maxVersions; public int blocksize; public boolean inMemory; public int timeToLive; public boolean blockCacheEnabled; public boolean bloomfilterEnabled; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return name; case 1: return compression; case 2: return maxVersions; case 3: return blocksize; case 4: return inMemory; case 5: return timeToLive; case 6: return blockCacheEnabled; case 7: return bloomfilterEnabled; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: name = (java.nio.ByteBuffer)value$; break; case 1: compression = (org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm)value$; break; case 2: maxVersions = (java.lang.Integer)value$; break; case 3: blocksize = (java.lang.Integer)value$; break; case 4: inMemory = (java.lang.Boolean)value$; break; case 5: timeToLive = (java.lang.Integer)value$; break; case 6: 
blockCacheEnabled = (java.lang.Boolean)value$; break; case 7: bloomfilterEnabled = (java.lang.Boolean)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AResultEntry extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResultEntry\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}"); public java.nio.ByteBuffer family; public java.nio.ByteBuffer qualifier; public java.nio.ByteBuffer value; public long timestamp; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return family; case 1: return qualifier; case 2: return value; case 3: return timestamp; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: family = (java.nio.ByteBuffer)value$; break; case 1: qualifier = (java.nio.ByteBuffer)value$; break; case 2: value = (java.nio.ByteBuffer)value$; break; case 3: timestamp = (java.lang.Long)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class ATableExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"ATableExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); public org.apache.avro.util.Utf8 message; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return message; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: message = (org.apache.avro.util.Utf8)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/TCell.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class TCell extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"TCell\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}"); public java.nio.ByteBuffer value; public long timestamp; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return value; case 1: return timestamp; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: value = (java.nio.ByteBuffer)value$; break; case 1: timestamp = (java.lang.Long)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/IOError.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class IOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"IOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); public org.apache.avro.util.Utf8 message; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return message; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: message = (org.apache.avro.util.Utf8)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class ARegionLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ARegionLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}"); public int memStoreSizeMB; public java.nio.ByteBuffer name; public int storefileIndexSizeMB; public int storefiles; public int storefileSizeMB; public int stores; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return memStoreSizeMB; case 1: return name; case 2: return storefileIndexSizeMB; case 3: return storefiles; case 4: return storefileSizeMB; case 5: return stores; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: memStoreSizeMB = 
(java.lang.Integer)value$; break; case 1: name = (java.nio.ByteBuffer)value$; break; case 2: storefileIndexSizeMB = (java.lang.Integer)value$; break; case 3: storefiles = (java.lang.Integer)value$; break; case 4: storefileSizeMB = (java.lang.Integer)value$; break; case 5: stores = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AGet extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AGet\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}"); public java.nio.ByteBuffer row; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns; public java.lang.Long timestamp; public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange; public java.lang.Integer maxVersions; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return row; case 1: return columns; case 2: return timestamp; case 3: return timerange; case 4: return maxVersions; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: row = (java.nio.ByteBuffer)value$; break; case 1: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break; case 2: timestamp = (java.lang.Long)value$; break; case 3: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break; case 4: maxVersions = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AResult extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResult\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}}}]}"); public java.nio.ByteBuffer row; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AResultEntry> entries; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return row; case 1: return entries; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: row = (java.nio.ByteBuffer)value$; break; case 1: entries = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AResultEntry>)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AServerLoad extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerLoad\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}"); public int load; public int maxHeapMB; public int memStoreSizeInMB; public int numberOfRegions; public int numberOfRequests; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ARegionLoad> regionsLoad; public int storefileIndexSizeInMB; public int storefiles; public int storefileSizeInMB; public int usedHeapMB; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return load; case 1: return maxHeapMB; case 2: return memStoreSizeInMB; case 3: return numberOfRegions; case 4: return numberOfRequests; case 5: return regionsLoad; case 6: return storefileIndexSizeInMB; case 7: return storefiles; case 8: return storefileSizeInMB; case 9: return usedHeapMB; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, 
java.lang.Object value$) { switch (field$) { case 0: load = (java.lang.Integer)value$; break; case 1: maxHeapMB = (java.lang.Integer)value$; break; case 2: memStoreSizeInMB = (java.lang.Integer)value$; break; case 3: numberOfRegions = (java.lang.Integer)value$; break; case 4: numberOfRequests = (java.lang.Integer)value$; break; case 5: regionsLoad = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.ARegionLoad>)value$; break; case 6: storefileIndexSizeInMB = (java.lang.Integer)value$; break; case 7: storefiles = (java.lang.Integer)value$; break; case 8: storefileSizeInMB = (java.lang.Integer)value$; break; case 9: usedHeapMB = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class ADelete extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ADelete\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]}]}"); public java.nio.ByteBuffer row; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn> columns; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return row; case 1: return columns; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: row = (java.nio.ByteBuffer)value$; break; case 1: columns = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumn>)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AMasterNotRunning extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); public org.apache.avro.util.Utf8 message; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return message; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: message = (org.apache.avro.util.Utf8)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad 
index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AServerAddress extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerAddress\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}"); public org.apache.avro.util.Utf8 hostname; public org.apache.avro.util.Utf8 inetSocketAddress; public int port; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return hostname; case 1: return inetSocketAddress; case 2: return port; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: hostname = (org.apache.avro.util.Utf8)value$; break; case 1: inetSocketAddress = (org.apache.avro.util.Utf8)value$; break; case 2: port = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AClusterStatus extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AClusterStatus\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}}},{\"name\":\"servers\",\"type\":\"int\"}]}"); public double averageLoad; public org.apache.avro.generic.GenericArray<org.apache.avro.util.Utf8> deadServerNames; public int deadServers; public org.apache.avro.util.Utf8 hbaseVersion; public int regionsCount; public int requestsCount; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AServerInfo> serverInfos; public int servers; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return averageLoad; case 1: return deadServerNames; case 2: return deadServers; case 3: return hbaseVersion; case 4: return regionsCount; case 5: return requestsCount; case 6: return serverInfos; case 7: return servers; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: averageLoad = (java.lang.Double)value$; break; case 1: deadServerNames = (org.apache.avro.generic.GenericArray<org.apache.avro.util.Utf8>)value$; break; case 2: deadServers = (java.lang.Integer)value$; break; case 3: hbaseVersion = (org.apache.avro.util.Utf8)value$; break; case 4: regionsCount = (java.lang.Integer)value$; break; case 5: requestsCount = (java.lang.Integer)value$; break; case 6: serverInfos = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AServerInfo>)value$; break; case 7: servers = (java.lang.Integer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= 
==src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AColumn extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AColumn\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}"); public java.nio.ByteBuffer family; public java.nio.ByteBuffer qualifier; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return family; case 1: return qualifier; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: family = (java.nio.ByteBuffer)value$; break; case 1: qualifier = (java.nio.ByteBuffer)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AIOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}"); public org.apache.avro.util.Utf8 message; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return message; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: message = (org.apache.avro.util.Utf8)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public enum ACompressionAlgorithm { LZO, GZ, NONE } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class APut extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"APut\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}}}]}"); public java.nio.ByteBuffer row; public org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumnValue> columnValues; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return row; case 1: return columnValues; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: row = (java.nio.ByteBuffer)value$; break; case 1: columnValues = (org.apache.avro.generic.GenericArray<org.apache.hadoop.hbase.avro.generated.AColumnValue>)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class ATimeRange extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ATimeRange\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]}"); public long minStamp; public long maxStamp; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return minStamp; case 1: return maxStamp; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: minStamp = (java.lang.Long)value$; break; case 1: maxStamp = (java.lang.Long)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java ======================================================================= package org.apache.hadoop.hbase.avro.generated; @SuppressWarnings("all") public class AServerInfo extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { public static final org.apache.avro.Schema SCHEMA$ = 
org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerInfo\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}"); public int infoPort; public org.apache.hadoop.hbase.avro.generated.AServerLoad load; public org.apache.hadoop.hbase.avro.generated.AServerAddress serverAddress; public org.apache.avro.util.Utf8 serverName; public long startCode; public org.apache.avro.Schema getSchema() { return SCHEMA$; } public java.lang.Object get(int field$) { switch (field$) { case 0: return infoPort; case 1: return load; case 2: return serverAddress; case 3: return serverName; case 4: return startCode; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: infoPort = (java.lang.Integer)value$; break; case 1: load = (org.apache.hadoop.hbase.avro.generated.AServerLoad)value$; break; case 2: serverAddress = (org.apache.hadoop.hbase.avro.generated.AServerAddress)value$; break; case 3: serverName = (org.apache.avro.util.Utf8)value$; break; case 4: startCode = (java.lang.Long)value$; break; default: throw new org.apache.avro.AvroRuntimeException("Bad index"); } } } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/avro/hbase.avpr ======================================================================= { "protocol" : "HBase", "namespace" : "org.apache.hadoop.hbase.avro.generated", "types" : [ { "type" : "record", "name" : "AServerAddress", "fields" : [ { "name" : "hostname", "type" : "string" }, { "name" : "inetSocketAddress", "type" : "string" }, { "name" : "port", "type" : "int" } ] }, { "type" : "record", "name" : "ARegionLoad", "fields" : [ { "name" : "memStoreSizeMB", "type" : "int" }, { "name" : "name", "type" : "bytes" }, { "name" : "storefileIndexSizeMB", "type" : "int" }, { "name" : "storefiles", "type" : "int" }, { "name" : "storefileSizeMB", "type" : "int" }, { "name" : "stores", "type" : "int" } ] }, { "type" : "record", "name" : "AServerLoad", "fields" : [ { "name" : "load", "type" : "int" }, { "name" : "maxHeapMB", "type" : "int" }, { "name" : "memStoreSizeInMB", "type" : "int" 
======================================================================= ==src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java ======================================================================= ======================================================================= ==src/main/java/org/apache/hadoop/hbase/master/handler/MasterCloseRegionHandler.java ======================================================================= ======================================================================= ==src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java ======================================================================= ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: StorageClusterStatusMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class StorageClusterStatusMessage { private StorageClusterStatusMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class StorageClusterStatus extends com.google.protobuf.GeneratedMessage { // Use StorageClusterStatus.newBuilder() to construct. private StorageClusterStatus() { initFields(); } private StorageClusterStatus(boolean noInit) {} private static final StorageClusterStatus defaultInstance; public static StorageClusterStatus getDefaultInstance() { return defaultInstance; } public StorageClusterStatus getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable; } public static final class Region extends com.google.protobuf.GeneratedMessage { // Use Region.newBuilder() to construct. private Region() { initFields(); } private Region(boolean noInit) {} private static final Region defaultInstance; public static Region getDefaultInstance() { return defaultInstance; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: ColumnSchemaMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class ColumnSchemaMessage { private ColumnSchemaMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class ColumnSchema extends com.google.protobuf.GeneratedMessage { // Use ColumnSchema.newBuilder() to construct. 
private ColumnSchema() { initFields(); } private ColumnSchema(boolean noInit) {} private static final ColumnSchema defaultInstance; public static ColumnSchema getDefaultInstance() { return defaultInstance; } public ColumnSchema getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_ColumnSchema_fieldAccessorTable; } public static final class Attribute extends com.google.protobuf.GeneratedMessage { // Use Attribute.newBuilder() to construct. private Attribute() { initFields(); } private Attribute(boolean noInit) {} private static final Attribute defaultInstance; public static Attribute getDefaultInstance() { return defaultInstance; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableInfoMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: TableInfoMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class TableInfoMessage { private TableInfoMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class TableInfo extends com.google.protobuf.GeneratedMessage { // Use TableInfo.newBuilder() to construct. private TableInfo() { initFields(); } private TableInfo(boolean noInit) {} private static final TableInfo defaultInstance; public static TableInfo getDefaultInstance() { return defaultInstance; } public TableInfo getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableInfo_fieldAccessorTable; } public static final class Region extends com.google.protobuf.GeneratedMessage { // Use Region.newBuilder() to construct. private Region() { initFields(); } private Region(boolean noInit) {} private static final Region defaultInstance; public static Region getDefaultInstance() { return defaultInstance; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/VersionMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: VersionMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class VersionMessage { private VersionMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class Version extends com.google.protobuf.GeneratedMessage { // Use Version.newBuilder() to construct. 
private Version() { initFields(); } private Version(boolean noInit) {} private static final Version defaultInstance; public static Version getDefaultInstance() { return defaultInstance; } public Version getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Version_fieldAccessorTable; } // optional string restVersion = 1; public static final int RESTVERSION_FIELD_NUMBER = 1; private boolean hasRestVersion; private java.lang.String restVersion_ = ""; public boolean hasRestVersion() { return hasRestVersion; } public java.lang.String getRestVersion() { return restVersion_; } // optional string jvmVersion = 2; public static final int JVMVERSION_FIELD_NUMBER = 2; private boolean hasJvmVersion; private java.lang.String jvmVersion_ = ""; public boolean hasJvmVersion() { return hasJvmVersion; } public java.lang.String getJvmVersion() { return jvmVersion_; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellSetMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: CellSetMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class CellSetMessage { private CellSetMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class CellSet extends com.google.protobuf.GeneratedMessage { // Use CellSet.newBuilder() to construct. private CellSet() { initFields(); } private CellSet(boolean noInit) {} private static final CellSet defaultInstance; public static CellSet getDefaultInstance() { return defaultInstance; } public CellSet getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_CellSet_fieldAccessorTable; } public static final class Row extends com.google.protobuf.GeneratedMessage { // Use Row.newBuilder() to construct. private Row() { initFields(); } private Row(boolean noInit) {} private static final Row defaultInstance; public static Row getDefaultInstance() { return defaultInstance; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableListMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: TableListMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class TableListMessage { private TableListMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class TableList extends com.google.protobuf.GeneratedMessage { // Use TableList.newBuilder() to construct. private TableList() { initFields(); } private TableList(boolean noInit) {} private static final TableList defaultInstance; public static TableList getDefaultInstance() { return defaultInstance; } public TableList getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableList_fieldAccessorTable; } // repeated string name = 1; public static final int NAME_FIELD_NUMBER = 1; private java.util.List<java.lang.String> name_ = java.util.Collections.emptyList(); public java.util.List<java.lang.String> getNameList() { return name_; } public int getNameCount() { return name_.size(); } public java.lang.String getName(int index) { return name_.get(index); } private void initFields() { ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/CellMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: CellMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class CellMessage { private CellMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class Cell extends com.google.protobuf.GeneratedMessage { // Use Cell.newBuilder() to construct. 
private Cell() { initFields(); } private Cell(boolean noInit) {} private static final Cell defaultInstance; public static Cell getDefaultInstance() { return defaultInstance; } public Cell getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Cell_fieldAccessorTable; } // optional bytes row = 1; public static final int ROW_FIELD_NUMBER = 1; private boolean hasRow; private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY; public boolean hasRow() { return hasRow; } public com.google.protobuf.ByteString getRow() { return row_; } // optional bytes column = 2; public static final int COLUMN_FIELD_NUMBER = 2; private boolean hasColumn; private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY; public boolean hasColumn() { return hasColumn; } public com.google.protobuf.ByteString getColumn() { return column_; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/TableSchemaMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! // source: TableSchemaMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class TableSchemaMessage { private TableSchemaMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class TableSchema extends com.google.protobuf.GeneratedMessage { // Use TableSchema.newBuilder() to construct. private TableSchema() { initFields(); } private TableSchema(boolean noInit) {} private static final TableSchema defaultInstance; public static TableSchema getDefaultInstance() { return defaultInstance; } public TableSchema getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_TableSchema_fieldAccessorTable; } public static final class Attribute extends com.google.protobuf.GeneratedMessage { // Use Attribute.newBuilder() to construct. private Attribute() { initFields(); } private Attribute(boolean noInit) {} private static final Attribute defaultInstance; public static Attribute getDefaultInstance() { return defaultInstance; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/ScannerMessage.java ======================================================================= // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: ScannerMessage.proto package org.apache.hadoop.hbase.rest.protobuf.generated; public final class ScannerMessage { private ScannerMessage() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public static final class Scanner extends com.google.protobuf.GeneratedMessage { // Use Scanner.newBuilder() to construct. private Scanner() { initFields(); } private Scanner(boolean noInit) {} private static final Scanner defaultInstance; public static Scanner getDefaultInstance() { return defaultInstance; } public Scanner getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_Scanner_fieldAccessorTable; } // optional bytes startRow = 1; public static final int STARTROW_FIELD_NUMBER = 1; private boolean hasStartRow; private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY; public boolean hasStartRow() { return hasStartRow; } public com.google.protobuf.ByteString getStartRow() { return startRow_; } // optional bytes endRow = 2; public static final int ENDROW_FIELD_NUMBER = 2; private boolean hasEndRow; private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY; public boolean hasEndRow() { return hasEndRow; } public com.google.protobuf.ByteString getEndRow() { return endRow_; } ======================================================================= ==src/main/java/org/apache/hadoop/hbase/regionserver/RSZookeeperUpdater.java ======================================================================= ======================================================================= ==src/main/java/org/apache/hadoop/hbase/regionserver/wal/LogEntryVisitor.java ======================================================================= package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.HRegionInfo; public interface LogEntryVisitor { /** * * @param info * @param logKey * @param logEdit */ public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit); } ======================================================================= ==src/main/resources/hbase-webapps/master/zk.jsp ======================================================================= <%@ page contentType="text/html;charset=UTF-8" import="java.io.IOException" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.HBaseAdmin" import="org.apache.hadoop.hbase.client.HConnection" import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper" import="org.apache.hadoop.hbase.HBaseConfiguration" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.HConstants"%><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); HBaseAdmin hbadmin = new HBaseAdmin(conf); HConnection connection = hbadmin.getConnection(); ZooKeeperWrapper wrapper = connection.getZooKeeperWrapper(); %> <?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/> <title>ZooKeeper Dump</title> <link rel="stylesheet" type="text/css" href="/static/hbase.css" /> </head> <body> <a id="logo" href="http://hbase.org"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a> <h1 id="page_title">ZooKeeper Dump</h1> <p id="links_menu"><a href="/master.jsp">Master</a>, <a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p> <hr id="head_rule" /> <pre> <%= wrapper.dump() %> </pre> </body> </html> ======================================================================= ==src/main/resources/hbase-webapps/master/table.jsp ======================================================================= <%@ page contentType="text/html;charset=UTF-8" import="java.util.Map" import="org.apache.hadoop.io.Writable" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.HTable" import="org.apache.hadoop.hbase.client.HBaseAdmin" import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.HServerAddress" import="org.apache.hadoop.hbase.HServerInfo" import="org.apache.hadoop.hbase.io.ImmutableBytesWritable" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.master.MetaRegion" import="org.apache.hadoop.hbase.util.Bytes" import="java.util.Map" import="org.apache.hadoop.hbase.HConstants"%><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); HBaseAdmin hbadmin = new HBaseAdmin(conf); String tableName = request.getParameter("name"); HTable table = new HTable(conf, tableName); String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th></tr>"; HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation(); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); Map<String, Integer> frags = null; if (showFragmentation) { frags = master.getTableFragmentation(); } %> <?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <% String action = request.getParameter("action"); String key = request.getParameter("key"); if ( action != null ) { %> <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/> <link rel="stylesheet" type="text/css" href="/static/hbase.css" /> </head> <body> <a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a> <h1 id="page_title">Table action request accepted</h1> <p><hr><p> <% if (action.equals("split")) { if (key != null && key.length() > 0) { Writable[] arr = new Writable[1]; ======================================================================= ==src/main/resources/hbase-webapps/master/index.html ======================================================================= <meta HTTP-EQUIV="REFRESH" content="0;url=master.jsp"/> ======================================================================= ==src/main/resources/hbase-webapps/master/master.jsp ======================================================================= <%@ page contentType="text/html;charset=UTF-8" import="java.util.*" 
import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.JvmVersion" import="org.apache.hadoop.hbase.util.FSUtils" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.master.MetaRegion" import="org.apache.hadoop.hbase.client.HBaseAdmin" import="org.apache.hadoop.hbase.HServerInfo" import="org.apache.hadoop.hbase.HServerAddress" import="org.apache.hadoop.hbase.HTableDescriptor" %><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation(); Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions(); Map<String, HServerInfo> serverToServerInfos = master.getServerManager().getServersToServerInfo(); int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000; if (interval == 0) { interval = 1; } boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); Map<String, Integer> frags = null; if (showFragmentation) { frags = master.getTableFragmentation(); } %><?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/> <title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title> <link rel="stylesheet" type="text/css" href="/static/hbase.css" /> </head> <body> <a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a> <h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1> <p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p> <!-- Various warnings that cluster admins should be aware of --> <% if (JvmVersion.isBadJvmVersion()) { %> <div class="warning"> Your current JVM version <%= System.getProperty("java.version") %> is known to be unstable with HBase. Please see the <a href="http://wiki.apache.org/hadoop/Hbase/Troubleshooting#A18">HBase wiki</a> for details. 
</div> <% } %> ======================================================================= ==src/main/resources/hbase-webapps/regionserver/regionserver.jsp ======================================================================= <%@ page contentType="text/html;charset=UTF-8" import="java.util.*" import="java.io.IOException" import="org.apache.hadoop.io.Text" import="org.apache.hadoop.hbase.regionserver.HRegionServer" import="org.apache.hadoop.hbase.regionserver.HRegion" import="org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics" import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.HServerInfo" import="org.apache.hadoop.hbase.HServerLoad" import="org.apache.hadoop.hbase.HRegionInfo" %><% HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER); HServerInfo serverInfo = null; try { serverInfo = regionServer.getHServerInfo(); } catch (IOException e) { e.printStackTrace(); } RegionServerMetrics metrics = regionServer.getMetrics(); Collection<HRegionInfo> onlineRegions = regionServer.getSortedOnlineRegionInfos(); int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000; %><?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/> <title>HBase Region Server: <%= serverInfo.getServerAddress().getHostname() %>:<%= serverInfo.getServerAddress().getPort() %></title> <link rel="stylesheet" type="text/css" href="/static/hbase.css" /> </head> <body> <a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a> <h1 id="page_title">Region Server: <%= serverInfo.getServerAddress().getHostname() %>:<%= serverInfo.getServerAddress().getPort() %></h1> <p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p> <hr id="head_rule" /> <h2>Region Server Attributes</h2> <table> <tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr> <tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr> <tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr> <tr><td>Metrics</td><td><%= metrics.toString() %></td><td>RegionServer Metrics; file and heap sizes are in megabytes</td></tr> <tr><td>Zookeeper Quorum</td><td><%= regionServer.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers</td></tr> </table> <h2>Online Regions</h2> <% if (onlineRegions != null && onlineRegions.size() > 0) { %> <table> ======================================================================= ==src/main/resources/hbase-webapps/regionserver/index.html ======================================================================= <meta HTTP-EQUIV="REFRESH" content="0;url=regionserver.jsp"/> ======================================================================= ==src/main/resources/hbase-webapps/static/hbase.css ======================================================================= h1, h2, h3 { 
color: DarkSlateBlue } table { border: thin solid DodgerBlue } tr { border: thin solid DodgerBlue } td { border: thin solid DodgerBlue } th { border: thin solid DodgerBlue } #logo {float: left;} #logo img {border: none;} #page_title {padding-top: 27px;} div.warning { border: 1px solid #666; background-color: #fcc; font-size: 110%; font-weight: bold; } ======================================================================= ==src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties ======================================================================= # ResourceBundle properties file for RowCounter MR job CounterGroupName= RowCounter ROWS.name= Rows ======================================================================= ==src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd ======================================================================= <?xml version="1.0" encoding="UTF-8"?> <schema targetNamespace="ModelSchema" elementFormDefault="qualified" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="ModelSchema"> <element name="Version" type="tns:Version"></element> <complexType name="Version"> <attribute name="REST" type="string"></attribute> <attribute name="JVM" type="string"></attribute> <attribute name="OS" type="string"></attribute> <attribute name="Server" type="string"></attribute> <attribute name="Jersey" type="string"></attribute> </complexType> <element name="TableList" type="tns:TableList"></element> <complexType name="TableList"> <sequence> <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element> </sequence> </complexType> <complexType name="Table"> <sequence> <element name="name" type="string"></element> </sequence> </complexType> <element name="TableInfo" type="tns:TableInfo"></element> <complexType name="TableInfo"> <sequence> <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element> </sequence> <attribute name="name" type="string"></attribute> </complexType> <complexType name="TableRegion"> <attribute name="name" type="string"></attribute> <attribute name="id" type="int"></attribute> <attribute name="startKey" type="base64Binary"></attribute> <attribute name="endKey" type="base64Binary"></attribute> <attribute name="location" type="string"></attribute> </complexType> <element name="TableSchema" type="tns:TableSchema"></element> <complexType name="TableSchema"> <sequence> <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element> </sequence> ======================================================================= ==src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties ======================================================================= # ResourceBundle properties file for RowCounter MR job CounterGroupName= RowCounter ROWS.name= Rows ======================================================================= ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css ======================================================================= /* Auto-generated CSS for generated Thrift docs */ body { font-family: Tahoma, sans-serif; } pre { background-color: #dddddd; padding: 6px; } h3,h4 { padding-top: 0px; margin-top: 0px; } div.definition { border: 1px solid gray; margin: 10px; padding: 10px; } div.extends { margin: -0.5em 0 1em 5em } table { border: 1px solid grey; border-collapse: collapse; } td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; } th { border: 1px solid black; 
background-color: #bbbbbb; text-align: left; padding: 1px 6px; } ======================================================================= ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/index.html ======================================================================= <html><head> <link href="style.css" rel="stylesheet" type="text/css"/> <title>All Thrift declarations</title></head><body> <h1>All Thrift declarations</h1> <table><tr><th>Module</th><th>Services</th><th>Data types</th><th>Constants</th></tr> <tr> <td>Hbase</td><td><a href="Hbase.html#Svc_Hbase">Hbase</a><br/> <ul> <li><a href="Hbase.html#Fn_Hbase_atomicIncrement">atomicIncrement</a></li> <li><a href="Hbase.html#Fn_Hbase_compact">compact</a></li> <li><a href="Hbase.html#Fn_Hbase_createTable">createTable</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAll">deleteAll</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllRow">deleteAllRow</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllRowTs">deleteAllRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllTs">deleteAllTs</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteTable">deleteTable</a></li> <li><a href="Hbase.html#Fn_Hbase_disableTable">disableTable</a></li> <li><a href="Hbase.html#Fn_Hbase_enableTable">enableTable</a></li> <li><a href="Hbase.html#Fn_Hbase_get">get</a></li> <li><a href="Hbase.html#Fn_Hbase_getColumnDescriptors">getColumnDescriptors</a></li> <li><a href="Hbase.html#Fn_Hbase_getRow">getRow</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowTs">getRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowWithColumns">getRowWithColumns</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowWithColumnsTs">getRowWithColumnsTs</a></li> <li><a href="Hbase.html#Fn_Hbase_getTableNames">getTableNames</a></li> <li><a href="Hbase.html#Fn_Hbase_getTableRegions">getTableRegions</a></li> <li><a href="Hbase.html#Fn_Hbase_getVer">getVer</a></li> <li><a href="Hbase.html#Fn_Hbase_getVerTs">getVerTs</a></li> <li><a href="Hbase.html#Fn_Hbase_isTableEnabled">isTableEnabled</a></li> <li><a href="Hbase.html#Fn_Hbase_majorCompact">majorCompact</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRow">mutateRow</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRowTs">mutateRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRows">mutateRows</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRowsTs">mutateRowsTs</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerClose">scannerClose</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerGet">scannerGet</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerGetList">scannerGetList</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpen">scannerOpen</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenTs">scannerOpenTs</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithPrefix">scannerOpenWithPrefix</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStop">scannerOpenWithStop</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStopTs">scannerOpenWithStopTs</a></li> </ul> </td> <td><a href="Hbase.html#Struct_AlreadyExists">AlreadyExists</a><br/> <a href="Hbase.html#Struct_BatchMutation">BatchMutation</a><br/> <a href="Hbase.html#Typedef_Bytes">Bytes</a><br/> <a href="Hbase.html#Struct_ColumnDescriptor">ColumnDescriptor</a><br/> <a href="Hbase.html#Struct_IOError">IOError</a><br/> <a href="Hbase.html#Struct_IllegalArgument">IllegalArgument</a><br/> ======================================================================= ==src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/Hbase.html 
======================================================================= <html><head> <link href="style.css" rel="stylesheet" type="text/css"/> <title>Thrift module: Hbase</title></head><body> <h1>Thrift module: Hbase</h1> <table><tr><th>Module</th><th>Services</th><th>Data types</th><th>Constants</th></tr> <tr> <td>Hbase</td><td><a href="Hbase.html#Svc_Hbase">Hbase</a><br/> <ul> <li><a href="Hbase.html#Fn_Hbase_atomicIncrement">atomicIncrement</a></li> <li><a href="Hbase.html#Fn_Hbase_compact">compact</a></li> <li><a href="Hbase.html#Fn_Hbase_createTable">createTable</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAll">deleteAll</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllRow">deleteAllRow</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllRowTs">deleteAllRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteAllTs">deleteAllTs</a></li> <li><a href="Hbase.html#Fn_Hbase_deleteTable">deleteTable</a></li> <li><a href="Hbase.html#Fn_Hbase_disableTable">disableTable</a></li> <li><a href="Hbase.html#Fn_Hbase_enableTable">enableTable</a></li> <li><a href="Hbase.html#Fn_Hbase_get">get</a></li> <li><a href="Hbase.html#Fn_Hbase_getColumnDescriptors">getColumnDescriptors</a></li> <li><a href="Hbase.html#Fn_Hbase_getRow">getRow</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowTs">getRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowWithColumns">getRowWithColumns</a></li> <li><a href="Hbase.html#Fn_Hbase_getRowWithColumnsTs">getRowWithColumnsTs</a></li> <li><a href="Hbase.html#Fn_Hbase_getTableNames">getTableNames</a></li> <li><a href="Hbase.html#Fn_Hbase_getTableRegions">getTableRegions</a></li> <li><a href="Hbase.html#Fn_Hbase_getVer">getVer</a></li> <li><a href="Hbase.html#Fn_Hbase_getVerTs">getVerTs</a></li> <li><a href="Hbase.html#Fn_Hbase_isTableEnabled">isTableEnabled</a></li> <li><a href="Hbase.html#Fn_Hbase_majorCompact">majorCompact</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRow">mutateRow</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRowTs">mutateRowTs</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRows">mutateRows</a></li> <li><a href="Hbase.html#Fn_Hbase_mutateRowsTs">mutateRowsTs</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerClose">scannerClose</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerGet">scannerGet</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerGetList">scannerGetList</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpen">scannerOpen</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenTs">scannerOpenTs</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithPrefix">scannerOpenWithPrefix</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStop">scannerOpenWithStop</a></li> <li><a href="Hbase.html#Fn_Hbase_scannerOpenWithStopTs">scannerOpenWithStopTs</a></li> </ul> </td> <td><a href="Hbase.html#Struct_AlreadyExists">AlreadyExists</a><br/> <a href="Hbase.html#Struct_BatchMutation">BatchMutation</a><br/> <a href="Hbase.html#Typedef_Bytes">Bytes</a><br/> <a href="Hbase.html#Struct_ColumnDescriptor">ColumnDescriptor</a><br/> <a href="Hbase.html#Struct_IOError">IOError</a><br/> <a href="Hbase.html#Struct_IllegalArgument">IllegalArgument</a><br/> ======================================================================= ==src/docbkx/sample_article.xml ======================================================================= <?xml version="1.0" encoding="UTF-8"?> <article version="5.0" xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:svg="http://www.w3.org/2000/svg" 
xmlns:m="http://www.w3.org/1998/Math/MathML" xmlns:html="http://www.w3.org/1999/xhtml" xmlns:db="http://docbook.org/ns/docbook"> <info> <title>Wah-wah <?eval ${project.version}?> </title> </info> <section xml:id="wahwah"> <title>Wah-Wah changed my life</title> <para>I was born very young...</para> <para>This is a sample docbook article.</para> <para> <?eval ${project.version}?> </para> <section xml:id="then"> <title>Then</title> <para></para> </section> <section xml:id="and"> <title>And</title> <para></para> </section> <section xml:id="later"> <title>Later</title> <para></para> </section> </section> <section xml:id="good_books"> <title>Good books</title> <para></para> </section> ======================================================================= ==src/docbkx/book.xml ======================================================================= <?xml version="1.0" encoding="UTF-8"?> <book version="5.0" xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:svg="http://www.w3.org/2000/svg" xmlns:m="http://www.w3.org/1998/Math/MathML" xmlns:html="http://www.w3.org/1999/xhtml" xmlns:db="http://docbook.org/ns/docbook"> <info> <title>HBase Book <?eval ${project.version}?> </title> </info> <chapter xml:id="getting_started"> <title >Getting Started</title> <section> <title>Requirements</title> <para>First...</para> </section> </chapter> <chapter xml:id="datamodel"> <title>Data Model</title> <para></para> </chapter> <chapter xml:id="implementation"> <title>Implementation</title> <para></para> </chapter> <chapter xml:id="mapreduce"> <title>MapReduce</title> <para></para> </chapter> <chapter xml:id="schema"> <title>Schema Design</title> <para></para> </chapter> <chapter xml:id="shell"> ======================================================================= ==src/site/site.vm ======================================================================= <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <!-- Generated by Apache Maven Doxia at $dateFormat.format( $currentDate ) --> #macro ( link $href $name $target $img $position $alt $border $width $height ) #set ( $linkTitle = ' title="' + $name + '"' ) #if( $target ) #set ( $linkTarget = ' target="' + $target + '"' ) #else #set ( $linkTarget = "" ) #end #if ( ( $href.toLowerCase().startsWith("http") || $href.toLowerCase().startsWith("https") ) ) #set ( $linkClass = ' class="externalLink"' ) #else #set ( $linkClass = "" ) #end #if ( $img ) #if ( $position == "left" ) <a href="$href"$linkClass$linkTarget$linkTitle>#image($img $alt $border $width $height)$name</a> #else <a href="$href"$linkClass$linkTarget$linkTitle>$name #image($img $alt $border $width $height)</a> #end #else <a href="$href"$linkClass$linkTarget$linkTitle>$name</a> #end #end ## #macro ( image $img $alt $border $width $height ) #if( $img ) #if ( ! ( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) ) #set ( $imgSrc = $PathTool.calculateLink( $img, . 
) ) #set ( $imgSrc = $imgSrc.replaceAll( "\\", "/" ) ) #set ( $imgSrc = ' src="' + $imgSrc + '"' ) #else #set ( $imgSrc = ' src="' + $img + '"' ) #end #if( $alt ) #set ( $imgAlt = ' alt="' + $alt + '"' ) #else #set ( $imgAlt = ' alt=""' ) #end #if( $border ) #set ( $imgBorder = ' border="' + $border + '"' ) #else #set ( $imgBorder = "" ) #end #if( $width ) #set ( $imgWidth = ' width="' + $width + '"' ) #else #set ( $imgWidth = "" ) #end #if( $height ) ======================================================================= ==src/site/site.xml ======================================================================= <?xml version="1.0" encoding="ISO-8859-1"?> <project xmlns="http://maven.apache.org/DECORATION/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/DECORATION/1.0.0 http://maven.apache.org/xsd/decoration-1.0.0.xsd"> <bannerLeft> <name>HBase</name> <src>http://hbase.apache.org/images/hbase_logo_med.gif</src> <href>http://hbase.apache.org/</href> </bannerLeft> <bannerRight> <src>http://hbase.apache.org/images/asf_logo_wide.png</src> <href>http://www.apache.org/</href> </bannerRight> <version position="right" /> <publishDate position="right" /> <body> <menu name="HBase"> <item name="Overview" href="index.html"/> <item name="License" href="license.html" /> <item name="Downloads" href="http://www.apache.org/dyn/closer.cgi/hbase/" /> <item name="Release Notes" href="https://issues.apache.org/jira/browse/HBASE?report=com.atlassian.jira.plugin.system.project:changelog-panel" /> <item name="Issue Tracking" href="issue-tracking.html" /> <item name="Mailing Lists" href="mail-lists.html" /> <item name="Source Repository" href="source-repository.html" /> <item name="FAQ" href="faq.html" /> <item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" /> <item name="Team" href="team-list.html" /> </menu> <menu name="Documentation"> <item name="Getting Started" href="apidocs/overview-summary.html#overview_description" /> <item name="API" href="apidocs/index.html" /> <item name="X-Ref" href="xref/index.html" /> <item name="ACID Semantics" href="acid-semantics.html" /> <item name="Bulk Loads" href="bulk-loads.html" /> <item name="Metrics" href="metrics.html" /> <item name="HBase on Windows" href="cygwin.html" /> <item name="Cluster replication" href="replication.html" /> <item name="Pseudo-Distributed HBase" href="pseudo-distributed.html" /> <item name="HBase Book" href="book.html" /> <item name="Example Docbook Article" href="sample_article.html" /> </menu> </body> <skin> <groupId>org.apache.maven.skins</groupId> <artifactId>maven-stylus-skin</artifactId> </skin> </project> ======================================================================= ==src/assembly/bin.xml ======================================================================= <?xml version="1.0"?> <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd"> <id>bin</id> <formats> <format>tar.gz</format> </formats> <fileSets> <fileSet> <includes> <include>${basedir}/*.txt</include> </includes> </fileSet> <fileSet> <directory>conf</directory> </fileSet> <fileSet> <directory>bin</directory> <fileMode>755</fileMode> </fileSet> <fileSet> <directory>src/main/ruby</directory> <outputDirectory>lib/ruby</outputDirectory> </fileSet> <fileSet> <directory>target</directory> 
<outputDirectory>/</outputDirectory> <includes> <include>hbase-${project.version}.jar</include> <include>hbase-${project.version}-tests.jar</include> <include>hbase-${project.version}-sources.jar</include> </includes> </fileSet> <fileSet> <directory>target/hbase-webapps</directory> <outputDirectory>hbase-webapps</outputDirectory> </fileSet> <fileSet> <directory>target/site</directory> <outputDirectory>docs</outputDirectory> </fileSet> <fileSet> <directory>src/main/resources/</directory> <outputDirectory>conf</outputDirectory> <includes> <include>hbase-default.xml</include> </includes> </fileSet> </fileSets> ======================================================================= ==src/assembly/src.xml ======================================================================= <?xml version="1.0"?> <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd"> <id>src</id> <formats> <format>tar.gz</format> </formats> <fileSets> <fileSet> <includes> <include>${basedir}/*.txt</include> </includes> </fileSet> <fileSet> <includes> <include>pom.xml</include> </includes> </fileSet> <fileSet> <directory>src</directory> </fileSet> <fileSet> <directory>conf</directory> </fileSet> <fileSet> <directory>docs</directory> </fileSet> <fileSet> <directory>cloudera</directory> </fileSet> <fileSet> <directory>bin</directory> <fileMode>755</fileMode> </fileSet> </fileSets> </assembly> ======================================================================= ==src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java ======================================================================= package org.apache.hadoop.hbase.filter; import static org.junit.Assert.*; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; public class TestColumnPrefixFilter { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @Test public void testColumnPrefixFilter() throws IOException { String family = "Family"; HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); htd.addFamily(new HColumnDescriptor(family)); HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegion region = HRegion.createHRegion(info, HBaseTestingUtility. 
getTestDir(), TEST_UTIL.getConfiguration()); List<String> rows = generateRandomWords(100, "row"); List<String> columns = generateRandomWords(10000, "column"); long maxTimestamp = 2; List<KeyValue> kvList = new ArrayList<KeyValue>(); Map<String, List<KeyValue>> prefixMap = new HashMap<String, List<KeyValue>>(); prefixMap.put("p", new ArrayList<KeyValue>()); prefixMap.put("s", new ArrayList<KeyValue>()); ======================================================================= ==src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java ======================================================================= ======================================================================= ==src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java ======================================================================= ======================================================================= ==src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java ======================================================================= ======================================================================= ==src/test/ruby/test_helper.rb ======================================================================= require 'test/unit' module Testing module Declarative # define_test "should do something" do # ... # end def define_test(name, &block) test_name = "test_#{name.gsub(/\s+/,'_')}".to_sym defined = instance_method(test_name) rescue false raise "#{test_name} is already defined in #{self}" if defined if block_given? define_method(test_name, &block) else define_method(test_name) do flunk "No implementation provided for #{name}" end end end end end module Hbase module TestHelpers def setup_hbase @formatter = Shell::Formatter::Console.new() @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) end def table(table) @hbase.table(table, @formatter) end def admin @hbase.admin(@formatter) end def create_test_table(name) # Create the table if needed unless admin.exists?(name) admin.create name, [{'NAME' => 'x', 'VERSIONS' => 5}, 'y'] return end # Enable the table if needed unless admin.enabled?(name) admin.enable(name) end end ======================================================================= ==src/test/resources/log4j.properties ======================================================================= # Define some default values that can be overridden by system properties hbase.root.logger=INFO,console hbase.log.dir=. hbase.log.file=hbase.log # Define the root logger to the system property "hbase.root.logger". 
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
log4j.threshhold=ALL

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}

# Rollver at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

# Debugging Pattern format
log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n

#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n

# Custom Logging levels
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.apache.hadoop.hbase=DEBUG
=======================================================================
==src/test/resources/mapred-queues.xml
=======================================================================
<?xml version="1.0"?>
<!-- This is the template for queue configuration. The format supports nesting of
     queues within queues - a feature called hierarchical queues. All queues are
     defined within the 'queues' tag which is the top level element for this
     XML document. The 'aclsEnabled' attribute should be set to true, if ACLs
     should be checked on queue operations such as submitting jobs, killing jobs etc. -->
<queues aclsEnabled="false">

  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
  <queue>

    <!-- Name of a queue. Queue name cannot contain a ':' -->
    <name>default</name>

    <!-- properties for a queue, typically used by schedulers,
         can be defined here -->
    <properties>
    </properties>

    <!-- State of the queue. If running, the queue will accept new jobs.
         If stopped, the queue will not accept new jobs. -->
    <state>running</state>

    <!-- Specifies the ACLs to check for submitting jobs to this queue.
         If set to '*', it allows all users to submit jobs to the queue.
         For specifying a list of users and groups the format to use is
         user1,user2 group1,group2 -->
    <acl-submit-job>*</acl-submit-job>

    <!-- Specifies the ACLs to check for modifying jobs in this queue.
         Modifications include killing jobs, tasks of jobs or changing priorities.
         If set to '*', it allows all users to submit jobs to the queue.
         For specifying a list of users and groups the format to use is
         user1,user2 group1,group2 -->
    <acl-administer-jobs>*</acl-administer-jobs>
  </queue>

  <!-- Here is a sample of a hierarchical queue configuration
       where q2 is a child of q1. In this example, q2 is a leaf level
       queue as it has no queues configured within it. Currently, ACLs
       and state are only supported for the leaf level queues.
       Note also the usage of properties for the queue q2.
<queue> <name>q1</name> <queue> <name>q2</name> <properties> <property key="capacity" value="20"/> ======================================================================= ==.git/config ======================================================================= [core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ======================================================================= ==.git/info/exclude ======================================================================= # git ls-files --others --exclude-from=.git/info/exclude # Lines that start with '#' are comments. # For a project mostly in C, the following would be a good set of # exclude patterns (uncomment them if you want to use them): # *.[oa] # *~ ======================================================================= ==.git/hooks/commit-msg.sample ======================================================================= #!/bin/sh # # An example hook script to check the commit log message. # Called by "git commit" with one argument, the name of the file # that has the commit message. The hook should exit with non-zero # status after issuing an appropriate message if it wants to stop the # commit. The hook is allowed to edit the commit message file. # # To enable this hook, rename this file to "commit-msg". # Uncomment the below to add a Signed-off-by line to the message. # Doing this in a hook is a bad idea in general, but the prepare-commit-msg # hook is more suited to it. # # SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') # grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" # This example catches duplicate Signed-off-by lines. test "" = "$(grep '^Signed-off-by: ' "$1" | sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { echo >&2 Duplicate Signed-off-by lines. exit 1 } ======================================================================= ==.git/hooks/applypatch-msg.sample ======================================================================= #!/bin/sh # # An example hook script to check the commit log message taken by # applypatch from an e-mail message. # # The hook should exit with non-zero status after issuing an # appropriate message if it wants to stop the commit. The hook is # allowed to edit the commit message file. # # To enable this hook, rename this file to "applypatch-msg". . git-sh-setup test -x "$GIT_DIR/hooks/commit-msg" && exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"} : ======================================================================= ==.git/hooks/post-update.sample ======================================================================= #!/bin/sh # # An example hook script to prepare a packed repository for use over # dumb transports. # # To enable this hook, rename this file to "post-update". exec git update-server-info ======================================================================= ==.git/hooks/pre-commit.sample ======================================================================= #!/bin/sh # # An example hook script to verify what is about to be committed. # Called by "git commit" with no arguments. The hook should # exit with non-zero status after issuing an appropriate message if # it wants to stop the commit. # # To enable this hook, rename this file to "pre-commit". if git rev-parse --verify HEAD >/dev/null 2>&1 then against=HEAD else # Initial commit: diff against an empty tree object against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 fi # If you want to allow non-ascii filenames set this variable to true. 
allownonascii=$(git config hooks.allownonascii) # Cross platform projects tend to avoid non-ascii filenames; prevent # them from being added to the repository. We exploit the fact that the # printable range starts at the space character and ends with tilde. if [ "$allownonascii" != "true" ] && # Note that the use of brackets around a tr range is ok here, (it's # even required, for portability to Solaris 10's /usr/bin/tr), since # the square bracket bytes happen to fall in the designated range. test "$(git diff --cached --name-only --diff-filter=A -z $against | LC_ALL=C tr -d '[ -~]\0')" then echo "Error: Attempt to add a non-ascii file name." echo echo "This can cause problems if you want to work" echo "with people on other platforms." echo echo "To be portable it is advisable to rename the file ..." echo echo "If you know what you are doing you can disable this" echo "check using:" echo echo " git config hooks.allownonascii true" echo exit 1 fi exec git diff-index --check --cached $against -- ======================================================================= ==.git/hooks/pre-rebase.sample ======================================================================= #!/bin/sh # # Copyright (c) 2006, 2008 Junio C Hamano # # The "pre-rebase" hook is run just before "git rebase" starts doing # its job, and can prevent the command from running by exiting with # non-zero status. # # The hook is called with the following parameters: # # $1 -- the upstream the series was forked from. # $2 -- the branch being rebased (or empty when rebasing the current branch). # # This sample shows how to prevent topic branches that are already # merged to 'next' branch from getting rebased, because allowing it # would result in rebasing already published history. publish=next basebranch="$1" if test "$#" = 2 then topic="refs/heads/$2" else topic=`git symbolic-ref HEAD` || exit 0 ;# we do not interrupt rebasing detached HEAD fi case "$topic" in refs/heads/??/*) ;; *) exit 0 ;# we do not interrupt others. ;; esac # Now we are dealing with a topic branch being rebased # on top of master. Is it OK to rebase it? # Does the topic really exist? git show-ref -q "$topic" || { echo >&2 "No such branch $topic" exit 1 } # Is topic fully merged to master? not_in_master=`git rev-list --pretty=oneline ^master "$topic"` if test -z "$not_in_master" then echo >&2 "$topic is fully merged to master; better remove it." exit 1 ;# we could allow it, but there is no point. ======================================================================= ==.git/hooks/pre-applypatch.sample ======================================================================= #!/bin/sh # # An example hook script to verify what is about to be committed # by applypatch from an e-mail message. # # The hook should exit with non-zero status after issuing an # appropriate message if it wants to stop the commit. # # To enable this hook, rename this file to "pre-applypatch". . git-sh-setup test -x "$GIT_DIR/hooks/pre-commit" && exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"} : ======================================================================= ==.git/hooks/update.sample ======================================================================= #!/bin/sh # # An example hook script to blocks unannotated tags from entering. # Called by "git receive-pack" with arguments: refname sha1-old sha1-new # # To enable this hook, rename this file to "update". # # Config # ------ # hooks.allowunannotated # This boolean sets whether unannotated tags will be allowed into the # repository. 
By default they won't be. # hooks.allowdeletetag # This boolean sets whether deleting tags will be allowed in the # repository. By default they won't be. # hooks.allowmodifytag # This boolean sets whether a tag may be modified after creation. By default # it won't be. # hooks.allowdeletebranch # This boolean sets whether deleting branches will be allowed in the # repository. By default they won't be. # hooks.denycreatebranch # This boolean sets whether remotely creating branches will be denied # in the repository. By default this is allowed. # # --- Command line refname="$1" oldrev="$2" newrev="$3" # --- Safety check if [ -z "$GIT_DIR" ]; then echo "Don't run this script from the command line." >&2 echo " (if you want, you could supply GIT_DIR then run" >&2 echo " $0 <ref> <oldrev> <newrev>)" >&2 exit 1 fi if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then echo "Usage: $0 <ref> <oldrev> <newrev>" >&2 exit 1 fi # --- Config allowunannotated=$(git config --bool hooks.allowunannotated) allowdeletebranch=$(git config --bool hooks.allowdeletebranch) denycreatebranch=$(git config --bool hooks.denycreatebranch) allowdeletetag=$(git config --bool hooks.allowdeletetag) allowmodifytag=$(git config --bool hooks.allowmodifytag) ======================================================================= ==.git/hooks/post-commit.sample ======================================================================= #!/bin/sh # # An example hook script that is called after a successful # commit is made. # # To enable this hook, rename this file to "post-commit". : Nothing ======================================================================= ==.git/hooks/prepare-commit-msg.sample ======================================================================= #!/bin/sh # # An example hook script to prepare the commit log message. # Called by "git commit" with the name of the file that has the # commit message, followed by the description of the commit # message's source. The hook's purpose is to edit the commit # message file. If the hook fails with a non-zero status, # the commit is aborted. # # To enable this hook, rename this file to "prepare-commit-msg". # This hook includes three examples. The first comments out the # "Conflicts:" part of a merge commit. # # The second includes the output of "git diff --name-status -r" # into the message, just before the "git status" output. It is # commented because it doesn't cope with --amend or with squashed # commits. # # The third example adds a Signed-off-by line to the message, that can # still be edited. This is rarely a good idea. case "$2,$3" in merge,) /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;; # ,|template,) # /usr/bin/perl -i.bak -pe ' # print "\n" . `git diff --cached --name-status -r` # if /^#/ && $first++ == 0' "$1" ;; *) ;; esac # SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') # grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" ======================================================================= ==.git/hooks/post-receive.sample ======================================================================= #!/bin/sh # # An example hook script for the "post-receive" event. # # The "post-receive" script is run after receive-pack has accepted a pack # and the repository has been updated. 
It is passed arguments in through # stdin in the form # <oldrev> <newrev> <refname> # For example: # aa453216d1b3e49e7f6f98441fa56946ddcd6a20 68f7abf4e6f922807889f52bc043ecd31b79f814 refs/heads/master # # see contrib/hooks/ for a sample, or uncomment the next line and # rename the file to "post-receive". #. /usr/share/doc/git-core/contrib/hooks/post-receive-email ======================================================================= ==.git/description ======================================================================= Unnamed repository; edit this file 'description' to name the repository. ======================================================================= ==.git/HEAD ======================================================================= ref: refs/heads/master ======================================================================= ==CHANGES.txt ======================================================================= HBase Change Log Release 0.89.20100924 - Fri Sep 24 13:51:36 PDT 2010 INCOMPATIBLE CHANGES HBASE-1822 Remove the deprecated APIs HBASE-1848 Fixup shell for HBASE-1822 HBASE-1854 Remove the Region Historian HBASE-1930 Put.setTimeStamp misleading (doesn't change timestamp on existing KeyValues, not copied in copy constructor) (Dave Latham via Stack) HBASE-1360 move up to Thrift 0.2.0 (Kay Kay and Lars Francke via Stack) HBASE-2212 Refactor out lucene dependencies from HBase (Kay Kay via Stack) HBASE-2219 stop using code mapping for method names in the RPC HBASE-1728 Column family scoping and cluster identification HBASE-2099 Move build to Maven (Paul Smith via Stack) HBASE-2260 Remove all traces of Ant and Ivy (Lars Francke via Stack) HBASE-2255 take trunk back to hadoop 0.20 HBASE-2378 Bulk insert with multiple reducers broken due to improper ImmutableBytesWritable comparator (Todd Lipcon via Stack) HBASE-2392 Upgrade to ZooKeeper 3.3.0 HBASE-2294 Enumerate ACID properties of HBase in a well defined spec (Todd Lipcon via Stack) HBASE-2541 Remove transactional contrib (Clint Morgan via Stack) HBASE-2542 Fold stargate contrib into core HBASE-2565 Remove contrib module from hbase HBASE-2397 Bytes.toStringBinary escapes printable chars HBASE-2771 Update our hadoop jar to be latest from 0.20-append branch HBASE-2803 Remove remaining Get code from Store.java,etc HBASE-2553 Revisit IncrementColumnValue implementation in 0.22 BUG FIXES HBASE-1791 Timeout in IndexRecordWriter (Bradford Stephens via Andrew Purtell) HBASE-1737 Regions unbalanced when adding new node (recommit) HBASE-1792 [Regression] Cannot save timestamp in the future HBASE-1793 [Regression] HTable.get/getRow with a ts is broken HBASE-1698 Review documentation for o.a.h.h.mapreduce HBASE-1798 [Regression] Unable to delete a row in the future HBASE-1790 filters are not working correctly (HBASE-1710 HBASE-1807 too) HBASE-1779 ThriftServer logged error if getVer() result is empty HBASE-1778 Improve PerformanceEvaluation (Schubert Zhang via Stack) HBASE-1751 Fix KeyValue javadoc on getValue for client-side HBASE-1795 log recovery doesnt reset the max sequence id, new logfiles can get tossed as 'duplicates' HBASE-1794 recovered log files are not inserted into the storefile map HBASE-1824 [stargate] default timestamp should be LATEST_TIMESTAMP HBASE-1740 ICV has a subtle race condition only visible under high load HBASE-1808 [stargate] fix how columns are specified for scanners HBASE-1828 CompareFilters are broken from client-side HBASE-1836 test of indexed hbase broken 
=======================================================================
==cloudera/do-release-build
=======================================================================
#!/bin/bash
# Copyright (c) 2009 Cloudera, inc
#
# Performs a release build

set -ex

# Do the build
BIN_DIR=$(readlink -f $(dirname $0))
RELEASE_DIR=$BIN_DIR/..

cd $RELEASE_DIR

mvn -DskipTests clean
mvn -DskipTests -Dhbase.version=${FULL_VERSION} site install assembly:assembly

mkdir -p build
for x in target/*.tar.gz ; do
  tar -C build -xzf $x
done
(cd build && tar -czf hbase-${FULL_VERSION}.tar.gz hbase-${FULL_VERSION})
=======================================================================
==cloudera/apply-patches
=======================================================================
#!/bin/sh -x

set -e

if [ $# != 2 ]; then
  echo usage: $0 '<target-dir> <patch-dir>'
  exit 1
fi

TARGET_DIR=`readlink -f $1`
PATCH_DIR=`readlink -f $2`

cd $TARGET_DIR

# We have to git init, or else git apply will search upwards and find
# some other git repository (even though this build is taking place
# inside a gitignored build/ dir)
# even though we never commit to this "repository", this serves to
# anchor the repository root at the source dir root.
git init-db

for PATCH in `ls -1 $PATCH_DIR/* | sort` ; do
  git apply --whitespace=nowarn $PATCH
done
=======================================================================
==cloudera/install_hbase.sh
=======================================================================
#!/bin/sh
# Copyright 2009 Cloudera, inc.

set -ex

usage() {
  echo "
usage: $0 <options>
  Required not-so-options:
     --cloudera-source-dir=DIR   path to cloudera distribution files
     --build-dir=DIR             path to hbase dist.dir
     --prefix=PREFIX             path to install into

  Optional options:
     --doc-dir=DIR               path to install docs into [/usr/share/doc/hbase]
     --lib-dir=DIR               path to install hbase home [/usr/lib/hbase]
     --installed-lib-dir=DIR     path where lib-dir will end up on target system
     --bin-dir=DIR               path to install bins [/usr/bin]
     --examples-dir=DIR          path to install examples [doc-dir/examples]
     ... [ see source for more similar options ]
  "
  exit 1
}

OPTS=$(getopt \
  -n $0 \
  -o '' \
  -l 'cloudera-source-dir:' \
  -l 'prefix:' \
  -l 'doc-dir:' \
  -l 'lib-dir:' \
  -l 'installed-lib-dir:' \
  -l 'bin-dir:' \
  -l 'examples-dir:' \
  -l 'build-dir:' -- "$@")

if [ $? != 0 ] ; then
  usage
fi

eval set -- "$OPTS"
while true ; do
  case "$1" in
    --cloudera-source-dir)
      CLOUDERA_SOURCE_DIR=$2 ; shift 2
      ;;
    --prefix)
      PREFIX=$2 ; shift 2
      ;;
    --build-dir)
      BUILD_DIR=$2 ; shift 2
=======================================================================
==cloudera/patches/0019-CLOUDERA-BUILD.-Fix-copy-of-bin-to-be-cp-a.patch
=======================================================================
From 427f35f2dc730c553d9be1c0d78d7b695e4b2b76 Mon Sep 17 00:00:00 2001
From: Todd Lipcon <todd@cloudera.com>
Date: Thu, 30 Sep 2010 22:50:02 -0700
Subject: [PATCH 19/28] CLOUDERA-BUILD.
Fix copy of bin/ to be cp -a --- cloudera/install_hbase.sh | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/cloudera/install_hbase.sh b/cloudera/install_hbase.sh index 6e9f257..65f7e71 100755 --- a/cloudera/install_hbase.sh +++ b/cloudera/install_hbase.sh @@ -100,7 +100,7 @@ cp *.txt $PREFIX/$DOC_DIR/ cp -a hbase-webapps $PREFIX/$LIB_DIR cp -a conf $PREFIX/$ETC_DIR/conf -cp bin/* $PREFIX/$BIN_DIR +cp -a bin/* $PREFIX/$BIN_DIR/ ln -s $ETC_DIR/conf $PREFIX/$LIB_DIR/conf -- 1.7.1 ======================================================================= ==cloudera/patches/0021-HBASE-3008-Memstore.updateColumnValue-passes-wrong-f.patch ======================================================================= From 405eef0025bc6894c117b5d542706e18b64aa4fe Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Thu, 7 Oct 2010 13:29:27 -0700 Subject: [PATCH 21/28] HBASE-3008 Memstore.updateColumnValue passes wrong flag to heapSizeChange (Causes memstore size to go negative) Author: Ryan Rawson --- .../java/org/apache/hadoop/hbase/KeyValue.java | 2 +- .../hadoop/hbase/regionserver/TestHRegion.java | 16 ++++++ .../hadoop/hbase/regionserver/TestStore.java | 58 ++++++++++++++++++++ 3 files changed, 75 insertions(+), 1 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/src/main/java/org/apache/hadoop/hbase/KeyValue.java index bb26f27..ffbcb15 100644 --- a/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -1907,7 +1907,7 @@ public class KeyValue implements Writable, HeapSize { return ClassSize.align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + ClassSize.align(ClassSize.ARRAY + length) + (3 * Bytes.SIZEOF_INT) + - ClassSize.align(ClassSize.ARRAY + (rowCache == null ? 0 : rowCache.length)) + + ClassSize.align(ClassSize.ARRAY) + (2 * Bytes.SIZEOF_LONG)); } diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index d0b84cc..139252f 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -1958,6 +1958,22 @@ public class TestHRegion extends HBaseTestCase { assertICV(row, fam1, qual1, value+amount); } + public void testIncrementColumnValue_heapSize() throws IOException { + EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); + + initHRegion(tableName, getName(), fam1); + + long byAmount = 1L; + long size; + + for( int i = 0; i < 1000 ; i++) { + region.incrementColumnValue(row, fam1, qual1, byAmount, true); + + size = region.memstoreSize.get(); + assertTrue("memstore size: " + size, size >= 0); + } + } + public void testIncrementColumnValue_UpdatingInPlace_Negative() ======================================================================= ==cloudera/patches/0003-CLOUDERA-BUILD.-Switch-to-CDH3b3-snapshot-in-todd-s-.patch ======================================================================= From 5ac1ffdf37c28be48196eb0f4226e0ade6d3e048 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Mon, 17 May 2010 17:22:34 -0700 Subject: [PATCH 03/28] CLOUDERA-BUILD. 
Switch to CDH3b3 snapshot in todd's repo --- pom.xml | 33 +++++++++++++++++++++++++++------ 1 files changed, 27 insertions(+), 6 deletions(-) diff --git a/pom.xml b/pom.xml index 90cafef..4cbedb5 100644 --- a/pom.xml +++ b/pom.xml @@ -186,6 +186,17 @@ </releases> </repository> <repository> + <id>todd</id> + <name>Todd Lipcon's repo for CDH snapshots</name> + <url>http://people.apache.org/~todd/repo/</url> + <snapshots> + <enabled>true</enabled> + </snapshots> + <releases> + <enabled>true</enabled> + </releases> + </repository> + <repository> <id>temp-hadoop</id> <name>Hadoop 0.20.1/2 packaging, thrift, zk</name> <url>http://people.apache.org/~rawson/repo/</url> @@ -207,6 +218,17 @@ <enabled>true</enabled> </releases> </repository> + <repository> + <id>cloudera</id> + <name>Repository for finding CDH3</name> + <url> https://repository.cloudera.com/content/repositories/releases/</url> + <snapshots> + <enabled>true</enabled> + </snapshots> + <releases> + <enabled>true</enabled> + </releases> + </repository> </repositories> @@ -477,8 +499,7 @@ ======================================================================= ==cloudera/patches/0018-CLOUDERA-BUILD.-Build-site-as-part-of-release-build.patch ======================================================================= From 4c89df680a254d41fff96a4387280a8be505a2ad Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Thu, 30 Sep 2010 22:42:07 -0700 Subject: [PATCH 18/28] CLOUDERA-BUILD. Build site as part of release build --- cloudera/do-release-build | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/cloudera/do-release-build b/cloudera/do-release-build index 0986c23..eacaf57 100755 --- a/cloudera/do-release-build +++ b/cloudera/do-release-build @@ -12,7 +12,7 @@ RELEASE_DIR=$BIN_DIR/.. cd $RELEASE_DIR mvn -DskipTests clean -mvn -DskipTests -Dhbase.version=${FULL_VERSION} install assembly:assembly +mvn -DskipTests -Dhbase.version=${FULL_VERSION} site install assembly:assembly mkdir -p build for x in target/*.tar.gz ; do tar -C build -xzf $x -- 1.7.1 ======================================================================= ==cloudera/patches/0008-CLOUDERA-BUILD.-hbase-config.sh-should-set-HBASE_PID.patch ======================================================================= From e9c356f37bf195fc284e51ecd93c40b2a0b38509 Mon Sep 17 00:00:00 2001 From: todd <todd@ubuntu64-build01.(none)> Date: Sun, 27 Jun 2010 22:21:29 -0700 Subject: [PATCH 08/28] CLOUDERA-BUILD. hbase-config.sh should set HBASE_PID_DIR if unset --- bin/hbase-config.sh | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 5d13859..2415b53 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -71,6 +71,8 @@ done # Allow alternate hbase conf dir location. HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}" + +HBASE_PID_DIR="${HBASE_PID_DIR:-/tmp}" # List of hbase regions servers. HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}" # List of hbase secondary masters. 
-- 1.7.1 ======================================================================= ==cloudera/patches/0005-Re-enable-log-split-test.patch ======================================================================= From 4377d51ef77152d798c7c7739a6e64c52911286c Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Thu, 3 Jun 2010 20:32:45 -0700 Subject: [PATCH 05/28] Re-enable log split test --- .../hbase/regionserver/wal/TestHLogSplit.java | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java index 9cbebb0..d7bdc44 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java @@ -404,7 +404,7 @@ public class TestHLogSplit { // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null } } -/* DISABLED for now. TODO: HBASE-2645 + @Test public void testLogCannotBeWrittenOnceParsed() throws IOException { AtomicLong counter = new AtomicLong(0); @@ -433,7 +433,7 @@ public class TestHLogSplit { stop.set(true); } } -*/ + @Test public void testSplitWillNotTouchLogsIfNewHLogGetsCreatedAfterSplitStarted() -- 1.7.1 ======================================================================= ==cloudera/patches/0016-CLOUDERA-BUILD.-HBase-running-on-secure-hadoop-tempo.patch ======================================================================= From 74542880d740e9be24b103f1d5f5c6489d01911c Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Mon, 30 Aug 2010 16:25:56 -0700 Subject: [PATCH 16/28] CLOUDERA-BUILD. HBase running on secure hadoop, temporary patch. This is not upstreamed, since it currently is very difficult to do this without reflection or a shim layer. This will be upstreamed with the larger project of HBase security later this year. Author: Todd Lipcon --- .../org/apache/hadoop/hbase/ipc/HBaseClient.java | 4 +- .../java/org/apache/hadoop/hbase/ipc/HBaseRPC.java | 3 +- .../org/apache/hadoop/hbase/ipc/HBaseServer.java | 24 ++- .../org/apache/hadoop/hbase/master/HMaster.java | 45 +++++- .../hadoop/hbase/regionserver/HRegionServer.java | 59 ++++++- .../apache/hadoop/hbase/util/JVMClusterUtil.java | 60 ++++++- .../apache/hadoop/hbase/HBaseTestingUtility.java | 12 +- .../org/apache/hadoop/hbase/MiniHBaseCluster.java | 37 ++--- .../hadoop/hbase/regionserver/TestStore.java | 82 +++++---- .../hbase/regionserver/wal/TestWALReplay.java | 180 +++++++++++--------- 10 files changed, 330 insertions(+), 176 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java index 2b5eeb6..dbd4803 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java @@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.ObjectWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; @@ -385,8 +384,7 @@ public class HBaseClient { out.write(HBaseServer.CURRENT_VERSION); //When there are more fields we can have ConnectionHeader Writable. 
DataOutputBuffer buf = new DataOutputBuffer(); - ObjectWritable.writeObject(buf, remoteId.getTicket(), - UserGroupInformation.class, conf); + WritableUtils.writeString(buf, remoteId.getTicket().getUserName()); int bufLen = buf.getLength(); out.writeInt(bufLen); out.write(buf.getData(), 0, bufLen); diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java index 2d90d4e..58e9d0d 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java @@ -381,7 +381,8 @@ public class HBaseRPC { ======================================================================= ==cloudera/patches/0020-Fix-src-assembly-to-make-java-libs-644-and-not-inclu.patch ======================================================================= From 8acae9396f84c90c4b4513d4644fb30389c6847c Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@lipcon.org> Date: Thu, 30 Sep 2010 23:27:21 -0700 Subject: [PATCH 20/28] Fix src assembly to make java libs 644, and not include hbase lib twice --- src/assembly/bin.xml | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/src/assembly/bin.xml b/src/assembly/bin.xml index 06e45a9..416c46a 100644 --- a/src/assembly/bin.xml +++ b/src/assembly/bin.xml @@ -52,6 +52,8 @@ <outputDirectory>/lib</outputDirectory> <unpack>false</unpack> <scope>runtime</scope> + <fileMode>644</fileMode> + <useProjectArtifact>false</useProjectArtifact> </dependencySet> </dependencySets> </assembly> -- 1.7.1 ======================================================================= ==cloudera/patches/0014-HBASE-3000.-Add-hbase-classpath-command.patch ======================================================================= From 677ce02c0b4af1e361942e58bd8a34902cf363ca Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Wed, 15 Sep 2010 01:37:37 -0400 Subject: [PATCH 14/28] HBASE-3000. Add hbase classpath command --- bin/hbase | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/bin/hbase b/bin/hbase index c783e80..ec92f7b 100755 --- a/bin/hbase +++ b/bin/hbase @@ -262,6 +262,9 @@ elif [ "$COMMAND" = "zookeeper" ] ; then fi elif [ "$COMMAND" = "zkcli" ] ; then CLASS='org.apache.zookeeper.ZooKeeperMain' +elif [ "$COMMAND" = "classpath" ] ; then + echo $CLASSPATH + exit 0 else CLASS=$COMMAND fi -- 1.7.1 ======================================================================= ==cloudera/patches/0015-HBASE-3001.-TableMapReduceUtil-should-always-add-dep.patch ======================================================================= From 5476f351a90bd028de924ad021a7f0924b20e103 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Wed, 15 Sep 2010 01:37:45 -0400 Subject: [PATCH 15/28] HBASE-3001. 
TableMapReduceUtil should always add dependency jars Author: Todd Lipcon and Michael Stack --- .../hadoop/hbase/mapred/TableMapReduceUtil.java | 28 ++++ .../hadoop/hbase/mapreduce/TableMapReduceUtil.java | 36 +++--- .../hadoop/hbase/mapreduce/package-info.java | 152 +++++++++----------- 3 files changed, 116 insertions(+), 100 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 41748fe..749e314 100644 --- a/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -29,6 +29,10 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.mapred.TextOutputFormat; /** * Utility for {@link TableMap} and {@link TableReduce} @@ -59,6 +63,11 @@ public class TableMapReduceUtil { job.setMapperClass(mapper); FileInputFormat.addInputPaths(job, table); job.set(TableInputFormat.COLUMN_LIST, columns); + try { + addDependencyJars(job); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } } /** @@ -105,6 +114,7 @@ public class TableMapReduceUtil { } else if (partitioner != null) { job.setPartitionerClass(partitioner); } + addDependencyJars(job); } /** @@ -181,4 +191,22 @@ public class TableMapReduceUtil { public static void setScannerCaching(JobConf job, int batchSize) { job.setInt("hbase.client.scanner.caching", batchSize); ======================================================================= ==cloudera/patches/0025-HBASE-3096.-TestCompaction-timing-out.patch ======================================================================= From df24f2aa192153241a12711a95e861806db315f3 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Fri, 8 Oct 2010 15:29:23 -0700 Subject: [PATCH 25/28] HBASE-3096. TestCompaction timing out Author: Todd Lipcon --- .../hadoop/hbase/regionserver/TestCompaction.java | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 34b8044..9da4031 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -61,7 +61,7 @@ public class TestCompaction extends HBaseTestCase { // Set cache flush size to 1MB conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024); - conf.setInt("hbase.hregion.memstore.block.multiplier", 10); + conf.setInt("hbase.hregion.memstore.block.multiplier", 30); this.cluster = null; } @@ -94,9 +94,9 @@ public class TestCompaction extends HBaseTestCase { * @throws IOException */ public void testMajorCompactingToNoOutput() throws IOException { - createStoreFile(r); + createSmallerStoreFile(r); for (int i = 0; i < COMPACTION_THRESHOLD; i++) { - createStoreFile(r); + createSmallerStoreFile(r); } // Now delete everything. 
InternalScanner s = r.getScanner(new Scan()); -- 1.7.1 ======================================================================= ==cloudera/patches/0006-HBASE-2773.-Check-for-null-values-in-meta-in-test-ut.patch ======================================================================= From 89b8bdcb7eb2837c07a03783f2065be9f69a478d Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@lipcon.org> Date: Sun, 20 Jun 2010 20:25:13 -0700 Subject: [PATCH 06/28] HBASE-2773. Check for null values in meta in test util --- .../apache/hadoop/hbase/HBaseTestingUtility.java | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index fadee21..b0f132a 100644 --- a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -662,8 +662,10 @@ public class HBaseTestingUtility { List<byte[]> rows = new ArrayList<byte[]>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { - HRegionInfo info = Writables.getHRegionInfo( - result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); + byte[] value = result.getValue( + HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + if (value == null) continue; + HRegionInfo info = Writables.getHRegionInfo(value); HTableDescriptor desc = info.getTableDesc(); if (Bytes.compareTo(desc.getName(), tableName) == 0) { LOG.info("getMetaTableRows: row -> " + -- 1.7.1 ======================================================================= ==cloudera/patches/0028-CLOUDERA-BUILD.-Fix-versionless-jar-naming-symlinks-.patch ======================================================================= From 80fd13dbb5d66bdfc80e6c22f6b4567f62499c49 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Sat, 9 Oct 2010 17:56:08 -0700 Subject: [PATCH 28/28] CLOUDERA-BUILD. Fix versionless jar naming symlinks for multijar case Ref: CDH-2203 --- cloudera/install_hbase.sh | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudera/install_hbase.sh b/cloudera/install_hbase.sh index 247780e..86c4baf 100755 --- a/cloudera/install_hbase.sh +++ b/cloudera/install_hbase.sh @@ -99,8 +99,10 @@ cp hbase*.jar $PREFIX/$LIB_DIR/ # Make an unversioned jar symlink so that other # packages that depend on us can link in. -for x in $PREFIX/hbase*jar ; do - ln -s $(basename $x) $PREFIX/$LIB_DIR/hbase.jar +for x in $PREFIX/$LIB_DIR/hbase*jar ; do + JARNAME=$(basename $x) + VERSIONLESS_NAME=$(echo $JARNAME | sed -e 's,hbase-[0-9\+\-\.]*[0-9]\(-SNAPSHOT\)*,hbase,g') + ln -s $JARNAME $PREFIX/$LIB_DIR/$VERSIONLESS_NAME done cp -a docs/* $PREFIX/$DOC_DIR cp *.txt $PREFIX/$DOC_DIR/ -- 1.7.1 ======================================================================= ==cloudera/patches/0002-CLOUDERA-BUILD.-Add-build-infrastructure.patch ======================================================================= From b569811d0a92da0011375e27d2b38dc04cef3c44 Mon Sep 17 00:00:00 2001 From: newalex <newalex@ubuntu64-build01.(none)> Date: Tue, 22 Jun 2010 12:26:19 -0700 Subject: [PATCH 02/28] CLOUDERA-BUILD. Add build infrastructure. CLOUDERA-BUILD. 
Build should create a "mixed" src/bin tarball, and install from that --- cloudera/README.cloudera | 9 +++ cloudera/do-release-build | 21 +++++++ cloudera/install_hbase.sh | 136 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 166 insertions(+), 0 deletions(-) create mode 100644 cloudera/README.cloudera create mode 100755 cloudera/do-release-build create mode 100755 cloudera/install_hbase.sh diff --git a/cloudera/README.cloudera b/cloudera/README.cloudera new file mode 100644 index 0000000..8626e8f --- /dev/null +++ b/cloudera/README.cloudera @@ -0,0 +1,9 @@ +This build was generated by Cloudera's build system in the following manner: + +1) The pristine open-source release tarball was unpacked + +2) The patches contained within the patches/ directory next to this README +were applied using the apply-patches script. A complete log of these changes +is also included in CHANGES.cloudera.txt. + +3) The project was built by running the do-release-build script in this directory. diff --git a/cloudera/do-release-build b/cloudera/do-release-build new file mode 100755 index 0000000..0986c23 --- /dev/null +++ b/cloudera/do-release-build @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2009 Cloudera, inc +# +# Performs a release build + +set -ex + +# Do the build +BIN_DIR=$(readlink -f $(dirname $0)) +RELEASE_DIR=$BIN_DIR/.. + +cd $RELEASE_DIR + +mvn -DskipTests clean ======================================================================= ==cloudera/patches/0017-HBASE-2782.-QoS-for-META-table-access.patch ======================================================================= From 49c5812eb3c58180b3a49791afc165d93d8351ea Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Tue, 28 Sep 2010 22:59:55 -0700 Subject: [PATCH 17/28] HBASE-2782. 
QoS for META table access Author: Ryan Rawson --- .../java/org/apache/hadoop/hbase/ipc/HBaseRPC.java | 15 ++-- .../org/apache/hadoop/hbase/ipc/HBaseServer.java | 93 ++++++++++++++++---- .../org/apache/hadoop/hbase/master/HMaster.java | 7 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 35 ++++---- .../hadoop/hbase/regionserver/HRegionServer.java | 74 +++++++++++++++- 5 files changed, 181 insertions(+), 43 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java index 58e9d0d..9e7866b 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java @@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.ipc; +import com.google.common.base.Function; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.io.HbaseObjectWritable; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.net.NetUtils; @@ -82,8 +84,9 @@ public class HBaseRPC { super(); } // no public ctor + /** A method invocation, including the method name and its parameters.*/ - private static class Invocation implements Writable, Configurable { + public static class Invocation implements Writable, Configurable { private String methodName; @SuppressWarnings("unchecked") private Class[] parameterClasses; @@ -497,9 +500,9 @@ public class HBaseRPC { final Class<?>[] ifaces, final String bindAddress, final int port, final int numHandlers, - final boolean verbose, Configuration conf) + int metaHandlerCount, final boolean verbose, Configuration conf, int highPriorityLevel) ======================================================================= ==cloudera/patches/0007-CLOUDERA-BUILD.-Include-cloudera-dir-in-src-assembly.patch ======================================================================= From cf4dc092807fc922103366ed222bef2745320a84 Mon Sep 17 00:00:00 2001 From: todd <todd@ubuntu64-build01.(none)> Date: Sun, 27 Jun 2010 21:52:18 -0700 Subject: [PATCH 07/28] CLOUDERA-BUILD. Include cloudera/ dir in src assembly --- src/assembly/src.xml | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/src/assembly/src.xml b/src/assembly/src.xml index ab414ac..5bb4f2f 100644 --- a/src/assembly/src.xml +++ b/src/assembly/src.xml @@ -24,6 +24,12 @@ <directory>conf</directory> </fileSet> <fileSet> + <directory>docs</directory> + </fileSet> + <fileSet> + <directory>cloudera</directory> + </fileSet> + <fileSet> <directory>bin</directory> <fileMode>755</fileMode> </fileSet> -- 1.7.1 ======================================================================= ==cloudera/patches/0023-CLOUDERA-BUILD.-cloudera-directory-should-get-instal.patch ======================================================================= From 3190f7406c74c716fe09f693589314c67b98b62b Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Thu, 7 Oct 2010 19:56:33 -0700 Subject: [PATCH 23/28] CLOUDERA-BUILD. 
cloudera directory should get installed --- cloudera/install_hbase.sh | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/cloudera/install_hbase.sh b/cloudera/install_hbase.sh index dd96245..247780e 100755 --- a/cloudera/install_hbase.sh +++ b/cloudera/install_hbase.sh @@ -94,6 +94,7 @@ install -d -m 0755 $PREFIX/$BIN_DIR install -d -m 0755 $PREFIX/$ETC_DIR cp -ra lib/* ${PREFIX}/${LIB_DIR}/lib/ +cp -a cloudera ${PREFIX}/${LIB_DIR}/cloudera cp hbase*.jar $PREFIX/$LIB_DIR/ # Make an unversioned jar symlink so that other -- 1.7.1 ======================================================================= ==cloudera/patches/0010-HBASE-2467.-Concurrent-flushers-in-HLog-sync-using-H.patch ======================================================================= From cec5868f8de0080107540234f3005f1f033d43a4 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Tue, 7 Sep 2010 21:41:25 -0700 Subject: [PATCH 10/28] HBASE-2467. Concurrent flushers in HLog sync using HDFS-895 (JD's v3 patch) --- .../org/apache/hadoop/hbase/HTableDescriptor.java | 2 +- .../apache/hadoop/hbase/regionserver/wal/HLog.java | 169 ++++++-------------- .../hadoop/hbase/regionserver/wal/TestHLog.java | 2 +- 3 files changed, 53 insertions(+), 120 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 0d57270..3861799 100644 --- a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -95,7 +95,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { public static final long DEFAULT_MAX_FILESIZE = 1024*1024*256L; - public static final boolean DEFAULT_DEFERRED_LOG_FLUSH = true; + public static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false; private volatile Boolean meta = null; private volatile Boolean root = null; diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 9a62084..b7fc22f 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -141,10 +141,10 @@ public class HLog implements Syncable { private final long blocksize; private final int flushlogentries; private final String prefix; - private final AtomicInteger unflushedEntries = new AtomicInteger(0); private final Path oldLogDir; private final List<LogActionsListener> actionListeners = Collections.synchronizedList(new ArrayList<LogActionsListener>()); + private boolean logRollRequested; private static Class<? 
extends Writer> logWriterClass; @@ -212,6 +212,7 @@ public class HLog implements Syncable { // We synchronize on updateLock to prevent updates and to prevent a log roll // during an update + // locked during appends private final Object updateLock = new Object(); private final boolean enabled; @@ -224,7 +225,7 @@ public class HLog implements Syncable { private final int maxLogs; ======================================================================= ==cloudera/patches/0001-Updating-the-site-for-0.89.20100924.patch ======================================================================= From 692785c890809f1475229a398cc1dba501d6cab9 Mon Sep 17 00:00:00 2001 From: Jean-Daniel Cryans <jdcryans@apache.org> Date: Tue, 5 Oct 2010 19:05:51 +0000 Subject: [PATCH 01/28] Updating the site for 0.89.20100924 git-svn-id: https://svn.apache.org/repos/asf/hbase/tags/0.89.20100924RC1@1004768 13f79535-47bb-0310-9956-ffa450edef68 --- src/site/xdoc/index.xml | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diff --git a/src/site/xdoc/index.xml b/src/site/xdoc/index.xml index 1f57ac3..df375c6 100644 --- a/src/site/xdoc/index.xml +++ b/src/site/xdoc/index.xml @@ -49,6 +49,10 @@ HBase includes: </section> <section name="News"> + <p>November 19th, <a href="http://huguk.org/">Hadoop HUG in London</a> is all about HBase</p> + <p>November 15-19th, <a href="http://www.devoxx.com/display/Devoxx2K10/Home">Devoxx</a> features HBase Training and multiple HBase presentations</p> + <p>October 12th, HBase-related presentations by core contributors and users at <a href="http://www.cloudera.com/company/press-center/hadoop-world-nyc/">Hadoop World 2010</a></p> + <p>October 11th, <a href="http://www.meetup.com/hbaseusergroup/calendar/14606174/">HUG-NYC: HBase User Group NYC Edition</a> (Night before Hadoop World)</p> <p>June 30th, <a href="http://www.meetup.com/hbaseusergroup/calendar/13562846/">HBase Contributor Workshop</a> (Day after Hadoop Summit)</p> <p>May 10th, 2010: HBase graduates from Hadoop sub-project to Apache Top Level Project </p> <p><a href="old_news.html">...</a></p> -- 1.7.1 ======================================================================= ==cloudera/patches/0009-CLOUDERA-BUILD.-rsync-all-of-lib-into-target-directo.patch ======================================================================= From cdeff4c4e3e5c21b55b04cd36c17804f88eba88e Mon Sep 17 00:00:00 2001 From: todd <todd@ubuntu64-build01.(none)> Date: Sun, 27 Jun 2010 23:24:58 -0700 Subject: [PATCH 09/28] CLOUDERA-BUILD. rsync all of lib into target directory Otherwise shell doesn't work, since we don't get .rb files CLOUDERA-BUILD. Replace rsync with cp. 
--- cloudera/install_hbase.sh | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/cloudera/install_hbase.sh b/cloudera/install_hbase.sh index 74090d4..6e9f257 100755 --- a/cloudera/install_hbase.sh +++ b/cloudera/install_hbase.sh @@ -93,10 +93,7 @@ install -d -m 0755 $PREFIX/$DOC_DIR install -d -m 0755 $PREFIX/$BIN_DIR install -d -m 0755 $PREFIX/$ETC_DIR -for i in `find lib/*.jar -type f ` - do echo "Copying $i" - cp $i ${PREFIX}/${LIB_DIR}/lib #don't copy directories by default -done +cp -ra lib/* ${PREFIX}/${LIB_DIR}/lib/ cp hbase*.jar $PREFIX/$LIB_DIR cp -a docs/* $PREFIX/$DOC_DIR cp *.txt $PREFIX/$DOC_DIR/ -- 1.7.1 ======================================================================= ==cloudera/patches/0022-CLOUDERA-BUILD.-Change-wrapper-scripts-to-not-be-dep.patch ======================================================================= From 62bec1e3c6a34bae54dbe05bf9c473ef401eb3fd Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Thu, 7 Oct 2010 18:34:43 -0700 Subject: [PATCH 22/28] CLOUDERA-BUILD. Change wrapper scripts to not be dependent on setting a bunch of environment variables Fixes issues where /usr/lib/hbase/bin/hbase would fail, not finding the hadoop jars. --- bin/hbase | 12 +++++++++++- bin/hbase-config.sh | 5 +++++ cloudera/install_hbase.sh | 31 +++++++++---------------------- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/bin/hbase b/bin/hbase index ec92f7b..b36593b 100755 --- a/bin/hbase +++ b/bin/hbase @@ -42,7 +42,7 @@ # # MAVEN_HOME Where mvn is installed. # -bin=`dirname "$0"` +bin=`dirname "$BASH_SOURCE"` bin=`cd "$bin">/dev/null; pwd` # This will set HBASE_HOME, etc. @@ -175,6 +175,16 @@ if [ "$HBASE_CLASSPATH" != "" ]; then CLASSPATH=${CLASSPATH}:${HBASE_CLASSPATH} fi +# And very last, add other project configuration dirs - this +# allows someone to override the hdfs config by putting an hdfs-site +# in hbase's conf dir, for example +if [ -n "$ZOOKEEPER_CONF_DIR" ]; then + CLASSPATH=${CLASSPATH}:${ZOOKEEPER_CONF_DIR} +fi +if [ -n "$HADOOP_CONF_DIR" ]; then + CLASSPATH=${CLASSPATH}:${HADOOP_CONF_DIR} +fi + # default log directory & file if [ "$HBASE_LOG_DIR" = "" ]; then HBASE_LOG_DIR="$HBASE_HOME/logs" diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 2415b53..32d495d 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -78,11 +78,16 @@ HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}" # List of hbase secondary masters. HBASE_BACKUP_MASTERS="${HBASE_BACKUP_MASTERS:-$HBASE_CONF_DIR/backup-masters}" ======================================================================= ==cloudera/patches/0026-CLOUDERA-BUILD.-Update-hadoop-version.patch ======================================================================= From 1faa3e29bfc2935a71197fe28cec4dbecf2b7692 Mon Sep 17 00:00:00 2001 From: Eli Collins <eli@cloudera.com> Date: Fri, 8 Oct 2010 20:18:14 -0700 Subject: [PATCH 26/28] CLOUDERA-BUILD. Update hadoop version. 
--- pom.xml | 13 +------------ 1 files changed, 1 insertions(+), 12 deletions(-) diff --git a/pom.xml b/pom.xml index 4cbedb5..92523f4 100644 --- a/pom.xml +++ b/pom.xml @@ -186,17 +186,6 @@ </releases> </repository> <repository> - <id>todd</id> - <name>Todd Lipcon's repo for CDH snapshots</name> - <url>http://people.apache.org/~todd/repo/</url> - <snapshots> - <enabled>true</enabled> - </snapshots> - <releases> - <enabled>true</enabled> - </releases> - </repository> - <repository> <id>temp-hadoop</id> <name>Hadoop 0.20.1/2 packaging, thrift, zk</name> <url>http://people.apache.org/~rawson/repo/</url> @@ -499,7 +488,7 @@ <compileSource>1.6</compileSource> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <hbase.version>0.89.20100924</hbase.version> - <hadoop.version>0.20.3-CDH3-SNAPSHOT</hadoop.version> + <hadoop.version>0.20.2-737</hadoop.version> <commons-cli.version>1.2</commons-cli.version> <commons-logging.version>1.1.1</commons-logging.version> <jetty.version>6.1.24</jetty.version> -- 1.7.1 ======================================================================= ==cloudera/patches/0027-HBASE-3101.-bin-assembly-should-include-tests-and-so.patch ======================================================================= From baa476fc14c80e18e91ccd683c6aa045a6689fd3 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Sat, 9 Oct 2010 17:36:53 -0700 Subject: [PATCH 27/28] HBASE-3101. bin assembly should include tests and sources jar Author: Todd Lipcon Reason: sqoop build depends on -tests jar Ref: CDH-2203 --- src/assembly/bin.xml | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/src/assembly/bin.xml b/src/assembly/bin.xml index 416c46a..4eeab88 100644 --- a/src/assembly/bin.xml +++ b/src/assembly/bin.xml @@ -28,7 +28,8 @@ <outputDirectory>/</outputDirectory> <includes> <include>hbase-${project.version}.jar</include> - <include>hbase-${project.version}-test.jar</include> + <include>hbase-${project.version}-tests.jar</include> + <include>hbase-${project.version}-sources.jar</include> </includes> </fileSet> <fileSet> -- 1.7.1 ======================================================================= ==cloudera/patches/0011-SequenceFileLogWriter-doesn-t-need-to-actually-call-.patch ======================================================================= From 6647194e4e48aba590a9fba9d3c699ee05510b16 Mon Sep 17 00:00:00 2001 From: Todd Lipcon <todd@cloudera.com> Date: Wed, 8 Sep 2010 13:45:41 -0700 Subject: [PATCH 11/28] SequenceFileLogWriter doesn't need to actually call sync() --- .../regionserver/wal/SequenceFileLogWriter.java | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index ea59695..7f1b6ce 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -139,4 +139,4 @@ public class SequenceFileLogWriter implements HLog.Writer { public OutputStream getDFSCOutputStream() { return this.dfsClient_out; } -} \ No newline at end of file +} -- 1.7.1 ======================================================================= ==cloudera/CHANGES.cloudera.txt ======================================================================= commit 80fd13dbb5d66bdfc80e6c22f6b4567f62499c49 Author: Todd Lipcon <todd@cloudera.com> Date: Sat Oct 9 17:56:08 2010 -0700 
CLOUDERA-BUILD. Fix versionless jar naming symlinks for multijar case Ref: CDH-2203 commit baa476fc14c80e18e91ccd683c6aa045a6689fd3 Author: Todd Lipcon <todd@cloudera.com> Date: Sat Oct 9 17:36:53 2010 -0700 HBASE-3101. bin assembly should include tests and sources jar Author: Todd Lipcon Reason: sqoop build depends on -tests jar Ref: CDH-2203 commit 1faa3e29bfc2935a71197fe28cec4dbecf2b7692 Author: Eli Collins <eli@cloudera.com> Date: Fri Oct 8 20:18:14 2010 -0700 CLOUDERA-BUILD. Update hadoop version. commit df24f2aa192153241a12711a95e861806db315f3 Author: Todd Lipcon <todd@cloudera.com> Date: Fri Oct 8 15:29:23 2010 -0700 HBASE-3096. TestCompaction timing out Author: Todd Lipcon commit c5361373f24d793ed11cf5935649b2d0bc920658 Author: Todd Lipcon <todd@cloudera.com> Date: Thu Oct 7 22:05:20 2010 -0700 HBASE-2799. "Append not enabled" warning should not show if hbase root dir isn't on DFS Author: Michael Stack commit 3190f7406c74c716fe09f693589314c67b98b62b Author: Todd Lipcon <todd@cloudera.com> Date: Thu Oct 7 19:56:33 2010 -0700 CLOUDERA-BUILD. cloudera directory should get installed commit 62bec1e3c6a34bae54dbe05bf9c473ef401eb3fd Author: Todd Lipcon <todd@cloudera.com> Date: Thu Oct 7 18:34:43 2010 -0700 ======================================================================= ==cloudera/build.properties ======================================================================= # Autogenerated build properties version=0.89.20100924+28 git.hash=80fd13dbb5d66bdfc80e6c22f6b4567f62499c49 cloudera.hash=80fd13dbb5d66bdfc80e6c22f6b4567f62499c49 cloudera.base-branch=cdh-base-0.89.20100924 cloudera.build-branch=cdh-0.89.20100924 ======================================================================= ==cloudera/README.cloudera ======================================================================= This build was generated by Cloudera's build system in the following manner: 1) The pristine open-source release tarball was unpacked 2) The patches contained within the patches/ directory next to this README were applied using the apply-patches script. A complete log of these changes is also included in CHANGES.cloudera.txt. 3) The project was built by running the do-release-build script in this directory. ======================================================================= ==conf/log4j.properties ======================================================================= # Define some default values that can be overridden by system properties hbase.root.logger=INFO,console hbase.log.dir=. hbase.log.file=hbase.log # Define the root logger to the system property "hbase.root.logger". 
log4j.rootLogger=${hbase.root.logger} # Logging Threshold log4j.threshhold=ALL # # Daily Rolling File Appender # log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} # Rollver at midnight log4j.appender.DRFA.DatePattern=.yyyy-MM-dd # 30-day backup #log4j.appender.DRFA.MaxBackupIndex=30 log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout # Pattern format: Date LogLevel LoggerName LogMessage log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n # Debugging Pattern format #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n # # console # Add "console" to rootlogger above if you want to use this # log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.target=System.err log4j.appender.console.layout=org.apache.log4j.PatternLayout log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n # Custom Logging levels log4j.logger.org.apache.zookeeper=INFO #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG log4j.logger.org.apache.hadoop.hbase=DEBUG #log4j.logger.org.apache.hadoop.dfs=DEBUG ======================================================================= ==conf/regionservers ======================================================================= localhost ======================================================================= ==conf/hadoop-metrics.properties ======================================================================= # See http://wiki.apache.org/hadoop/GangliaMetrics # Make sure you know whether you are using ganglia 3.0 or 3.1. # If 3.1, you will have to patch your hadoop instance with HADOOP-4675 # And, yes, this file is named hadoop-metrics.properties rather than # hbase-metrics.properties because we're leveraging the hadoop metrics # package and hadoop-metrics.properties is an hardcoded-name, at least # for the moment. # # See also http://hadoop.apache.org/hbase/docs/current/metrics.html # Configuration of the "hbase" context for null hbase.class=org.apache.hadoop.metrics.spi.NullContext # Configuration of the "hbase" context for file # hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext # hbase.period=10 # hbase.fileName=/tmp/metrics_hbase.log # Configuration of the "hbase" context for ganglia # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) # hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext # hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 # hbase.period=10 # hbase.servers=GMETADHOST_IP:8649 # Configuration of the "jvm" context for null jvm.class=org.apache.hadoop.metrics.spi.NullContext # Configuration of the "jvm" context for file # jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext # jvm.period=10 # jvm.fileName=/tmp/metrics_jvm.log # Configuration of the "jvm" context for ganglia # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 # jvm.period=10 # jvm.servers=GMETADHOST_IP:8649 # Configuration of the "rpc" context for null rpc.class=org.apache.hadoop.metrics.spi.NullContext # Configuration of the "rpc" context for file # rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext # rpc.period=10 # rpc.fileName=/tmp/metrics_rpc.log # Configuration of the "rpc" context for ganglia # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)