1.解压安装
这里解压 hadoop-2.7.3.tar.gz 到 V:\program\common\hadoop-2.7.3
下载 hadoop-common-2.2.0-bin-master(Windows 本地库,包含 winutils.exe 等文件): https://github.com/srccodes/hadoop-common-2.2.0-bin/archive/master.zip
将解压后的 hadoop-common-2.2.0-bin-master\bin 中的 7 个文件(注意:只复制这 7 个)复制到 V:\program\common\hadoop-2.7.3\bin
hadoop.dll、hadoop.exp、hadoop.lib、hadoop.pdb、libwinutils.lib、winutils.exe、winutils.pdb
这里解压 hbase-1.2.4.tar.gz 到 V:\program\common\hbase-1.2.4
2.配置etc\hadoop\core-site.xml
fs.default.name  hdfs://0.0.0.0:19000
hadoop.tmp.dir   ../../../../data/hadoop/tmp  (A base for other temporary directories.)
3.配置etc\hadoop\slaves
localhost
4.配置etc\hadoop\mapred-site.xml
mapreduce.job.user.name       zeno
mapreduce.framework.name      yarn
yarn.apps.stagingDir          /user/zeno/staging
mapreduce.jobtracker.address  local
5.配置etc\hadoop\yarn-site.xml
yarn.server.resourcemanager.address                         0.0.0.0:8020
yarn.server.resourcemanager.application.expiry.interval     60000
yarn.server.nodemanager.address                             0.0.0.0:45454
yarn.nodemanager.aux-services                               mapreduce_shuffle
yarn.nodemanager.aux-services.mapreduce.shuffle.class       org.apache.hadoop.mapred.ShuffleHandler
yarn.server.nodemanager.remote-app-log-dir                  /app-logs
yarn.nodemanager.log-dirs                                   /dep/logs/userlogs
yarn.server.mapreduce-appmanager.attempt-listener.bindAddress  0.0.0.0
yarn.server.mapreduce-appmanager.client-service.bindAddress    0.0.0.0
yarn.log-aggregation-enable                                 true
yarn.log-aggregation.retain-seconds                         -1
yarn.application.classpath                                  %HADOOP_CONF_DIR%,%HADOOP_COMMON_HOME%/share/hadoop/common/*,%HADOOP_COMMON_HOME%/share/hadoop/common/lib/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/lib/*
6.配置etc\hadoop\hdfs-site.xml
fs.default.name         hdfs://0.0.0.0:19000
dfs.namenode.name.dir   ../../../../data/hadoop/hdfs/name
dfs.datanode.data.dir   ../../../../data/hadoop/hdfs/data
7.格式化文件系统
set HADOOP_HOME=%DIR%\program\common\hadoop-2.7.3
set HADOOP_PREFIX=%DIR%\program\common\hadoop-2.7.3
set HADOOP_CONF_DIR=%HADOOP_PREFIX%\etc\hadoop
set YARN_CONF_DIR=%HADOOP_CONF_DIR%
set PATH=%HADOOP_HOME%\bin;%PATH%
call %HADOOP_HOME%\etc\hadoop\hadoop-env.cmd
%HADOOP_PREFIX%\bin\hdfs namenode -format
8.启动
%HADOOP_PREFIX%\sbin\start-dfs.cmd
9.测试
%HADOOP_PREFIX%\bin\hdfs dfs -put myfile.txt /
%HADOOP_PREFIX%\bin\hdfs dfs -ls /
Found 1 items
-rw-r--r--   1 username supergroup       4640 2016-12-25 20:40 /myfile.txt
10.配置conf/hbase-site.xml
hbase.rootdir                          file:///V:/data/hbase/root
hbase.tmp.dir                          V:/data/hbase/tmp
hbase.zookeeper.quorum                 127.0.0.1
hbase.zookeeper.property.dataDir       V:/data/hbase/zoo
hbase.zookeeper.property.clientPort    2181
zookeeper.znode.parent                 /hbase
hbase.cluster.distributed              false
11.启动hbase
set HBASE_HOME=%DIR%\program\common\hbase-1.2.4
call %HBASE_HOME%\conf\hbase-env.cmd
set PATH=%HBASE_HOME%\bin;%PATH%
%HBASE_HOME%\bin\start-hbase.cmd
12.测试Hbase
创建表: 创建一个名为 test 的表,这个表只有一个列族 cf。可以列出所有的表来检查创建情况,然后插入一些值。
>create 'test','cf'
插入记录
>put 'test','row1','cf:a','value1'
查询
>scan 'test'