当前位置:   article > 正文

Shell实现Hadoop单机版安装配置_xshell hadoop单机模式的安装与配置

xshell hadoop单机模式的安装与配置

准备

        事先安装好VMware虚拟机

        安装JDK并且配置环境变量

        下载好hadoop文件

安装步骤

        建立一个sh文件,输入以下脚本并输入Hadoop的tgz包作为参数,运行该脚本即可安装配置Hadoop,并验证Hadoop服务是否启动成功。

源代码

  1. #1、压缩包存在
  2. pack=$1
  3. if [[ ! "$pack" =~ ^.*hadoop.*\.(tar\.gz|tgz)$ ]];then
  4. echo "ERROR : ONLY SUPPORT tar.gz OR tgz HADOOP COMPASS FORMAT"
  5. exit 1
  6. fi
  7. #2.1、 检查 Hadoop 服务,若存在则关闭
  8. sc=$(jps|awk 'BEGIN {c=0}/DataNode|SecondaryNameNode|NodeManager|ResourceManager|NameNode/{c++}END{print c}')
  9. if [ $sc -gt 0 ];then
  10. stop-all.sh 1>/dev/null 2>hadoop_err.log || jps|awk '/DataNode|SecondaryNameNode|NodeManager|ResourceManager|NameNode/{print $1}|xargs kill -9'
  11. if [ $? -ne 0 ];then
  12. echo "ERROR : FAIL TO STOP RUNNING HADOOP SERVICES"
  13. exit 1
  14. else
  15. echo "INFO : SUCCESS TO STOP OLD RUNNING HADOOP SERVICES"
  16. fi
  17. fi
  18. #2.2、目标目录(不存在/opt/software则创建,存在子目录则删除)
  19. dest=${2%/}
  20. echo $dest
  21. old=$(ls $dest|grep ^hadoop)
  22. if [ $? -eq 0 ];then
  23. rm -rf $dest/$old
  24. echo "INFO : OLD HADOOP EDITION FOUND AND REMOVED"
  25. fi
  26. if [ ! -e $dest ];then
  27. mkdir -p $dest
  28. echo "INFO : DEST DIR NOT EXISTS BUT CREATED"
  29. fi
  30. if [ ! -d $dest ];then
  31. echo "ERROR : DEST FOR ARG 2 MUSY BE A DIRECTORY"
  32. exit 2
  33. fi
  34. #3、解压
  35. tar -zxf $pack -C $dest
  36. if [ $? -eq 0 ];then
  37. echo -n "INFO : SUCCESS"
  38. else
  39. echo -n "ERROR : FAIL"
  40. exit 3
  41. fi
  42. echo "TO DECOMPRESS $pack TO $dest"
  43. #4、环境变量(存在则删除,再追加新环境变量)
  44. env=/etc/profile.d/myenv.sh
  45. if [ ! -e $env ];then
  46. touch $env
  47. echo "INFO : ENV FILE NOT EXIST BUT TOUCHED"
  48. fi
  49. old=$(cat $env|awk 'BEGIN{b=0; e=0; ln=0;}{ln++; if(b>0 && match($0,/^#.*hadoop/)) e=ln-1; if(match($0,/^#.*hadoop/)) b=ln}END{if(b>0 && e==0) e=ln; print b","e}')
  50. if [ "$old" != "0,0" ];then
  51. sed -i "${old}d" $env
  52. echo "INFO : ENV VARIABLES FOR HADOOP FOUND BUT REMOVED LINE $OLD IN $env "
  53. fi
  54. old=$(ls $dest|grep ^hadoop)
  55. dest="$dest/$old"
  56. cat >> $env <<EOF
  57. # hadoop 3.1.3
  58. export HADOOP_HOME=$dest
  59. export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin:\$HADOOP_HOME/lib
  60. export HDFS_NAMENODE_USER=root
  61. export HDFS_DATANODE_USER=root
  62. export HDFS_SECONDARYNAMENODE_USER=root
  63. export HDFS_JOURNALNODE_USER=root
  64. export HDFS_ZKFC_USER=root
  65. export YARN_RESOURCEMANAGER_USER=root
  66. export YARN_NODEMANAGER_USER=root
  67. export HADOOP_MAPRED_HOME=\$HADOOP_HOME
  68. export HADOOP_COMMON_HOME=\$HADOOP_HOME
  69. export HADOOP_HDFS_HOME=\$HADOOP_HOME
  70. export HADOOP_YARN_HOME=\$HADOOP_HOME
  71. export HADOOP_INSTALL=\$HADOOP_HOME
  72. export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
  73. export HADOOP_LIBEXEC_DIR=\$HADOOP_HOME/libexec
  74. export JAVA_LIBRARY_PATH=\$HADOOP_HOME/lib/native:\$JAVA_LIBRARY_PATH
  75. export HADOOP_CONF_DIR=\$HADOOP_HOME/etc/hadoop
  76. EOF
  77. echo "INFO : HADOOP ENV VARIABLES APPEND TO $env"
  78. #5、激活环境变量
  79. source /etc/profile
  80. if [ $? -eq 0 ];then
  81. echo "INFO : SUCCESS TO ACTIVE ENV VARIABLES FOR HADOOP"
  82. else
  83. echo "INFO : FAIL TO ACTIVE ENV VARIABLES FOR HADOOP"
  84. exit 4
  85. fi
  86. #6、 记录脚本目录,切到Hadoop主配置目录
  87. sdir=$(cd $(dirname $0); pwd)
  88. cd $dest/etc/hadoop
  89. echo "INFO : CURRENT DIRECTORY CHANGED TO $pwd"
  90. #7、Hadoop内部Java环境变量配置
  91. sed -i "s/# export JAVA_HOME=/export JAVA_HOME=${JAVA_HOME//\//\\/}/" hadoop-env.sh
  92. echo "INFO : SUCCESS TO FINISH hadoop-env.sh CONFIG"
  93. #8.0 检查并完善/etc/hosts下主机名与IP地址映射
  94. cat /etc/hosts|grep $HOSTNAME 1>/dev/null 2>$1
  95. echo -n "INFO : IP &HOSTNAME MAP"
  96. if [ $? -ne 0 ];then
  97. ip=$(ip addr|grep -E inet.*ens33|awk '{print $2}'|cut -d/ -f1)
  98. echo "$ip $HOSTNAME">>/etc/hosts
  99. echo "NOT FOUND BUT CREATED"
  100. else
  101. echo "EXIST"
  102. fi
  103. #8.1 检查 hadoop 临时目录 ,存在则删除
  104. dir=/tmp/hadoop
  105. if [ -e $dir ];then
  106. rm -rf $dir
  107. echo "INFO : TEMP & YARN LOCAL & YARN LOG DIR ($dir) FOUND AND REMOVED"
  108. fi
  109. #8、hadoop内部core-site.xml 配置
  110. cat > core-site.xml <<EOF
  111. <?xml version="1.0" encoding="UTF-8"?>
  112. <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  113. <!--
  114. Licensed under the Apache License, Version 2.0 (the "License");
  115. you may not use this file except in compliance with the License.
  116. You may obtain a copy of the License at
  117. http://www.apache.org/licenses/LICENSE-2.0
  118. Unless required by applicable law or agreed to in writing, software
  119. distributed under the License is distributed on an "AS IS" BASIS,
  120. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  121. See the License for the specific language governing permissions and
  122. limitations under the License. See accompanying LICENSE file.
  123. -->
  124. <!-- Put site-specific property overrides in this file. -->
  125. <configuration>
  126. <property>
  127. <name>fs.defaultFS</name>
  128. <value>hdfs://$HOSTNAME:9000</value>
  129. </property>
  130. <property>
  131. <name>hadoop.tmp.dir</name>
  132. <value>$dir/$HOSTNAME</value>
  133. </property>
  134. <property>
  135. <name>hadoop.http.staticuser.user</name>
  136. <value>root</value>
  137. </property>
  138. <property>
  139. <name>hadoop.proxyuser.root.hosts</name>
  140. <value>*</value>
  141. </property>
  142. <property>
  143. <name>hadoop.proxyuser.root.groups</name>
  144. <value>*</value>
  145. </property>
  146. <property>
  147. <name>io.file.buffer.size</name>
  148. <value>1048576</value>
  149. </property>
  150. </configuration>
  151. EOF
  152. echo "INFO : SUCCESS TO FINISH core-site.xml CONFIG"
  153. #9、Hadoop内部hdfs-site.xml配置
  154. cat > hdfs-site.xml<<EOF
  155. <?xml version="1.0" encoding="UTF-8"?>
  156. <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  157. <!--
  158. Licensed under the Apache License, Version 2.0 (the "License");
  159. you may not use this file except in compliance with the License.
  160. You may obtain a copy of the License at
  161. http://www.apache.org/licenses/LICENSE-2.0
  162. Unless required by applicable law or agreed to in writing, software
  163. distributed under the License is distributed on an "AS IS" BASIS,
  164. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  165. See the License for the specific language governing permissions and
  166. limitations under the License. See accompanying LICENSE file.
  167. -->
  168. <!-- Put site-specific property overrides in this file. -->
  169. <configuration>
  170. <property>
  171. <name>dfs.replication</name>
  172. <value>1</value>
  173. </property>
  174. <property>
  175. <name>dfs.namenode.name.dir</name>
  176. <value>${dest}/data/dfs/name</value>
  177. </property>
  178. <property>
  179. <name>dfs.datanode.data.dir</name>
  180. <value>${dest}/data/dfs/data</value>
  181. </property>
  182. <property>
  183. <name>dfs.namenode.secondary.http-address</name>
  184. <value>$HOSTNAME:9869</value>
  185. </property>
  186. <property>
  187. <name>dfs.permissions.enabled</name>
  188. <value>false</value>
  189. </property>
  190. </configuration>
  191. EOF
  192. echo "INFO : SUCCESS TO FINISH hdfs-site.xml CONFIG"
  193. #10、Hadoop内部mapred-site.xml配置
  194. #10.1 激活Hadoop环境变量并通过Hadoop classpath 命令提取信息
  195. source /etc/profile
  196. if [ $? -ne 0 ];then
  197. echo "ERROR : FAIL TO ACTIVATE HADOOP ENV VARIABLES"
  198. exit 1
  199. fi
  200. hc=$(hadoop classpath)
  201. if [ $? -ne 0 ];then
  202. echo "ERROR : FAIL TO FETCH HADOOP CLASSPATH"
  203. exit 1
  204. fi
  205. echo "INFO : HADOOP ENV VARIABLES ACTIVATED AND HADOOP CLASSPATH FETCHED"
  206. #10.2 配置mapred-site.xml
  207. cat > mapred-site.xml <<EOF
  208. <?xml version="1.0"?>
  209. <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  210. <!--
  211. Licensed under the Apache License, Version 2.0 (the "License");
  212. you may not use this file except in compliance with the License.
  213. You may obtain a copy of the License at
  214. http://www.apache.org/licenses/LICENSE-2.0
  215. Unless required by applicable law or agreed to in writing, software
  216. distributed under the License is distributed on an "AS IS" BASIS,
  217. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  218. See the License for the specific language governing permissions and
  219. limitations under the License. See accompanying LICENSE file.
  220. -->
  221. <!-- Put site-specific property overrides in this file. -->
  222. <configuration>
  223. <property>
  224. <name>mapreduce.framework.name</name>
  225. <value>yarn</value>
  226. </property>
  227. <property>
  228. <name>mapreduce.application.classpath</name>
  229. <value>$hc</value>
  230. </property>
  231. <property>
  232. <name>mapreduce.jobhistory.address</name>
  233. <value>$HOSTNAME:10020</value>
  234. </property>
  235. <property>
  236. <name>mapreduce.jobhistory.webapp.address</name>
  237. <value>$HOSTNAME:19888</value>
  238. </property>
  239. <property>
  240. <name>mapreduce.map.memory.mb</name>
  241. <value>256</value>
  242. </property>
  243. <property>
  244. <name>mapreduce.reduce.memory.mb</name>
  245. <value>512</value>
  246. </property>
  247. </configuration>
  248. EOF
  249. echo "INFO : SUCCESS TO FINISH mapred-site.xml CONFIG"
  250. #11、Hadoop内部yarn-site.xml 配置
  251. cat > yarn-site.xml <<EOF
  252. <?xml version="1.0"?>
  253. <!--
  254. Licensed under the Apache License, Version 2.0 (the "License");
  255. you may not use this file except in compliance with the License.
  256. You may obtain a copy of the License at
  257. http://www.apache.org/licenses/LICENSE-2.0
  258. Unless required by applicable law or agreed to in writing, software
  259. distributed under the License is distributed on an "AS IS" BASIS,
  260. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  261. See the License for the specific language governing permissions and
  262. limitations under the License. See accompanying LICENSE file.
  263. -->
  264. <configuration>
  265. <!-- Site specific YARN configuration properties -->
  266. <property>
  267. <name>yarn.resourcemanager.connect.retry-interval.ms</name>
  268. <value>10000</value>
  269. </property>
  270. <property>
  271. <name>yarn.resourcemanager.scheduler.class</name>
  272. <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
  273. </property>
  274. <property>
  275. <name>yarn.nodemanager.localizer.address</name>
  276. <value>$HOSTNAME:8040</value>
  277. </property>
  278. <property>
  279. <name>yarn.nodemanager.address</name>
  280. <value>$HOSTNAME:8050</value>
  281. </property>
  282. <property>
  283. <name>yarn.nodemanager.webapp.address</name>
  284. <value>$HOSTNAME:8042</value>
  285. </property>
  286. <property>
  287. <name>yarn.nodemanager.aux-services</name>
  288. <value>mapreduce_shuffle</value>
  289. </property>
  290. <property>
  291. <name>yarn.nodemanager.local-dirs</name>
  292. <value>$dir/yarn/local</value>
  293. </property>
  294. <property>
  295. <name>yarn.nodemanager.log-dirs</name>
  296. <value>$dir/yarn/log</value>
  297. </property>
  298. <property>
  299. <name>yarn.nodemanager.vmem-check-enabled</name>
  300. <value>false</value>
  301. </property>
  302. <property>
  303. <name>yarn.application.classpath</name>
  304. <value>$hc</value>
  305. </property>
  306. <property>
  307. <name>yarn.nodemanager.env-whitelist</name>
  308. <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  309. </property>
  310. </configuration>
  311. EOF
  312. echo "INFO : SUCCESS TO FINISH yarn-site.xml CONFIG"
  313. #12.0 切换到脚本路径
  314. echo $sdir
  315. cd $sdir
  316. #12 格式化namenode
  317. hdfs namenode -format 1>/dev/null 2>>hadoop_err.log
  318. if [ $? -ne 0 ];then
  319. echo "ERROR : FAIL TO format hdfs namenode"
  320. exit 1
  321. else
  322. echo "INFO : SUCCESS TO FORMAT hdfs namenode"
  323. fi
  324. #13 启动Hadoop服务
  325. start-all.sh 1>/dev/null 2>>hadoop_err.log
  326. if [ $? -ne 0 ];then
  327. echo "ERROR : FAIL TO START HADOOP SERVICE"
  328. exit 1
  329. else
  330. sc=$(jps|awk 'BEGIN{c=0}/DataNode|SecondaryNameNode|NodeManager|ResourceManager|NameNode/{c++}END{print c}')
  331. if [ $sc -eq 5 ];then
  332. echo "INFO : SUCCESS TO START HADOOP SERVICE"
  333. else
  334. echo "WARN : FAIL TO START HADOOP SERVICE FOR NOT 5 SERVICES STARTED"
  335. fi
  336. fi
  337. unset hc
  338. unset dir
  339. unset sdir
  340. unset env
  341. unset sc
  342. unset old
  343. unset dest
  344. unset pack

 

         

        

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/空白诗007/article/detail/775174
推荐阅读
相关标签
  

闽ICP备14008679号