## 一、系统基础操作:
# Set this machine's hostname to "hadoop" (needs privileges), then start a
# fresh shell so the new hostname is picked up by the prompt/environment.
sudo hostnamectl set-hostname hadoop
bash
![截图](https://1000bd.com/contentImg/2024/09/12/1955c9e56650dc37.png)
# Stop the firewall now, keep it from starting on boot, and confirm it is
# inactive — tutorial setup so all Hadoop ports are reachable.
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service


# Edit the SELinux configuration (presumably to set SELINUX=disabled for the
# tutorial — confirm the exact change; it only takes effect after a reboot).
vi /etc/selinux/config


# Unpack the JDK 8 archive into /opt/module/.
tar -zxf jdk-8u221-linux-x64.tar.gz -C /opt/module/

# Create a profile script so JAVA_HOME and the JDK binaries are on PATH for
# every login shell; the two export lines below are the file's contents.
vi /etc/profile.d/hadoop.sh
export JAVA_HOME=/opt/module/jdk1.8.0_221
export PATH=$PATH:$JAVA_HOME/bin
![截图](https://1000bd.com/contentImg/2024/09/12/1089474fe4d8afec.png)

# Load the new environment variables into the current shell.
source /etc/profile.d/hadoop.sh

# Verify the JDK is installed and on PATH.
java -version

# Make sure the SSH daemon is running — Hadoop's start scripts log in via SSH.
systemctl start sshd

systemctl status sshd

# Generate an SSH key pair (press Enter at every prompt for the defaults
# and an empty passphrase).
ssh-keygen

cd ~/.ssh/

# Append the public key to authorized_keys instead of overwriting it ('>>',
# not '>'), so any previously authorized keys are preserved.
cat id_rsa.pub >> authorized_keys

# authorized_keys must not be group/world accessible or sshd ignores it.
chmod 600 authorized_keys

# Test passwordless login to this host, then return to the original shell.
ssh root@hadoop

exit

# Unpack Hadoop 2.7.7 into /opt/module/.
tar -xzf hadoop-2.7.7.tar.gz -C /opt/module/

# Append HADOOP_HOME to the same profile script; both bin/ and sbin/ must be
# on PATH (sbin holds start-all.sh and the other start/stop scripts).
vi /etc/profile.d/hadoop.sh
export HADOOP_HOME=/opt/module/hadoop-2.7.7
# Fixed: the original line used the undefined variable $HADOOP_PATH for the
# sbin entry, so the start/stop scripts would never have been on PATH.
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin


# Reload the environment and verify the installation.
source /etc/profile.d/hadoop.sh

hadoop version

设置 hadoop-env.sh 中的 JAVA_HOME:先进入配置目录
# Directory that holds all of Hadoop's configuration files.
/opt/module/hadoop-2.7.7/etc/hadoop
vi hadoop-env.sh
# Inside vim, shell out to print the current JAVA_HOME so its value can be
# copied into hadoop-env.sh:
:! echo $JAVA_HOME


# Set JAVA_HOME in hadoop-env.sh to this explicit path:
/opt/module/jdk1.8.0_221


vi core-site.xml

新的
<configuration>
  <property>
    <!-- Default filesystem URI. fs.defaultFS is the current property name;
         fs.default.name is its deprecated alias. Closing tags restored:
         the original snippet had lost every "</" to markdown escaping. -->
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
  </property>
</configuration>
原来的
<property>
  <!-- Fixed: the property is fs.defaultFS (it configures the default
       filesystem for all clients); "dfs.defaultFS" is not a real key. -->
  <name>fs.defaultFS</name>
  <value>hdfs://hadoop:9000</value>
</property>
<property>
  <!-- Base directory for Hadoop's temporary/working files. -->
  <name>hadoop.tmp.dir</name>
  <value>/home/root/hadoop</value>
</property>

vi hdfs-site.xml

<property>
  <!-- Single-node setup: keep only one replica of each block. -->
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <!-- Local directory where the NameNode stores its metadata. -->
  <name>dfs.namenode.name.dir</name>
  <value>file:///home/root/hadoop/name</value>
</property>
<property>
  <!-- Fixed: the DataNode storage property is dfs.datanode.data.dir;
       the original "dfs.namenode.data.dir" does not exist. -->
  <name>dfs.datanode.data.dir</name>
  <value>file:///home/root/hadoop/data</value>
</property>
# Hadoop 2.x ships only a template; copy it before editing.
cp mapred-site.xml.template mapred-site.xml

vi mapred-site.xml

<property>
  <!-- Run MapReduce jobs on YARN rather than the legacy local runner. -->
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>

vi yarn-site.xml

<property>
  <!-- Fixed: the key is yarn.resourcemanager.hostname (singular);
       "hostnames" is not a recognized property. -->
  <name>yarn.resourcemanager.hostname</name>
  <value>hadoop</value>
</property>
<property>
  <!-- Required so NodeManagers can serve MapReduce shuffle data. -->
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>

# List the DataNode hosts in the slaves file — for this single-node setup it
# presumably holds just this machine's hostname (confirm).
vi slaves



# One-time initialization of the NameNode metadata directory.
# WARNING: re-running this wipes existing HDFS metadata.
hdfs namenode -format


在/opt/module/hadoop-2.7.7/sbin下
bash start-all.sh

查看启动:
# jps should list NameNode, DataNode, SecondaryNameNode, ResourceManager
# and NodeManager once everything is up.
jps

# NameNode web UI (port 50070 is the Hadoop 2.x default):
http://192.168.0.108:50070/dfshealth.html#tab-overview

# YARN ResourceManager web UI:
http://192.168.0.108:8088/cluster

2022-08-27 12:08 凌晨
由于jar包导错了,导致最后的运行 出现不兼容现象,运行失败,两小时时间排错。卸载重装jdk,解决问题。要注意环境的需求。
参考大佬链接:
https://www.javaroad.cn/questions/79621