1. Change the hostnames

# As the root user
vim /etc/sysconfig/network
# Set HOSTNAME to master, slave1, slave2, ... on the respective machines
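
On CentOS/RHEL 6 (where /etc/sysconfig/network holds the hostname, as the path above implies), a minimal sketch of what the master's file might look like:

# /etc/sysconfig/network on the master node (hostname varies per node)
NETWORKING=yes
HOSTNAME=master
# Reboot, or run `hostname master`, for the change to take effect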

2. Set static IPs

# As the root user
# Use the setup command to give every host an IP in the same subnet
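
If you prefer editing the interface file directly instead of the setup TUI, a sketch for CentOS 6 follows; the device name, netmask, and gateway are placeholders that must match your network (the IP shown is the master's, from step 3):

# /etc/sysconfig/network-scripts/ifcfg-eth0 (placeholder values)
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.112.134
NETMASK=255.255.255.0
GATEWAY=192.168.112.1
# Apply with: service network restart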

3. Update the hosts file

# As the root user
vim /etc/hosts
# Edit hosts into the following format
127.0.0.1   localhost
192.168.112.134 master
192.168.112.135 slave1
192.168.112.136 slave2

4. Configure passwordless SSH login

# As the regular user
ssh-keygen
# Press Enter at every prompt
cp /home/matrix/.ssh/id_rsa.pub /home/matrix/.ssh/authorized_keys
# Add the key to the trusted list
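
A quick check that the local key works (the first connection will ask you to confirm the host fingerprint; the matrix user matches the paths above):

# Should log in without prompting for a password
ssh matrix@localhost
exit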

5. Configure passwordless login from master to the slaves

# Copy the master's public key to each slave node
scp /home/matrix/.ssh/id_rsa.pub matrix@192.168.112.135:~/.ssh/id_rsa.pub_sl
# On each slave node, append the master's public key to the trusted list (run inside ~/.ssh)
cat id_rsa.pub_sl >> authorized_keys
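
If ssh-copy-id is available on the master, it performs the same copy-and-append in one step (assuming the default key location); either way, verify afterwards:

# One-step alternative; repeat for each slave
ssh-copy-id matrix@192.168.112.135
# Should now log in without a password
ssh matrix@slave1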

6. Create the data directories under the Hadoop directory

mkdir dfs
cd dfs
mkdir name
mkdir data
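
Equivalently, as a single command, assuming the Hadoop home used by the configs below (/home/matrix/hadoop-2.6.0):

mkdir -p /home/matrix/hadoop-2.6.0/dfs/name /home/matrix/hadoop-2.6.0/dfs/data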

7. Configure Hadoop

The configuration files are located in hadoop-2.6.0/etc/hadoop.

Configure the slaves file

# slave nodes
slave1
slave2

Configure the env scripts

# hadoop-env.sh, yarn-env.sh, mapred-env.sh
# Uncomment the JAVA_HOME line and set it to the JDK installation path
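
For example, in each of the three scripts (the JDK path below is a placeholder; use your actual installation path):

export JAVA_HOME=/usr/java/jdk1.7.0_79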

Configure core-site.xml. The properties below (and in the following files) go inside the <configuration> element.

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>file:/home/matrix/hadoop-2.6.0/tmp</value>
</property>
<property>
    <name>hadoop.proxyuser.spark.hosts</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.spark.groups</name>
    <value>*</value>
</property>
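
hadoop.tmp.dir above points at a tmp directory that step 6 did not create; assuming the same layout, create it on each node first:

mkdir -p /home/matrix/hadoop-2.6.0/tmp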

Configure hdfs-site.xml

<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:9001</value>
</property>
<property>
   <name>dfs.namenode.name.dir</name>
   <value>file:/home/matrix/hadoop-2.6.0/dfs/name</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/matrix/hadoop-2.6.0/dfs/data</value>
</property>
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>
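
Since dfs.webhdfs.enabled is set to true above, the WebHDFS REST API makes a quick smoke test once HDFS is running; 50070 is the NameNode's default web port in Hadoop 2.6:

# List the HDFS root directory over WebHDFS (run after the cluster is started)
curl -i "http://master:50070/webhdfs/v1/?op=LISTSTATUS"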

Configure mapred-site.xml

Copy mapred-site.xml.template to mapred-site.xml (in hadoop-2.6.0/etc/hadoop), then edit it:
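
cp mapred-site.xml.template mapred-site.xml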

<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>master:10020</value>
</property>
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master:19888</value>
</property>
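
The JobHistory server behind these two addresses is not started by start-dfs.sh/start-yarn.sh; once the cluster is up, it can be started on master with the bundled daemon script (run from the hadoop-2.6.0 directory):

sbin/mr-jobhistory-daemon.sh start historyserver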

Configure yarn-site.xml

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8035</value>
</property>
<property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:8033</value>
</property>
<property>
   <name>yarn.resourcemanager.webapp.address</name>
   <value>master:8088</value>
</property>
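
With yarn.resourcemanager.webapp.address set as above, the ResourceManager web UI should be reachable at http://master:8088 once YARN is started; the node list there is a quick way to confirm that the slaves' NodeManagers have registered.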

8. Copy the configured Hadoop directory to the other slave nodes

scp -r hadoop-2.6.0/ matrix@192.168.112.135:~/matrix/
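
Repeat for each remaining slave, e.g. slave2 at the address from the hosts file above:

scp -r hadoop-2.6.0/ matrix@192.168.112.136:~/matrix/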