vim /etc/sysconfig/network
vim /etc/udev/rules.d/70-persistent-net.rules
vim /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
NAME="eth0"
IPADDR=192.168.1.101
PREFIX=24
GATEWAY=192.168.1.2
DNS1=192.168.1.2
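For the static address to take effect, restart networking and verify connectivity (assuming the CentOS 6 service layout used above):
service network restart
ifconfig eth0
ping -c 3 192.168.1.2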
vim /etc/hosts
192.168.1.101 hadoop101
192.168.1.102 hadoop102
192.168.1.103 hadoop103
192.168.1.104 hadoop104
192.168.1.105 hadoop105
192.168.1.106 hadoop106
192.168.1.107 hadoop107
192.168.1.108 hadoop108
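A quick check that the host names resolve (any entry from the list above works):
ping -c 1 hadoop102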
service iptables stop
chkconfig iptables off
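To confirm the firewall is stopped now and stays disabled on boot:
service iptables status
chkconfig --list iptables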
useradd test
passwd test
sudo vim /etc/sudoers
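One common entry to add for the test user (whether to require a password is a site choice; this exact line is only an example):
test    ALL=(ALL)       NOPASSWD:ALL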
[test@hadoop102 opt]$ sudo mkdir /opt/software
[test@hadoop102 opt]$ sudo mkdir /opt/module
[test@hadoop102 opt]$ sudo chown test:test /opt/software /opt/module
cd ~
mkdir bin/
cd bin/
vim xsync
#!/bin/bash
#1. Get the number of arguments; exit immediately if there are none
pcount=$#
if ((pcount==0)); then
echo no args;
exit;
fi
#2. Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname
#3. Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir
#4. Get the current user name
user=`whoami`
#5. Loop over the target hosts
for ((host=103; host<105; host++)); do
echo -------------- hadoop$host ------------------
rsync -av $pdir/$fname $user@hadoop$host:$pdir
done
chmod +x xsync
sudo cp xsync /bin
sudo xsync /bin/xsync
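A quick sanity check of the script (rsync will prompt for passwords until SSH keys are distributed below; the file name is just an illustration):
touch /home/test/hello.txt
xsync /home/test/hello.txt
ssh hadoop103 ls /home/test/hello.txt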
(1) Check whether Java is already installed
[test@hadoop102 ~]$ rpm -qa | grep java
(2) If the installed version is lower than 1.7, uninstall that JDK
[test@hadoop102 ~]$ sudo rpm -e <package-name>
[test@hadoop102 ~]$sudo rpm -qa | grep java | xargs sudo rpm -e --nodeps
(3) Check the JDK installation path
[test@hadoop102 ~]$which java
[test@hadoop102 opt]$ tar -zxvf jdk-8u144-linux-x64.tar.gz -C /opt/module/
[test@hadoop102 opt]$sudo vim /etc/profile.d/env.sh
#JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_144
export PATH=$PATH:$JAVA_HOME/bin
[test@hadoop102 opt]$source /etc/profile.d/env.sh
java -version
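If the new PATH is picked up, the output should look roughly like this (build numbers may differ):
java version "1.8.0_144"
Java(TM) SE Runtime Environment (build 1.8.0_144-b01)
Java HotSpot(TM) 64-Bit Server VM (build 25.144-b01, mixed mode)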
[test@hadoop102 opt]$tar -zxvf hadoop-2.7.2.tar.gz -C /opt/module/
[test@hadoop102 opt]$sudo vim /etc/profile.d/env.sh
#HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-2.7.2
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
[test@hadoop102 opt]$source /etc/profile.d/env.sh
[test@hadoop102 opt]$hadoop version
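The other nodes need the same JDK, Hadoop tree and environment file; one way to push them with the xsync script from earlier (each node then re-sources env.sh or logs in again):
xsync /opt/module
sudo xsync /etc/profile.d/env.sh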
[test@hadoop102 .ssh]$ssh-keygen -t rsa (press Enter three times)
[test@hadoop102 .ssh]$ssh-copy-id hadoop102
[test@hadoop102 .ssh]$ssh-copy-id hadoop103
[test@hadoop102 .ssh]$ssh-copy-id hadoop104
Test: [test@hadoop102 .ssh]$ssh hadoop103
[test@hadoop102 .ssh]$exit
xsync /home/test/.ssh
cd /opt/module/hadoop-2.7.2/etc/hadoop
vim hadoop-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim yarn-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim mapred-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
vim slaves (the file must not contain any extra spaces or blank lines)
hadoop102
hadoop103
hadoop104
core-site.xml:
<!-- Address of the NameNode in HDFS -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop102:9000</value>
</property>
<!-- Directory where files generated by Hadoop at runtime are stored -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.7.2/data/tmp</value>
</property>
hdfs-site.xml:
<!-- Number of data replicas -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- Host and port for the Hadoop SecondaryNameNode -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop104:50090</value>
</property>
yarn-site.xml:
<!-- Site specific YARN configuration properties -->
<!-- How the Reducer fetches data -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Address of the YARN ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop103</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- Retain logs for 7 days -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
mapred-site.xml configuration:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- JobHistory server address -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop104:10020</value>
</property>
<!-- JobHistory server web UI address -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop104:19888</value>
</property>
Start:
Start the history server: mr-jobhistory-daemon.sh start historyserver
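Once it is up, jps should show a JobHistoryServer process, and the web UI is reachable at http://hadoop104:19888 (per mapreduce.jobhistory.webapp.address above).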
xsync /opt/module/hadoop-2.7.2/etc
[test@hadoop102 hadoop-2.7.2]$bin/hdfs namenode -format
[test@hadoop102 hadoop-2.7.2]$sbin/start-dfs.sh
[test@hadoop103 hadoop-2.7.2]$sbin/start-yarn.sh
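To confirm the daemons came up, run jps on every node and check the web UIs (Hadoop 2.x default ports, hosts per the configuration above):
jps    # NameNode/DataNode on hadoop102, ResourceManager/NodeManager on hadoop103, SecondaryNameNode on hadoop104
NameNode web UI: http://hadoop102:50070
ResourceManager web UI: http://hadoop103:8088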
If the cluster ever needs to be re-formatted, stop all daemons first and delete the old data and logs directories on every node: rm -rf data logs
Add the following to core-site.xml to enable LZO compression:
<property>
<name>io.compression.codecs</name>
<value>
org.apache.hadoop.io.compress.GzipCodec,
org.apache.hadoop.io.compress.DefaultCodec,
org.apache.hadoop.io.compress.BZip2Codec,
org.apache.hadoop.io.compress.SnappyCodec,
com.hadoop.compression.lzo.LzoCodec,
com.hadoop.compression.lzo.LzopCodec
</value>
</property>
<property>
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
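These codec entries only work if the hadoop-lzo jar is actually on every node's classpath; one common approach (the exact jar name depends on the hadoop-lzo build you use) is roughly:
cp hadoop-lzo-0.4.20.jar /opt/module/hadoop-2.7.2/share/hadoop/common/
xsync /opt/module/hadoop-2.7.2/share/hadoop/common/
xsync /opt/module/hadoop-2.7.2/etc/hadoop/core-site.xml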
(1) Create and format the new partition
fdisk /dev/sda
m #show the help menu
n #add a new partition
p #make the new partition a primary partition
keep pressing Enter through the prompts #but note the partition number
w #write the partition table and apply the changes
reboot #reboot
==============================
fdisk -l
==============================
mkfs.xfs /dev/sdax (x is the partition number)
(2) Create a mount point and mount the partition
mkdir /newdisk
Temporary mount: mount /dev/sdax /newdisk
Permanent mount (the filesystem type in fstab must match the mkfs command above): vim /etc/fstab
/dev/sdax /newdisk xfs defaults 0 0
(3) Grant ownership
chown -R test:test /newdisk
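To confirm the mount and the ownership:
df -h /newdisk
ls -ld /newdisk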
vim /opt/module/hadoop-2.7.2/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.datanode.data.dir</name>
<value>${hadoop.tmp.dir}/dfs/data,/newdisk</value>
</property>
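For the new directory to be picked up, the DataNode on that machine must be restarted after the change (this file stays per-node here, since not every machine necessarily has the extra disk); with the Hadoop 2.x daemon scripts:
sbin/hadoop-daemon.sh stop datanode
sbin/hadoop-daemon.sh start datanode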