spark--环境搭建--6.Spark1.3.0集群搭建

1. spark安装

$ cd /usr/local

$ tar -zxvf spark-1.3.0-bin-hadoop2.4.tgz

$ mv spark-1.3.0-bin-hadoop2.4 spark

$ vi ~/.bashrc

export SPARK_HOME=/usr/local/spark/
export PATH=$PATH:$SPARK_HOME/bin
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib

$ source ~/.bashrc

$ cd spark/conf/

$ mv spark-env.sh.template spark-env.sh

$ vi spark-env.sh

export JAVA_HOME=/usr/java/latest/
export SCALA_HOME=/usr/local/scala/
# spark集群的master节点ip
export SPARK_MASTER_IP=192.168.2.100
# 指定worker节点可以最大分配给Executors的内存大小
export SPARK_WORKER_MEMORY=1g
# hadoop集群的配置文件目录
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop

$ mv slaves.template slaves

$ vi slaves

localhost 改成 
spark2
spark3

$ cd /usr/local

$ scp -r spark root@spark2:/usr/local/

$ scp -r spark root@spark3:/usr/local/

$ scp ~/.bashrc root@spark2:~/.bashrc

$ scp ~/.bashrc root@spark3:~/.bashrc

# 分别在2和3执行

$ source ~/.bashrc

2. 启动spark

$ cd spark/sbin/

$ ./start-all.sh

$ jps

# 浏览器打开  http://spark1:8080

$ cd ../../

$ spark-shell

> exit

相关文章
相关标签/搜索