1. Download Spark 2.4.3 (pick the package built against your Hadoop version), extract it under /usr/local, and rename the directory to spark.
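For example, assuming the hadoop2.7 build from the Apache archive (adjust the package name to whatever build matches your Hadoop):
sudo tar -zxf spark-2.4.3-bin-hadoop2.7.tgz -C /usr/local
cd /usr/local
sudo mv spark-2.4.3-bin-hadoop2.7 spark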
2. Change ownership of the spark directory to the current user.
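For example, assuming the current login user should own the whole tree:
sudo chown -R $USER /usr/local/spark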
3. Set environment variables.
(1) Add the following to ~/.bashrc, then reload it:
export SPARK_HOME=/usr/local/spark
source ~/.bashrc
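Optionally (not in the original note), also add Spark's bin directory to PATH so the commands below can be run without the full path:
export PATH=$PATH:$SPARK_HOME/bin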
(2) cp /usr/local/spark/conf/spark-env.sh.template /usr/local/spark/conf/spark-env.sh
(3) Edit /usr/local/spark/conf/spark-env.sh and add:
export SPARK_DIST_CLASSPATH=$(/usr/local/hadoop/bin/hadoop classpath)
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
SPARK_LOCAL_IP="127.0.0.1"
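SPARK_DIST_CLASSPATH points Spark at the Hadoop jars already installed on this machine; a quick sanity check is to run the same command by hand and confirm it prints a list of jar paths:
/usr/local/hadoop/bin/hadoop classpath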
4. Local (single-machine) mode
/usr/local/spark/bin/spark-shell
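A quick smoke test, piping a one-line Scala job into the shell (the expression itself is just an illustration; it should print 5050):
echo 'sc.parallelize(1 to 100).reduce(_ + _)' | /usr/local/spark/bin/spark-shell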
5. Add content inside the outermost <configuration> tag of /usr/local/hadoop/etc/hadoop/yarn-site.xml.
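The original note does not show the snippet to insert; the properties commonly added at this point for Spark on YARN (an assumption, verify against your cluster) disable YARN's physical/virtual memory checks so executors on small test machines are not killed:
<property>
  <name>yarn.nodemanager.pmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>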
Run (YARN mode):
/usr/local/spark/bin/spark-shell --master yarn --deploy-mode client
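YARN mode assumes HDFS and the YARN daemons are already up (restart YARN after the yarn-site.xml change); a typical start sequence with the stock Hadoop scripts:
/usr/local/hadoop/sbin/start-dfs.sh
/usr/local/hadoop/sbin/start-yarn.sh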