Video: https://player.bilibili.com/player.html?aid=38193405&cid=67137841&page=3
A Dataset is a distributed collection of data. The Dataset is a new interface added in Spark 1.6 that provides the benefits of RDDs (strong typing, the ability to use powerful lambda functions) together with the benefits of Spark SQL's optimized execution engine. A Dataset can be constructed from JVM objects and then manipulated using functional transformations (map, flatMap, filter, etc.). The Dataset API is available in Scala and Java. Python does not support the Dataset API, but due to Python's dynamic nature many of its benefits are already available (i.e. you can access a row's fields naturally by name, row.columnName). The case for R is similar.
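As a minimal sketch of these ideas (assuming a local SparkSession; the Person case class and the names used here are illustrative, not part of the examples below), a strongly typed Dataset can be built from JVM objects and transformed with lambda functions:

import org.apache.spark.sql.SparkSession

object DatasetSketch {

  case class Person(name: String, age: Long)   // hypothetical case class for illustration

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.master("local").appName("DatasetSketch").getOrCreate()
    import spark.implicits._                   // provides the encoders needed by toDS()

    // build a Dataset[Person] directly from JVM objects
    val people = Seq(Person("Michael", 29), Person("Andy", 30)).toDS()

    // functional transformations preserve the strong typing
    val adultNames = people.filter(_.age >= 21).map(p => p.name.toUpperCase)
    adultNames.show()

    spark.stop()
  }
}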
def sparkSession(isLocal: Boolean = false): SparkSession = {
  if (isLocal) {
    master = "local"
    val spark = SparkSession.builder
      .master(master)
      .appName(appName)
      .getOrCreate()
    // spark.sparkContext.addJar("/opt/n_001_workspaces/bigdata/spark-scala-maven-2.4.0/target/spark-scala-maven-2.4.0-1.0-SNAPSHOT.jar")
    // import spark.implicits._
    spark
  } else {
    val spark = SparkSession.builder
      .master(master)
      .appName(appName)
      .config("spark.eventLog.enabled", "true")
      .config("spark.history.fs.logDirectory", "hdfs://standalone.com:9000/spark/log/historyEventLog")
      .config("spark.eventLog.dir", "hdfs://standalone.com:9000/spark/log/historyEventLog")
      .getOrCreate()
    // spark.sparkContext.addJar("/opt/n_001_workspaces/bigdata/spark-scala-maven-2.4.0/target/spark-scala-maven-2.4.0-1.0-SNAPSHOT.jar")
    // import spark.implicits._
    spark
  }
}
val spark = sparkSession(true)

// textFile returns a Dataset[String], one element per line
val df = spark.read.textFile("file:///" + getProjectPath + "/src/main/resource/data/text/people.txt")
df.show()
// +-----------+
// |      value|
// +-----------+
// |Michael, 29|
// |   Andy, 30|
// | Justin, 19|
// |  Think, 30|
// +-----------+
val spark = sparkSession(true)

// textFile returns a Dataset[String], one element per line
val df = spark.read.textFile("hdfs://standalone.com:9000/home/liuwen/data/people.txt")
df.show()
// +-----------+
// |      value|
// +-----------+
// |Michael, 29|
// |   Andy, 30|
// | Justin, 19|
// |  Think, 30|
// +-----------+

spark.stop()
val spark = sparkSession(true)

// text returns a DataFrame with a single string column named "value"
val df = spark.read.text("file:///" + getProjectPath + "/src/main/resource/data/text/people.txt")
df.show()
// +-----------+
// |      value|
// +-----------+
// |Michael, 29|
// |   Andy, 30|
// | Justin, 19|
// |  Think, 30|
// +-----------+
object Run extends BaseSparkSession {

  def main(args: Array[String]): Unit = {
    val spark = sparkSession(true)

    // text returns a DataFrame with a single string column named "value"
    val df = spark.read.text("hdfs://standalone.com:9000/home/liuwen/data/people.txt")
    df.show()
    // +-----------+
    // |      value|
    // +-----------+
    // |Michael, 29|
    // |   Andy, 30|
    // | Justin, 19|
    // |  Think, 30|
    // +-----------+

    spark.stop()
  }
}
object Run1 extends BaseSparkSession {

  case class Person(name: String, age: Long)

  def main(args: Array[String]): Unit = {
    val spark = sparkSession(true)
    import spark.implicits._

    // parse each "name, age" line into a Person
    spark.read.textFile("file:///" + getProjectPath + "/src/main/resource/data/text/people.txt")
      .map(line => Person(line.split(",")(0), line.split(",")(1).trim.toLong))
      .foreach(person => println(s"name:${person.name}\t age:${person.age}"))

    spark.stop()
  }
}
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.txt")
println(dataSet.first())   // first() simply delegates to head()

spark.stop()
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.text")
println(dataSet.head())    // head() returns the first row; first() calls it internally
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.text")
// head(n) returns the first n rows as an Array
println(dataSet.head(5).mkString("\n"))
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.text")
println(dataSet.count())
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.txt")
println(dataSet.collect().mkString("\n"))
val spark = sparkSession(true)

val dataSet = spark.read.textFile("/home/liuwen/data/a.txt")
println(dataSet.collectAsList())   // collectAsList returns a java.util.List

// convert the Java list to a Scala collection before iterating
import scala.collection.JavaConverters._
for (v <- dataSet.collectAsList().asScala) println(v)

spark.stop()
val spark = sparkSession(true)

val dataSet = spark.read.textFile("/home/liuwen/data/a.txt")
dataSet.foreach(println(_))
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/a.text")

import spark.implicits._
// map each line to the number of words it contains
val lineWordLength = dataSet.map(line => line.split(" ").size)
println(lineWordLength.collect().mkString("\n"))
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/word.text")

/**
 * Count the total number of words across all lines.
 */
import spark.implicits._
val lineWordLength = dataSet.map(line => line.split(" ").size)
val result = lineWordLength.reduce((a, b) => a + b)
println(result)
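For comparison, counting how often each distinct word occurs (rather than the total number of words) can be sketched with flatMap and groupByKey. This is only a sketch that assumes the same word.text file and the sparkSession helper shown earlier; the wordCounts name is illustrative:

val spark = sparkSession()
import spark.implicits._

val dataSet = spark.read.textFile("/home/liuwen/data/word.text")

// split every line into words, then count occurrences of each distinct word
val wordCounts = dataSet
  .flatMap(line => line.split(" "))
  .groupByKey(word => word)
  .count()

wordCounts.show()

spark.stop()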
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/word.big.cn.text")
dataSet.show()   // show() prints the first 20 rows in tabular form and returns Unit
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/word.big.cn.text")

/**
 * Display the first 3 rows in tabular form.
 * numRows is the number of rows to show.
 */
dataSet.show(3)
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/word.big.text")

/**
 * Display the first 10 rows in tabular form.
 * numRows is the number of rows to show.
 * false disables truncation of long strings.
 */
dataSet.show(10, false)
val spark = sparkSession()

val dataSet = spark.read.textFile("/home/liuwen/data/word.big.txt")
val result = dataSet.take(10)   // take(n) is equivalent to head(n)
println(result.mkString("\n"))
val spark = sparkSession()

val dataSet = spark.read.json("hdfs://standalone.com:9000/home/liuwen/data/json/people.json")
dataSet.describe("name", "age").show()
// +-------+-------+------------------+
// |summary|   name|               age|
// +-------+-------+------------------+
// |  count|      3|                 2|
// |   mean|   null|              24.5|
// | stddev|   null|7.7781745930520225|
// |    min|   Andy|                19|
// |    max|Michael|                30|
// +-------+-------+------------------+