Reading and writing data between Spark 2.x and MongoDB. For the full list of read/write configuration parameters, see https://docs.mongodb.com/spark-connector/current/configuration/
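The configuration keys can also be set once on the SparkSession instead of being repeated on every read/write call. A minimal sketch, assuming the documented spark.mongodb.input.uri / spark.mongodb.output.uri keys; the host, credentials, and namespaces below are placeholders:

import org.apache.spark.sql.SparkSession

// Session-level connector configuration: subsequent reads/writes pick these
// URIs up automatically unless overridden per call.
val spark = SparkSession.builder()
  .appName("mongo-config-demo")
  .config("spark.mongodb.input.uri", "mongodb://test:pwd123456@192.168.0.1:27017/test.articles")
  .config("spark.mongodb.output.uri", "mongodb://test:pwd123456@192.168.0.1:27017/test.article_garbage")
  .getOrCreate()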
When reading data from MongoDB, specify a partition field and partition size to improve read efficiency. When only part of the dataset is needed, filter it with the Dataset/SQL API: the Mongo Connector builds an aggregation pipeline so the filtering runs on the MongoDB side, and only the matching documents are sent back to Spark for further processing.
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder
  .appName(this.getClass.getName.stripSuffix("$"))
  .getOrCreate()

val inputUri = "mongodb://test:pwd123456@192.168.0.1:27017/test.articles"

// Read from MongoDB, splitting the collection into ~32 MB partitions on _id.
val df = spark.read.format("com.mongodb.spark.sql")
  .options(Map(
    "spark.mongodb.input.uri" -> inputUri,
    "spark.mongodb.input.partitioner" -> "MongoPaginateBySizePartitioner",
    "spark.mongodb.input.partitionerOptions.partitionKey" -> "_id",
    "spark.mongodb.input.partitionerOptions.partitionSizeMB" -> "32"))
  .load()

// Keep only documents updated within the last 24 hours (1440 minutes);
// this predicate is pushed down to MongoDB as an aggregation pipeline.
val currentTimestamp = System.currentTimeMillis()
val originDf = df
  .filter(df("updateTime") < currentTimestamp &&
    df("updateTime") >= currentTimestamp - 1440 * 60 * 1000)
  .select("_id", "content", "imgTotalCount")
  .toDF("id", "content", "imgnum")
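The same read can also be expressed with the connector's ReadConfig helper instead of fully prefixed option strings. A minimal sketch, assuming connector 2.x's MongoSpark.load / ReadConfig API and that ReadConfig accepts the option names without the spark.mongodb.input. prefix; spark and inputUri are the values defined above:

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig

// Same source and partitioner settings as above, expressed via ReadConfig.
val readConfig = ReadConfig(Map(
  "uri" -> inputUri,
  "partitioner" -> "MongoPaginateBySizePartitioner",
  "partitionerOptions.partitionKey" -> "_id",
  "partitionerOptions.partitionSizeMB" -> "32"))

val mongoDf = MongoSpark.load(spark, readConfig)

Dataset filters on mongoDf are pushed down the same way; calling explain() on the filtered frame should show the updateTime predicates handled inside the Mongo relation rather than by a Spark-side filter.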
val outputUri = "mongodb://test:pwd123456@192.168.0.1:27017/test.article_garbage"

// saveDF is the DataFrame to persist; here it is simply the filtered frame
// from above (any further processing of originDf would produce it).
val saveDF = originDf

saveDF.write
  .options(Map("spark.mongodb.output.uri" -> outputUri))
  .mode("append")
  .format("com.mongodb.spark.sql")
  .save()
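The write side has a matching WriteConfig helper. A minimal sketch, assuming connector 2.x's MongoSpark.save and reusing saveDF and outputUri from above; MongoSpark.save inserts the rows as new documents, which should match the append behaviour used here:

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.WriteConfig

// Same target collection as above, via WriteConfig (unprefixed keys).
val writeConfig = WriteConfig(Map("uri" -> outputUri))
MongoSpark.save(saveDF, writeConfig)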
Scala API documentation for operating on MongoDB from Spark: https://docs.mongodb.com/spark-connector/current/scala-api/