package com.edata.bigdata.mongo

import com.edata.bigdata.annotation.Edata_Executor
import com.edata.bigdata.util.{EDataUtils, Executor}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.bson.Document

@Edata_Executor(target = "MONGOEXECUTOR")
class SparkMongoExecutor extends Executor {

  // Spark session; assigned by the framework after construction, so it is
  // null until then. (Declared var to satisfy the Executor contract.)
  override var SESSION: SparkSession = _

  /** No MongoDB-specific setup is required beyond what [[Executor]] provides. */
  override def initialize(): Unit = {
  }

  /*
   * ReadConfig properties:
   * collection: name of the collection to read.
   * database: name of the database to read.
   * uri: MongoDB connection URI.
   * partitioner: name of the partitioner used to split the data.
   * pipeline: aggregation pipeline to apply in MongoDB, as a JSON string or List[Document].
   * sampleSize: number of documents to sample from the dataset.
   * readPreference.name: read-preference name, e.g. primary, secondary.
   * readPreference.tagSets: read-preference tag sets.
   */

  /**
   * Loads the configured collection as a DataFrame, applying each element of
   * `args` as one aggregation-pipeline stage (parsed as extended JSON).
   *
   * Example stage: {'$match': {'type': 'apple'}}
   *
   * @param args zero or more pipeline stages in JSON form; invalid JSON will
   *             make Document.parse throw.
   * @param bean manifest of the target bean type (currently unused here;
   *             kept for the Executor interface).
   * @return the (optionally filtered) collection as a DataFrame.
   */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    // MG_* connection settings are inherited from Executor — presumably
    // populated before any find/save call; verify against the framework.
    val rc = ReadConfig(Map(
      "uri" -> s"mongodb://${MG_IP}:${MG_PORT}/${MG_DATABASE}.${MG_COLLECTION}",
      "database" -> s"${MG_DATABASE}",
      "collection" -> s"${MG_COLLECTION}"
    ))
    // Parse each JSON stage into a BSON Document for the connector pipeline.
    val documents = args.map(arg => {
      Document.parse(arg)
    })
    MongoSpark.load(SESSION.sparkContext, rc).withPipeline(documents).toDF()
  }

  /**
   * Finds documents whose schema-declared id field equals `value`.
   *
   * The schema returns the id descriptor as "fieldName:dataType"; the field
   * name becomes the $match key and the data type drives how `value` is
   * rendered into the query string.
   *
   * @param value the id value to match on.
   * @param args  extra arguments (unused).
   * @return matching documents as a DataFrame.
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    val className = bean.toString()
    val (_, id, _) = find_SCHEMA(className)
    // id is "fieldName:dataType", e.g. "userId:string".
    val k_t = id.split(":")
    // BUGFIX: the original interpolated the raw descriptor (field:type) as the
    // match key and never used `value` — it converted the field name instead.
    // Match on the field name and render the caller-supplied value by type.
    val pipe = s"{$$match:{${k_t(0)}:${EDataUtils.convertVToQueryStrByDataType(value, k_t(1))}}}"
    findData(pipe)
  }

  /**
   * Converts the RDD of raw records into a DataFrame and appends it to the
   * configured MongoDB collection.
   *
   * @param data raw records, one per string.
   * @param args args(1) is used as the separator for createDataFrame.
   *             NOTE(review): this throws IndexOutOfBoundsException when fewer
   *             than two args are supplied, and args(0) is never read —
   *             confirm the intended calling convention against other executors.
   */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    createDataFrame(data, args(1)).write.format("com.mongodb.spark.sql")
      .option("uri", s"mongodb://${MG_IP}:${MG_PORT}/")
      .option("database", MG_DATABASE)
      .option("collection", MG_COLLECTION)
      .option("user", MG_USER)
      .option("password", MG_PASSWORD)
      .mode(SaveMode.Append).save()
  }

  /**
   * Finds documents where `key` equals `value`.
   *
   * NOTE(review): `value` is interpolated verbatim — unlike findDataById it is
   * not passed through convertVToQueryStrByDataType, so string values must
   * arrive pre-quoted by the caller; confirm this is intentional.
   *
   * @param key   document field to match on.
   * @param value literal value fragment inserted into the $match stage.
   * @return matching documents as a DataFrame.
   */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    val pipe = s"{$$match:{${key}:${value}}}"
    findData(pipe)
  }

  /** Not implemented: updates are currently a no-op for the Mongo executor. */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {

  }

  /**
   * Reads raw text from `path` and converts it to a DataFrame using
   * `seperator` to split columns (via the inherited createDataFrame).
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    val rdd = SESSION.sparkContext.textFile(path)
    LOGGER.info(s"finding data from ${path}")
    createDataFrame(rdd, seperator)
  }

  /** Writes the RDD as plain text files under `path`. */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    LOGGER.info(s"saving data to ${path}")
    data.saveAsTextFile(path)
  }


}
