package com.fudian.spark_platform

import java.util.Base64

import com.fudian.spark_platform.Configure._
import com.fudian.spark_platform.DBConnector.{RedisConnector, MongoConnector}
import com.fudian.spark_platform.Utils.MLUtils
import net.minidev.json.JSONObject
import net.minidev.json.parser.JSONParser
import org.apache.spark.sql.{DataFrame, SparkSession}


import com.fudian.spark_platform.Clustering._

/**
  * Bundles everything a single clustering run needs: the active [[SparkSession]],
  * the loaded input [[DataFrame]], the milling utilities, and the ML / transform /
  * milling configuration sections. Passed as one unit to the `*Clustering` classes.
  */
case class MLClusteringConf(spark: SparkSession, dataF: DataFrame, mLUtils: MLUtils, mLConfig: MLConfig, transformConf: TransformConfig, millConf:MillConfig)

object ClusteringBase extends Serializable {

    // Configuration sections with built-in defaults; each is overridden in place
    // by setConf() from the base64-encoded JSON passed on the command line.
    var appSettingConf: AppSettingConfig = new AppSettingConfig()
    var dataSourceConf: DataSourceInputConfig = new DataSourceInputConfig()
    var millConf: MillConfig = new MillConfig()
    var transformConf: TransformConfig = new TransformConfig()
    var mlConf: MLConfig = new MLConfig()
    var outputConf: OutputConfig = new OutputConfig()

    // Populated during a run: the session, the loaded input, and the result.
    var spark: SparkSession = null
    var dataF: DataFrame = null
    var outputDF: DataFrame = null

    /**
      * Main entry point of the whole project: load configuration, load the data,
      * run the configured clustering algorithm, then persist the result.
      *
      * @param args args(0) is a base64-encoded JSON configuration object;
      *             when absent, a built-in demo configuration is used
      */
    def main(args: Array[String]): Unit = {

        // Read configuration first (fall back to the built-in demo config).
        if (args.length > 0) {
            settingsConf(args(0))
        } else {
            settingsConf("eyJBcHBTZXR0aW5nQ29uZiI6IHsiQXBwTmFtZSI6ICJrZF9zZXJ2ZXIiLCAiU3BhcmtNYXN0ZXIiOiAibG9jYWxbKl0ifSwgIkRhdGFTb3VyY2VJbnB1dENvbmYiOiB7IkRhdGFTb3VyY2VUeXBlIjogInRleHRGaWxlIiwgIkRhdGFTb3VyY2VGaWxlVHlwZSI6ICJjc3YiLCAiRGF0YVNvdXJjZUxvY2F0aW9uIjogIi9Vc2Vycy94aWFvanVuL0Rlc2t0b3AvXHU1ZjAwXHU1OTI3XHU0ZTJhXHU2MDI3XHU1MzE2XHU2M2E4XHU4MzUwXHU2MmE1XHU1NDRhL3VzZXJfcmVjb3JkLmNzdiIsICJEYXRhU291cmNlRGVsaW1pdGVyIjogIiwiLCAiRGF0YVNvdXJjZURCVXJpIjogIiIsICJEYXRhU291cmNlREJUeXBlIjogIm15c3FsIiwgIkRhdGFTb3VyY2VIb3N0IjogIjEyNy4wLjAuMSIsICJEYXRhU291cmNlUG9ydCI6IDMzMDYsICJEYXRhU291cmNlVXNlciI6ICJyb290IiwgIkRhdGFTb3VyY2VQd2QiOiAiIiwgIkRhdGFTb3VyY2VEQk5hbWUiOiAiIiwgIkRhdGFTb3VyY2VEQlRhYmxlIjogIiIsICJEYXRhU291cmNlREJGZHMiOiBbXSwgIkRhdGFTb3VyY2VMaW1pdENvdW50IjogMH0sICJNaWxsQ29uZiI6IHsiWkhBYmxlIjogdHJ1ZSwgIkFsbG93TnVtYmVyIjogdHJ1ZSwgIkxpbWl0Q291bnQiOiAyLCAiRmFzdFN0b3BXb3JkcyI6ICIifSwgIlRyYW5zZm9ybUNvbmYiOiB7IlRyYW5zZm9ybVR5cGUiOiAiQ291bnRWZWN0b3IiLCAiVHJhbnNmb3JtVHlwZUxpc3QiOiBbIkNvdW50VmVjdG9yIiwgIkhhc2hpbmdURiJdLCAiQ291bnRWZWN0b3IiOiB7IklucHV0Q29sIjogIndvcmRzIiwgIk91dFB1dENvbCI6ICJmZWF0dXJlIn0sICJIYXNoaW5nVEYiOiB7IklucHV0Q29sIjogIndvcmRzIiwgIk91dFB1dENvbCI6ICJmZWF0dXJlIn0sICJJbnB1dENvbCI6ICJzdHJpbmciLCAiT3V0UHV0Q29sIjogInN0cmluZyJ9LCAiTUxDb25mIjogeyJNTFR5cGUiOiAiQUxTIiwgIkxEQSI6IHsiayI6IDMsICJtYXhJdE51bSI6IDIwLCAib3B0aW1pemVyIjogImVtIiwgImRlc2NyaWJlVG9waWNzIjogMTB9LCAiVEZfSURGIjogeyJtaW5Eb2MiOiAyLCAiaW5wdXRDb2wiOiAiZmVhdHVyZXMiLCAib3V0cHV0Q29sIjogImlkZiJ9LCAiS19NZWFucyI6IHsiayI6IDEwLCAibWF4SXROdW0iOiAzMCwgInNlZWQiOiAxfSwgIkFMUyI6IHsiayI6IDEwLCAibWF4SXROdW0iOiAzMCwgInJlZ1BhcmFtIjogMC4wMSwgImltcGxpY2l0UHJlZnMiOiB0cnVlLCAidXNlckNvbCI6ICJpbmRpY2VzU2lkIiwgIml0ZW1Db2wiOiAiaW5kaWNlc0NpZCIsICJyYXRpbmdDb2wiOiAicmF0aW5nIn0sICJXb3JkMlZlY3RvciI6IHt9LCAiTUxQYXJhbXMiOiB7fX0sICJPdXRwdXRDb25mIjogeyJPdXRwdXRUeXBlIjogImRiIiwgIk91dFRleHRQYXRoIjogIiIsICJPdXREYXRhQmFzZVR5cGUiOiAibW9uZ29kYiIsICJPdXREYXRhQmFzZU5hbWUiOiAia2Rfb3V0cHV0IiwgIk91dERhdGFCYXNlSG9zdCI6ICIxMjcuMC4wLjEiLCAiT3V0RGF0YUJhc2VQb3J0IjogMjcwMTcsICJPdXREYXRhQmFzZVRhYmxlIjogInJlc3VsdHMiLCAiT3V0RGF0YUJhc2VGZHMiOiAiIiwgIk91dERhdGFCYXNlVXNlciI6ICJzdHJpbmciLCAiT3V0RGF0YUJhc2VQd2QiOiAic3RyaW5nIiwgIk91dEhERlNQYXRoIjogIiJ9fQ==")
        }

        val mlUtils = new MLUtils(this.millConf)
        // Load the input data (also creates this.spark as a side effect).
        // TODO: allow field selection so the front end can show pre-loaded columns
        //       and the user can drop/rename fields before the run.
        this.dataF = getDataFrameLoad

        // TODO: apply per-field milling/cleaning rules before clustering.
        // TODO: apply feature transformations per field (StringIndexer,
        //       CountVector, HashingTF, ...) according to TransformConf.

        val mlClusteringConf = MLClusteringConf(this.spark, this.dataF, mlUtils, this.mlConf, this.transformConf, this.millConf)
        // Quiet Spark's console output during the run.
        mlClusteringConf.spark.sparkContext.setLogLevel("ERROR")

        // Dispatch to the algorithm named in the ML configuration.
        this.mlConf.mLConfig("MLType") match {
            case "LDA" =>
                // Mongo-sourced data goes through a dedicated LDA pipeline;
                // everything else is vectorised with CountVector first.
                this.dataSourceConf.inputConfig("DataSourceType") match {
                    case "mongodb" =>
                        this.outputDF = new LDAClustering(mlClusteringConf).clustering()
                    case _ =>
                        this.outputDF = new CountVectorLDA(mlClusteringConf).clustering()
                }
            case "ALS" =>
                this.outputDF = new ALSClustering(mlClusteringConf).clustering()
            case "K_Means" =>
                this.outputDF = new KMeansClustering(mlClusteringConf).clustering()
            case "TF_IDF" =>
                // Not implemented yet.
                println("等待实现")
            case _ =>
                // FIX: an empty or unknown MLType used to throw a MatchError
                // (only "" was handled); report it instead of crashing.
                println("读取算法配置失败,请确保参数正确")
        }

        // Persist the analysis result. Some branches above (TF_IDF, bad MLType)
        // produce no result; skip output then instead of NPE-ing on a null
        // outputDF (FIX: previously crashed at outputDF.show/write).
        if (this.outputDF != null) {
            writeOutput()
        }
        this.spark.close()
    }

    /**
      * Write this.outputDF to the destination named in OutputConf
      * (console, local text file, HDFS, or a database).
      */
    private def writeOutput(): Unit = {
        this.outputConf.outConfig("OutputType") match {
            case "console" =>
                this.outputDF.show(false)
            case "textFile" =>
                this.outputDF.write.save(this.outputConf.outConfig("OutTextPath"))
            case "hdfs" =>
                this.outputDF.write.save(this.outputConf.outConfig("OutHDFSPath"))
            case "db" =>
                this.outputConf.outConfig("OutDataBaseType") match {
                    case "mongodb" =>
                        this.outputDF.show(false)
                        val mongodbUri = "mongodb://" + this.outputConf.outConfig("OutDataBaseHost") + ":" + this.outputConf.outConfig("OutDataBasePort") + "/"
                        this.outputDF.write
                            .option("uri", mongodbUri)
                            .option("database", this.outputConf.outConfig("OutDataBaseName"))
                            .option("collection", this.outputConf.outConfig("OutDataBaseTable"))
                            .mode("overwrite")
                            .format("com.mongodb.spark.sql")
                            .save()
                    case "mysql" =>
                        // TODO: persist to MySQL.
                        println("暂时不支持将数据存入mysql")
                        this.outputDF.show(false)
                    case "oracle" =>
                        // TODO: persist to Oracle.
                        println("暂时不支持将数据存入oracle")
                        this.outputDF.show(false)
                    case _ =>
                        // FIX: an unknown OutDataBaseType used to throw a
                        // MatchError; fall back to console output.
                        this.outputDF.show(false)
                }
            case _ =>
                this.outputDF.show(false)
        }
    }

    /**
      * Build the SparkSession (when needed) and load the configured input
      * data into a DataFrame.
      *
      * @return the loaded input DataFrame
      * @throws IllegalArgumentException for an unsupported DataSourceType
      *                                  (FIX: previously a bare MatchError)
      */
    def getDataFrameLoad: DataFrame = {

        this.dataSourceConf.inputConfig("DataSourceType") match {
            // Local files and HDFS paths are both read via spark.read, so the
            // two branches share one code path (they were duplicated before).
            case "textFile" | "hdfs" =>
                this.spark = SparkSession.builder()
                    .appName(this.appSettingConf.settings("AppName"))
                    .master(this.appSettingConf.settings("SparkMaster"))
                    .getOrCreate()
                loadFileSource()
            case "mongodb" =>
                // The connector builds its own SparkSession; adopt it.
                val mongoConnector = new MongoConnector(this.appSettingConf, this.dataSourceConf)
                val mongoData = mongoConnector.getMongoLoad()
                this.spark = mongoConnector.spark
                mongoData
            case other =>
                throw new IllegalArgumentException("Unsupported DataSourceType: " + other)
        }
    }

    /**
      * Read the file at DataSourceLocation as plain text or CSV, depending on
      * DataSourceFileType. Requires this.spark to be initialised.
      */
    private def loadFileSource(): DataFrame = {
        val location = this.dataSourceConf.inputConfig("DataSourceLocation").toString
        this.dataSourceConf.inputConfig("DataSourceFileType") match {
            case "txt" =>
                this.spark.read.text(location)
            case "csv" =>
                // FIX: use the stable "csv" format alias instead of Spark's
                // internal CSVFileFormat class name, and honour the configured
                // DataSourceDelimiter instead of a hard-coded comma (the key is
                // present in the default config — assumed present in overrides).
                this.spark.read.format("csv")
                    .option("header", true)
                    .option("timestampFormat", "yyyy/MM/dd HH:mm:ss ZZ")
                    .option("delimiter", this.dataSourceConf.inputConfig("DataSourceDelimiter").toString)
                    .load(location)
            case other =>
                // FIX: unknown file types used to throw a bare MatchError.
                throw new IllegalArgumentException("Unsupported DataSourceFileType: " + other)
        }
    }

    /**
      * Decode the base64 configuration payload and override the default
      * configuration sections with it.
      *
      * @param conf base64-encoded JSON configuration string
      */
    def settingsConf(conf: String): Unit = {
        // FIX: decode explicitly as UTF-8 instead of the platform default
        // charset — the payload is JSON produced by the front end.
        val jsonStr = new String(Base64.getDecoder.decode(conf), "UTF-8")
        val jsonParser = new JSONParser()
        val jsonConf: JSONObject = jsonParser.parse(jsonStr).asInstanceOf[JSONObject]
        println("参数传入如下: " + jsonConf)
        setConf(jsonConf)
    }

    /**
      * Distribute each top-level JSON section to its configuration holder;
      * unknown or non-object entries are reported and skipped.
      *
      * @param conf parsed top-level configuration object
      */
    def setConf(conf: JSONObject) = {

        val iter = conf.keySet().iterator()
        while (iter.hasNext) {
            val key = iter.next()
            conf.get(key) match {
                case section: JSONObject =>
                    key match {
                        case "AppSettingConf" => this.appSettingConf.setConf(section)
                        case "DataSourceInputConf" => this.dataSourceConf.setConf(section)
                        case "MillConf" => this.millConf.setConf(section)
                        case "TransformConf" => this.transformConf.setConf(section)
                        case "MLConf" => this.mlConf.setConf(section)
                        case "OutputConf" => this.outputConf.setConf(section)
                        case _ =>
                            // FIX: an unrecognised object-valued section used
                            // to throw a MatchError; report it like other
                            // unneeded parameters.
                            println("不需要的参数如下: " + key)
                    }
                case _ =>
                    println("不需要的参数如下: " + key)
            }
        }
    }

}
