package cn.wanda.jobs

import cn.wanda.topologies.SparkTopoContext
import cn.wanda.topologies.bases.SparkBatchTopology
import cn.wanda.utils.db.{CreateBigTable, MySQLDBManager}
import cn.wanda.utils.modelGroup.{group1, group2, group3, group4}
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

class CollectionRatingModel extends SparkBatchTopology {
    // Application parameters injected from the topology context in config();
    // read in process() for table creation and MySQL connection settings.
    // config() runs before process(), so this is populated by the time it is read.
    var appParaMap: mutable.HashMap[String, String] = _

    /**
     * Builds the merged big table (named ccard_bigtable11), computes the four
     * rating groups, unions them into a single dataset, and persists the
     * result to MySQL using connection settings from `appParaMap`.
     *
     * @param context the underlying SparkContext (unused directly here)
     * @param session the SparkSession used to build the big table and the groups
     */
    override def process(context: SparkContext, session: SparkSession): Unit = {
        // Create the merged big table (ccard_bigtable11) that the group
        // computations below read from.
        new CreateBigTable(session, appParaMap).createTable()

        // Compute each rating group and union them into one dataset.
        // (A Seq + reduce avoids four local vals that shadow the class names.)
        val allGroups = Seq(
            new group1().getGroup(session),
            new group2().getGroup(session),
            new group3().getGroup(session),
            new group4().getGroup(session)
        ).reduce(_ union _)

        // MySQL connection settings; fail fast with a descriptive message if
        // any key is missing instead of an opaque NoSuchElementException.
        val url: String = requiredParam("mysql.url")
        val tableName: String = requiredParam("mysql.tablename")
        val username: String = requiredParam("mysql.username")
        val password: String = requiredParam("mysql.password")
        new MySQLDBManager().save2Mysql(allGroups, url, tableName, username, password)
    }

    /** Looks up a mandatory configuration key, naming the key in the error when absent. */
    private def requiredParam(key: String): String =
        appParaMap.getOrElse(
            key,
            throw new IllegalArgumentException(s"Missing required parameter: $key")
        )

    override def config(context: SparkTopoContext, conf: SparkConf): Unit = {
        // Capture the parameter map first so it is available both here and in process().
        appParaMap = context.getMap
        // Master is now configurable via the "spark.master" parameter; the default
        // of "local" preserves the previous hard-coded behavior, while allowing
        // cluster deployments to override it instead of being forced into local mode.
        conf.setMaster(appParaMap.getOrElse("spark.master", "local"))
    }
}
