package cn.wanda.utils.modelGroup

import cn.wanda.utils.config
import cn.wanda.utils.config.groupJobTrait
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

class group1 extends groupJobTrait {
    // NOTE(review): kept only for backward compatibility with any external
    // readers of these fields; getGroup no longer mutates them (see below).
    val model1TableMap = new config.TableMap
    val model5TableMap = new config.TableMap

    /**
     * Runs the trait-provided `group1Sql` query and scores each member for
     * model groups 1 and 5, returning one DataFrame with a row per
     * (member, group) pair — group id 1 rows first, then group id 5.
     *
     * Expected query output per row: (0) member code, (1) td_180d,
     * (2) od_tran_amt_crt_1mth — columns 1 and 2 are passed through the
     * trait's `isNull` null-handling helper.
     *
     * @param sparkSessionContext active session used to run the SQL and
     *                            build the result DataFrames
     * @return union of the group1 and group5 score DataFrames (schema comes
     *         from the trait's `schema`)
     */
    def getGroup(sparkSessionContext: SparkSession): DataFrame = {
        val jdbcDF = sparkSessionContext.sql(group1Sql)
          .rdd
          .map(row => {
              val memberCode = row.get(0).toString
              val td_180d = isNull(row.get(1))
              val od_tran_amt_crt_1mth = isNull(row.get(2))

              // BUG FIX: the original mutated the two class-level TableMap
              // instances and returned the same shared references for every
              // record in a partition. That only works while this map stays
              // pipelined with the Row-building maps below; any buffering in
              // between (cache, shuffle, sort) would make every row carry the
              // last record's values. Allocate fresh instances per record.
              // This also keeps `this` out of the closure.
              val model1Result = new config.TableMap
              val model5Result = new config.TableMap

              // group1 score: piecewise constant on (td_180d, od_tran_amt_crt_1mth).
              // Guard conditions kept exactly as the original so edge cases
              // (e.g. a NaN from isNull) fall through to the same branch.
              val td_180d_model1_goal: Float =
                  if (td_180d >= 0.5) 500.6f
                  else if (td_180d < 0.5 && od_tran_amt_crt_1mth < 0.2244) 583.7f
                  else 556.8f

              model1Result.member_code = memberCode
              model1Result.td_180d_value = td_180d
              model1Result.od_tran_amt_crt_1mth_value = od_tran_amt_crt_1mth
              model1Result.model_of_the_total_score = td_180d_model1_goal

              // group5 score: same shape as group1 with different cut-off and
              // constants. (Original local was misspelled "gold".)
              val td_180d_model5_goal: Float =
                  if (td_180d >= 0.5) 528f
                  else if (td_180d < 0.5 && od_tran_amt_crt_1mth < 0.3309) 619.1f
                  else 591.1f

              model5Result.member_code = memberCode
              model5Result.td_180d_value = td_180d
              model5Result.od_tran_amt_crt_1mth_value = od_tran_amt_crt_1mth
              model5Result.model_of_the_total_score = td_180d_model5_goal

              (model1Result, model5Result)
          })

        // Flatten each TableMap into the trait's 50-column schema; only
        // data_date, group id, member code, the two inputs and the total
        // score are populated — the rest are null placeholders.
        // NOTE(review): data_date is never assigned in this method — it is
        // presumably defaulted by config.TableMap; verify against that class.
        val group1Rdd = jdbcDF.map(_._1).map(
            x => Row(x.data_date, 1, x.member_code, x.td_180d_value, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, null,
                null, x.od_tran_amt_crt_1mth_value, null, null, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, x.model_of_the_total_score)
        )

        val group5Rdd = jdbcDF.map(_._2).map(
            x => Row(x.data_date, 5, x.member_code, x.td_180d_value, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, null,
                null, x.od_tran_amt_crt_1mth_value, null, null, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, null,
                null, null, null, null, null, null, null, null, null, x.model_of_the_total_score)
        )

        val group1DF = sparkSessionContext.sqlContext.createDataFrame(group1Rdd, schema)
        val group5DF = sparkSessionContext.sqlContext.createDataFrame(group5Rdd, schema)

        // union preserves duplicates and column order by position.
        group1DF.union(group5DF)
    }

}
