
/*
* 数据统计类:
* all=0 (默认)
* 足球:
*   按照赛事,年份,月份,终盘盘口统计
*
* all=1
* 足球:
*   按照赛事,月份,终盘盘口统计
* */

//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.71:/root/fb_FBST2017005.jar
//
//
////按照赛事,年份,月份,终盘盘口统计
//nohup ./bin/spark-submit --master spark://skn-rqg382b1-spark-master:7077 --class caiqr.model.statistics.fb.FBST2017005 --jars /usr/local/spark/jars/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /root/fb_FBST2017005.jar maxResultSize=6g  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 output_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/FBST2017005.csv match_big_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_all_asia_500w_1.csv big_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_all_asia_500w_1.csv company_ids=3 job_id=8632 spark_id=11  > /root/a.log < /dev/null 2>&1  &
//

////按照赛事,月份,终盘盘口统计
//nohup ./bin/spark-submit --master spark://skn-rqg382b1-spark-master:7077 --class caiqr.model.statistics.fb.FBST2017005 --jars /usr/local/spark/jars/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /root/fb_FBST2017005.jar maxResultSize=6g  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 output_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/FBST2017005_all.csv match_big_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_all_asia_500w_1.csv big_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_all_asia_500w_1.csv all=1 company_ids=3 job_id=8632 spark_id=11  > /root/a.log < /dev/null 2>&1  &


//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBST2017001_home.csv
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBST2017001_away.csv




//caiqr.model.statistics.fb.FBST2017005

package caiqr.model.statistics.fb

import com.redislabs.provider.redis._
import caiqr.utils.{AllAsiaInputFile, PredictionUtils, PredictionDBUtils, AllFBMatchInputFile}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException
//import java.util.ArrayList

object FBST2017005 {

  import org.apache.spark.sql.Row
  import org.apache.spark.sql.types.{StringType, StructField, StructType}

  /**
   * Entry point. Expects `key=value` arguments:
   *  - job_id, spark_id, big_file : required (job fails fast otherwise)
   *  - save_db_info               : DB connection info, e.g. "host-db-user-password"
   *  - company_ids                : underscore-separated company id list, e.g. "1_3"
   *  - all                        : "0" (default) = stats per competition/year/month/closing odds;
   *                                 "1" = stats per competition/month/closing odds
   *  - match_big_file             : odds rows of the matches that need a prediction
   *  - output_file                : HDFS path for the result CSV
   *  - maxResultSize              : spark.driver.maxResultSize (default "4g")
   */
  def main(args: Array[String]): Unit = {

    //////////////////////////////// argument parsing ////////////////////////////////
    // Split on the first '=' only, so values may themselves contain '='.
    val cmd_map = args.toList.map { arg =>
      val items = arg.split("=", 2)
      (items(0), items(1))
    }.toMap

    val job_id = cmd_map.getOrElse("job_id", "")
    val model_id = cmd_map.getOrElse("spark_id", "")
    val big_file = cmd_map.getOrElse("big_file", "")
    val save_db_info = cmd_map.getOrElse("save_db_info", "") // DB info
    val company_ids = cmd_map.getOrElse("company_ids", "")   // company id list, format: 1_3

    // all=0 -> stats keyed by competition/year/month/odds (default), e.g.
    //   3,EPL,2016,10,-50,1333,0333,1010,572793_572802_572809_572815
    // all=1 -> stats keyed by competition/month/odds, e.g.
    //   3,EPL,10,-50,1333,0333,1010,572793_572802_572809_572815
    val all = cmd_map.getOrElse("all", "0")

    // odds rows of the matches to compute predictions for
    val matchid_big_file = cmd_map.getOrElse("match_big_file", "")

    // result output file (HDFS)
    val output_home_file = cmd_map.getOrElse("output_file", "")

    if (job_id == "" || model_id == "" || all == null || big_file == "") {
      // BUG FIX: the original message lacked the `s` interpolator, so the literal
      // text "${args.length}" was thrown instead of the actual argument count.
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }

    // driver result-set limit: default 4g
    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")
    //////////////////////////////// argument parsing ////////////////////////////////


    // Spark environment
    val conf = new SparkConf().setAppName("FBST2017005")
      .set("spark.driver.maxResultSize", maxResultSize)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._


    // 1. company id list -> DataFrame(sporttery_company_id)
    val company_id_list_df = PredictionUtils.transation_company_id_list_to_df(company_ids, sc)


    // 2. load the historical Asian-handicap file, restricted to the requested companies
    val src_asia_df = load(sc, sqlContext, big_file)
    val src_all_odds_df = src_asia_df
      .join(company_id_list_df, src_asia_df("company_id") === company_id_list_df("sporttery_company_id"))
      .orderBy("season_pre", "season_id", "match_time", "match_id", "company_id")


    // 3. aggregate statistics. Result schema:
    // all=0: bcompany_id,bseason_pre,byear,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids
    // all=1: bcompany_id,bseason_pre,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids
    val result_df = calculate_FBST2017005_data(sc, src_all_odds_df, output_home_file, all, sqlContext)


    // 4. load the matches that need predicting
    val src_need_calculate_match_df = AllAsiaInputFile.load_add_match_file(sc, sqlContext, matchid_big_file)
    val match_df = src_need_calculate_match_df.select("match_id", "company_id", "init_odds", "curr_odds", "season_pre", "month")


    // 5. join the stats onto the pending matches and write one CSV line per row
    val result_rdd =
      if (all == "0") { // grouped by year and month
        result_df.join(match_df)
          .where(result_df("bcompany_id") === match_df("company_id"))
          .where(result_df("bodds") === match_df("curr_odds"))
          .where(result_df("bseason_pre") === match_df("season_pre"))
          .where(result_df("bmonth") === match_df("month"))
          .select("match_id", "company_id", "season_pre", "byear", "month", "curr_odds",
            "size", "result", "rr", "aresult", "ar", "bds", "br", "matchids")
          .rdd.map(row => (0 until 14).map(row.getString).mkString(","))
      } else { // grouped by month only
        result_df.join(match_df)
          .where(result_df("bcompany_id") === match_df("company_id"))
          .where(result_df("bodds") === match_df("curr_odds"))
          .where(result_df("bseason_pre") === match_df("season_pre"))
          .where(result_df("bmonth") === match_df("month"))
          .select("match_id", "company_id", "bseason_pre", "bmonth", "bodds",
            "size", "result", "rr", "aresult", "ar", "bds", "br", "matchids")
          .rdd.map(row => (0 until 13).map(row.getString).mkString(","))
      }
    PredictionUtils.save_result_to_hdfs(result_rdd, output_home_file)


    // mark job/spark run as finished: Spark done, waiting for sqoop import to DB
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)

    sc.stop()
  }

  /**
   * Aggregates historical match outcomes per (company, competition, [year,] month,
   * closing odds) group.
   *
   * Expected `match_df` columns (all strings, by position):
   *   0 match_id, 1 company_id, 2 match_time, 3 season_id, 4 season_pre,
   *   5 myear, 6 mmonth, 7 home_match_result, 8 curr_home_resul, 9 bds,
   *   10 init_odds, 11 curr_odds
   *
   * @param output_file kept for interface compatibility (result is returned, not written here)
   * @param all         "0" = include the year in the grouping key, otherwise month-only
   * @return DataFrame with schema
   *         all=0: bcompany_id,bseason_pre,byear,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids
   *         all=1: bcompany_id,bseason_pre,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids
   */
  def calculate_FBST2017005_data(sc: SparkContext, match_df: DataFrame, output_file: String, all: String, sqlContext: SQLContext): DataFrame = {

    // key:   company_id_season_pre[_year]_month_curr_odds
    // value: (1x2 result, AH result, both-teams-scored flag, match_id, match_time)
    val keyed = match_df.rdd.map { p =>
      val key =
        if (all == "0")
          s"${p.getString(1)}_${p.getString(4)}_${p.getString(5)}_${p.getString(6)}_${p.getString(11)}"
        else
          s"${p.getString(1)}_${p.getString(4)}_${p.getString(6)}_${p.getString(11)}"
      (key, (p.getString(7), p.getString(8), p.getString(9), p.getString(0), p.getString(2)))
    }

    // Per group: order chronologically, then concatenate the per-match digits.
    val aggregated = keyed.groupByKey().map { case (key, values) =>
      val ordered = values.toArray.sortWith(_._5 < _._5) // sort by match_time
      (key,
        ordered.map(_._1).mkString,       // 1x2 results, e.g. "1333"
        ordered.map(_._2).mkString,       // Asian-handicap results
        ordered.map(_._3).mkString,       // both-teams-scored flags
        ordered.map(_._4).mkString("_"))  // match ids, e.g. "572793_572802"
    }

    // Build one all-string Row per group; key layout depends on `all`.
    val rowRDD = aggregated.map { case (key, results, aresults, bds, matchids) =>
      val keys = key.split("_")
      val (size, rr, ar, br) = summarize(results, aresults, bds)
      if (all == "0")
        // keys: company_id, season_pre, year, month, odds
        Row(keys(0), keys(1), keys(2), keys(3), keys(4), size, results, rr, aresults, ar, bds, br, matchids)
      else
        // keys: company_id, season_pre, month, odds
        Row(keys(0), keys(1), keys(2), keys(3), size, results, rr, aresults, ar, bds, br, matchids)
    }

    val schemaString =
      if (all == "0") "bcompany_id,bseason_pre,byear,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids"
      else "bcompany_id,bseason_pre,bmonth,bodds,size,result,rr,aresult,ar,bds,br,matchids"
    sqlContext.createDataFrame(rowRDD, stringSchema(schemaString))
  }

  /**
   * Tallies one group's concatenated outcome strings.
   *
   * @param results  concatenated 1x2 results, digits in {3,1,0}
   * @param aresults concatenated Asian-handicap results, digits in {3,1,0}
   * @param bds      concatenated both-teams-scored flags, digits in {1,0}
   * @return (group size, "win_draw_loss", AH "win_draw_loss", "bothScored_notBothScored")
   */
  private def summarize(results: String, aresults: String, bds: String): (String, String, String, String) = {
    val rs = results.split("")
    val size = rs.length.toString // number of matches in the group
    val rr = s"${rs.count(_ == "3")}_${rs.count(_ == "1")}_${rs.count(_ == "0")}"
    val as = aresults.split("")
    val ar = s"${as.count(_ == "3")}_${as.count(_ == "1")}_${as.count(_ == "0")}"
    val bs = bds.split("")
    val br = s"${bs.count(_ == "1")}_${bs.count(_ == "0")}"
    (size, rr, ar, br)
  }

  /** Builds an all-string nullable schema from a comma-separated column list. */
  private def stringSchema(schemaString: String): StructType =
    StructType(schemaString.split(",").map(name => StructField(name, StringType, nullable = true)))

  /**
   * Loads the tab-separated Asian-handicap history file into a DataFrame.
   *
   * Source columns used (0-based): 0 match_id, 1 company_id, 2 match_time,
   * 3 season_id, 5 season_pre, 9 1x2 result, 11 final score, 13 init_odds,
   * 16 curr_odds, 19 AH closing result, 28 year, 29 month.
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {

    // 1. read the raw Asian-odds file
    val lines = sc.textFile(filename)
    val schemaString = "match_id,company_id,match_time,season_id,season_pre,myear,mmonth,home_match_result,curr_home_resul,bds,init_odds,curr_odds"

    val rowRDD = lines.map(_.split("\t")).map { p =>

      val home_match_result = p(9)  // 1x2 result
      val curr_home_result = p(19)  // Asian-handicap closing result

      // Final score parsed as two single characters, e.g. "21" = 2:1.
      // NOTE(review): this breaks for double-digit goal counts — confirm the
      // upstream file guarantees single-digit scores.
      val score = p(11)
      val home_score = score(0) - '0'
      val away_score = score(1) - '0'

      // both-teams-scored flag: 1 only when neither side was kept to zero
      val bds = if (home_score == 0 || away_score == 0) 0 else 1

      Row(p(0), p(1), p(2), p(3), p(5), p(28), p(29), home_match_result, curr_home_result, bds.toString, p(13), p(16))
    }

    sqlContext.createDataFrame(rowRDD, schemaString.split(",")
      .foldLeft(Seq.empty[StructField])((acc, f) => acc :+ StructField(f, StringType, nullable = true))
      match { case fields => StructType(fields) })
  }

}





