
/*
* 数据统计类:
* 足球: (30s)
*   1). 球队赛季主客场胜率
*     例如: 足球-巴萨-2016/17-西甲-主场胜率/客场胜率
*   2). 球队赛季主客场进球/失球数
*   3). 球队赛季主客场积分
*
* */

//scp /data/caiqiu/SparkModelApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.71:/root/fb_MODEL2017007004.jar
//
//nohup ./bin/spark-submit --master spark://skn-rqg382b1-spark-master:7077 --class caiqr.model.statistics.fb.FBST2017004 --jars /usr/local/spark/jars/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /root/fb_MODEL2017007004.jar maxResultSize=6g  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 output_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/fb_MODEL2017007004.csv big_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_500w_3.csv job_id=8632 spark_id=11  > /root/b.log < /dev/null 2>&1  &
//

//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBST2017001_home.csv
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBST2017001_away.csv




//caiqr.model.statistics.fb.FBST2017004

package caiqr.model.statistics.fb

import com.redislabs.provider.redis._
import caiqr.utils.PredictionUtils
import caiqr.utils.PredictionDBUtils
import caiqr.utils.AllFBMatchInputFile
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException
//import java.util.ArrayList

object FBST2017004 {

  /**
   * Entry point. Parses "key=value" command-line arguments, loads the match
   * CSV from HDFS, computes per-team per-season goal statistics, writes the
   * result back to HDFS, then marks the job/spark status in the DB.
   *
   * Required args: job_id, spark_id, big_file.
   * Optional args: save_db_info, output_file, maxResultSize (default "4g").
   *
   * @throws IllegalArgumentException if any required argument is missing.
   */
  def main(args: Array[String]): Unit = {

    //////////////////////////////// argument parsing ////////////////////////////////
    // Convert "key=value" tokens to a Map. split("=", 2) keeps '=' characters
    // inside values intact; tokens without '=' are skipped instead of crashing
    // with ArrayIndexOutOfBoundsException as the previous items(1) access did.
    val cmd_map: Map[String, String] = args.toList.flatMap { token =>
      token.split("=", 2) match {
        case Array(k, v) => Some(k -> v)
        case _           => None
      }
    }.toMap

    val job_id   = cmd_map.getOrElse("job_id", "")
    val model_id = cmd_map.getOrElse("spark_id", "")
    val big_file = cmd_map.getOrElse("big_file", "")
    // Target DB connection info used by the status update below.
    val save_db_info = cmd_map.getOrElse("save_db_info", "")

    // Result output file (HDFS).
    val output_home_file = cmd_map.getOrElse("output_file", "")

    if (job_id.isEmpty || model_id.isEmpty || big_file.isEmpty) {
      // BUG FIX: the original message lacked the `s` interpolator, so the
      // literal text "${args.length}" was emitted instead of the arg count.
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }

    // Driver-side max result size, default 4g.
    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")
    //////////////////////////////// argument parsing ////////////////////////////////

    // Spark environment.
    val conf = new SparkConf().setAppName("FBST2017004")
      .set("spark.driver.maxResultSize", maxResultSize)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Load the full match file from HDFS into a DataFrame.
    val match_df = AllFBMatchInputFile.load(sc, sqlContext, big_file)

    // 1. Per-team, per-season goals scored / conceded and match counts.
    calculate_FBST2017004_team_season_win_data(sc, match_df, output_home_file)

    // Update job and spark status:
    // Spark run finished, result awaiting sqoop import into the DB.
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)

    sc.stop()
  }

  /**
   * Computes, for every (team, season) pair, total goals scored, goals
   * conceded, number of matches, and the per-match scoring/conceding rates,
   * then saves the result as CSV lines to `output_file` on HDFS (replacing
   * any previous output at that path).
   *
   * match_df columns used (by position, all stored as strings):
   *   1 season_id, 3 host_id, 4 away_id, 6 host_score, 7 away_score
   *
   * Output line format:
   *   team_id,season_id,goals_for,goals_against,matches,for_rate,against_rate
   *
   * @param sc          active SparkContext (kept for interface compatibility)
   * @param match_df    DataFrame of all matches
   * @param output_file HDFS URI to write the CSV result to
   */
  def calculate_FBST2017004_team_season_win_data(sc: SparkContext, match_df: DataFrame, output_file: String): Unit = {

    // Home perspective:
    //   key   "hostId_seasonId"
    //   value (goals scored, goals conceded, 1 match)
    // e.g. (678_1978,(3,0,1))
    val home_rdd = match_df.rdd.map { p =>
      (s"${p.getString(3)}_${p.getString(1)}",
        (p.getString(6).toInt, p.getString(7).toInt, 1))
    }

    // Away perspective: same shape, scored/conceded swapped.
    val away_rdd = match_df.rdd.map { p =>
      (s"${p.getString(4)}_${p.getString(1)}",
        (p.getString(7).toInt, p.getString(6).toInt, 1))
    }

    // Sum per key. reduceByKey combines map-side before the shuffle, replacing
    // the previous groupByKey + per-group reduce (which shipped every raw
    // element across the network). The redundant .toInt calls on values that
    // were already Int are gone as well.
    // OUT: (teamId_seasonId, (goals_for, goals_against, matches))
    val totals_rdd = (home_rdd union away_rdd).reduceByKey {
      case ((gf1, ga1, n1), (gf2, ga2, n2)) => (gf1 + gf2, ga1 + ga2, n1 + n2)
    }

    // Format each aggregate as a CSV line, deriving the per-match rates
    // (2 decimal places, matching the original f"...%1.2f" format).
    val result_file_rdd = totals_rdd.map { case (key, (goals_for, goals_against, matches)) =>
      val keys = key.split("_")
      val team_id = keys(0)
      val season_id = keys(1)
      val get_score_tod = goals_for.toDouble / matches.toDouble
      val loss_score_tod = goals_against.toDouble / matches.toDouble
      val get_rate = f"$get_score_tod%1.2f"
      val loss_rate = f"$loss_score_tod%1.2f"
      Array(team_id, season_id, goals_for, goals_against, matches, get_rate, loss_rate).mkString(",")
    }

    // Save result to HDFS, first deleting any previous output so
    // saveAsTextFile does not fail on an existing path.
    if (!result_file_rdd.isEmpty()) {
      val path = new org.apache.hadoop.fs.Path(output_file)
      // FileSystem.get only consumes the scheme/authority of the URI, so the
      // full output path can be passed directly — this replaces the brittle
      // split-on-"9000" master extraction, which broke for any other port.
      val hdfs = org.apache.hadoop.fs.FileSystem.get(
        new java.net.URI(output_file), new org.apache.hadoop.conf.Configuration())
      if (hdfs.exists(path)) {
        hdfs.delete(path, true)
      }
      result_file_rdd.saveAsTextFile(output_file)
    }
  }

}





