
/*
* 数据统计类:
* 篮球:
*   1). 球队赛季主客场胜率 (16s)
*     例如: 篮球-湖人-2016/17-NBA-主场胜率/客场胜率
*
*
* */

//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/BK_BKST2017001.jar
//
//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.83:/root/BK_BKST2017001.jar
//
//nohup ./bin/spark-submit --master spark://skn-pmukvrk0-spark-master:7077 --class caiqr.model.statistics.bk.BKST2017001  --jars /usr/local/spark/lib/spark-redis-0.1.1.jar,/usr/local/spark/lib/mysql-connector-java-5.1.35.jar,/usr/local/spark/lib/jedis-2.7.0.jar  --executor-memory 4G  /root/BK_BKST2017001.jar  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502  maxResultSize=4g  job_id=8439 spark_id=11  big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/basketball_match_500w_1.csv  redis_server=172.16.0.67  redis_port=6379 output_home_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/BKST2017001_home.csv  output_away_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/BKST2017001_away.csv  > /data/caiqiu/log/a.log < /dev/null 2>&1  &
//
//

// HDFS -> Mysql
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table BKST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/BKST2017001_away.csv
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table BKST2017001 --update-mode allowinsert --update-key "team_id,season_id,type" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/BKST2017001_home.csv
//



//caiqr.model.statistics.bk.BKST2017001

package caiqr.model.statistics.bk

import com.redislabs.provider.redis._
import caiqr.utils.PredictionUtils
import caiqr.utils.PredictionDBUtils
import caiqr.utils.AllBKMatchInputFile
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException
//import java.util.ArrayList

object BKST2017001 {

  /**
   * Entry point. Expects `key=value` command-line arguments:
   *
   *   - job_id, spark_id, big_file : required (job aborts if any is missing)
   *   - save_db_info               : DB connection info for the status update
   *   - output_home_file, output_away_file : HDFS output paths (CSV)
   *   - maxResultSize              : spark.driver.maxResultSize, default "4g"
   *
   * Computes per-team, per-season home and away win statistics and writes
   * one CSV per venue type to HDFS, then marks the job as finished
   * (results are imported into MySQL afterwards by sqoop — see file header).
   *
   * @throws IllegalArgumentException when a required argument is missing or
   *         an argument is not of the form key=value
   */
  def main(args: Array[String]): Unit = {

    //////////////////////////////// argument parsing ////////////////////////////////
    // Parse "key=value" arguments into a Map. Split with limit 2 so values
    // that themselves contain '=' are preserved intact.
    val cmd_map = args.map { arg =>
      val parts = arg.split("=", 2)
      if (parts.length != 2) {
        throw new IllegalArgumentException(s"Spark main arg is not of form key=value: ${arg}")
      }
      (parts(0), parts(1))
    }.toMap

    val job_id       = cmd_map.getOrElse("job_id", "")
    val model_id     = cmd_map.getOrElse("spark_id", "")
    val big_file     = cmd_map.getOrElse("big_file", "")
    val save_db_info = cmd_map.getOrElse("save_db_info", "") // DB info

    // Result output files (HDFS)
    val output_home_file = cmd_map.getOrElse("output_home_file", "")
    val output_away_file = cmd_map.getOrElse("output_away_file", "")

    if (job_id.isEmpty || model_id.isEmpty || big_file.isEmpty) {
      // BUG FIX: the original message was a plain string literal, so the
      // text "${args.length}" was emitted verbatim; use the s-interpolator.
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }

    // Driver-side max result set size, default 4g
    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")
    //////////////////////////////// argument parsing ////////////////////////////////


    // Spark environment
    val conf = new SparkConf().setAppName("BKST2017001")
      .set("spark.driver.maxResultSize", maxResultSize)
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Load the match file from HDFS, returning a DataFrame
    val match_df = AllBKMatchInputFile.load(sc, sqlContext, big_file)

    // 1. Team season home/away win rate
    // home_flag: 1 = home, 0 = away
    calculate_BKST2017001_team_season_win_data(sc, 1, match_df, output_home_file)
    calculate_BKST2017001_team_season_win_data(sc, 0, match_df, output_away_file)

    // Update job and spark status:
    // spark run complete, waiting for sqoop import into the DB
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)

    sc.stop()
  }

  /**
   * 1. Team season home/away win rate.
   *
   * Groups matches by (team, season), orders each group chronologically,
   * concatenates the per-match results into a digit string, counts wins
   * ('3') and losses ('0'), and writes one CSV line per team+season:
   *
   *   team_id,season_id,home_flag,results,result_cnt,win,loss,win_pct,loss_pct,scores,match_ids
   *
   * @param sc          Spark context (kept for interface compatibility; not used directly)
   * @param home_flag   1 = aggregate home matches, 0 = aggregate away matches
   * @param match_df    match DataFrame; expected column layout by index:
   *                    0:match_id, 1:season_id, 2:season_pre, 3:host_id, 4:away_id,
   *                    5:group_pre, 6:host_score, 7:away_score,
   *                    8:match_time ("yyyy-MM-dd HH:mm:ss"), 9:backup, 10:recommend,
   *                    11:myear, 12:mmonth, 13:home_match_result, 14:away_match_result, 15:score
   * @param output_file HDFS path the result lines are saved to
   */
  def calculate_BKST2017001_team_season_win_data(sc: SparkContext, home_flag: Int, match_df: DataFrame, output_file: String): Unit = {
    // SimpleDateFormat is not thread-safe, but each Spark task deserializes
    // its own copy of the closure, so per-task use is single-threaded.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // Map each row to:
    //   (teamId_seasonId, ((match_result, match_time_millis), total_score, match_id))
    // choosing the host or away columns according to home_flag.
    val tuple_rdd = match_df.rdd.map { p =>
      val matchTimeMillis = sdf.parse(p.getString(8)).getTime
      val (teamIdx, resultIdx) = if (home_flag == 1) (3, 13) else (4, 14)
      (s"${p.getString(teamIdx)}_${p.getString(1)}",
        ((p.getString(resultIdx), matchTimeMillis), p.getString(15), p.getString(0)))
    }

    // Group by team+season and sort each group chronologically.
    // BUG FIX: the original compared epoch-millisecond timestamps as strings,
    // which misorders values with different digit counts (e.g. pre-2001
    // matches); keeping the Long and comparing numerically is always correct.
    // Example group (key 2407_3735):
    //   ((3,1122746400000),1,120028)
    //   ((0,1123956000000),3,120047)
    //   ((1,1124908200000),0,120067)
    val ordered_rdd = tuple_rdd.groupByKey().map { p =>
      (p._1, p._2.toArray.sortWith(_._1._2 < _._1._2))
    }

    // Collapse each ordered group into:
    //   (key, (concatenated result digits, "_"-joined scores), "_"-joined match ids)
    // e.g. (1989_3202,(30,3_3),471522_471530)
    val reduced_rdd = ordered_rdd.map { p =>
      (p._1,
        (p._2.map(_._1._1).mkString, p._2.map(_._2).mkString("_")),
        p._2.map(_._3).mkString("_"))
    }

    // Split the key back into its parts and attach the result count:
    //   (team_id, season_id, results, scores, match_ids, result_cnt)
    // e.g. 1056,3974,1033333100,2_2_3_3_4_5_7_2_2_1,606843_..._607043,10
    val summary_rdd = reduced_rdd.map { p =>
      val keys = p._1.split("_")
      (keys(0), keys(1), p._2._1, p._2._2, p._3, p._2._1.length)
    }

    // Build the final CSV lines. result_cnt >= 1 for every group (groupByKey
    // never yields an empty value sequence), so the divisions are safe.
    val result_rdd = summary_rdd.map { case (team_id, season_id, results, score, matchids, result_cnt) =>
      val win  = results.count(_ == '3') // '3' marks a win
      val loss = results.count(_ == '0') // '0' marks a loss

      // Percentages rounded to the nearest whole number
      val ratio3 = (win.toDouble / result_cnt * 100).round.toInt
      val ratio0 = (loss.toDouble / result_cnt * 100).round.toInt

      Seq(team_id, season_id, home_flag, results, result_cnt,
          win, loss, ratio3, ratio0, score, matchids).mkString(",")
    }

    // Persist the computed results to HDFS (skip when there is nothing to write)
    if (!result_rdd.isEmpty()) {
      result_rdd.saveAsTextFile(output_file)
    }
  }

}



