


//
//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/fb_FBA2017006.jar
//
//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.83:/data/caiqiu/console/jar/fb_FBA2017006.jar
//
//
//nohup ./bin/spark-submit --master spark://skn-pmukvrk0-spark-master:7077 --class caiqr.model.FBA2017006.FBA2017006 --jars /usr/local/spark/lib/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /data/caiqiu/console/jar/fb_FBA2017006.jar min_match_result_cnt=10 maxResultSize=6g  big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_all_asia_500w_1.csv  match_big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_match_all_asia_500w_1.csv  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502  output_home_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBA2017006_home.csv  output_away_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBA2017006_away.csv job_id=8632 spark_id=11  > /data/caiqiu/log/a.log < /dev/null 2>&1  &
//
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBA2017006 --update-mode allowinsert --update-key "match_id,type,team_id,company_id,init_odds,curr_odds" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBA2017006_*.csv
//


//caiqr.model.FBA2017006.FBA2017006

//
//FBA2017006
//1. 主客队对阵相似球队战绩统计
//  2. 公司+亚盘初盘口+终盘口
//3. 实例: 巴萨vs皇马, 半球, 半球
//1). 巴萨,主场,bet365, 半球-半球,战绩统计
//2). 皇马,客场,bet365, 半球-半球,战绩统计
//4. 结果输出: match_fb_team_asia_water
//


package caiqr.model.FBA2017006
//import com.redislabs.provider.redis._
import caiqr.utils.AllAsiaInputFile
import caiqr.utils.PredictionUtils
import caiqr.utils.PredictionDBUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException

object FBA2017006 {

  /**
   * Entry point for model FBA2017006: for each match to predict, find the
   * historical record of the same team, with the same bookmaker and the same
   * initial/final Asian-handicap line, and write per-(match, company, line)
   * win/draw/loss statistics to HDFS (later exported to MySQL via sqoop).
   *
   * Expected arguments, as "key=value" pairs:
   *   big_file             - HDFS path of the full Asian-handicap odds history
   *   match_big_file       - HDFS path of the matches that need a prediction
   *   save_db_info         - DB connection info for the job-status update
   *   maxResultSize        - spark.driver.maxResultSize (default "4g")
   *   min_match_result_cnt - minimum historical results required (default "3")
   *   job_id / spark_id    - identifiers used to update the job status row
   *   output_home_file / output_away_file - HDFS output paths
   */
  def main(args: Array[String]){

    //////////////////////////////// parse arguments ////////////////////////////////
    // Turn "key=value" args into a Map. split limit 2 preserves any '=' inside
    // the value (e.g. JDBC URL params); args without '=' are skipped instead of
    // throwing ArrayIndexOutOfBoundsException.
    val cmd_map = args.iterator
      .map(_.split("=", 2))
      .collect { case Array(k, v) => (k, v) }
      .toMap

    val big_file = cmd_map.getOrElse("big_file", "")               // full odds history file (all handicap lines)
    val matchid_big_file = cmd_map.getOrElse("match_big_file", "") // odds of the matches to predict
    val save_db_info = cmd_map.getOrElse("save_db_info", "")
    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")
    val min_match_result_cnt = cmd_map.getOrElse("min_match_result_cnt", "3")
    val job_id = cmd_map.getOrElse("job_id", "")
    val model_id = cmd_map.getOrElse("spark_id", "")
    val output_home_file = cmd_map.getOrElse("output_home_file", "")
    val output_away_file = cmd_map.getOrElse("output_away_file", "")


    // 1. Spark environment
    val sc = new SparkContext(new SparkConf()
      .setAppName("FBA2017006")
      .set("spark.driver.maxResultSize", maxResultSize)
    )
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // 2. Load the Asian-handicap odds history from HDFS as a DataFrame.
    val src_asia_df = AllAsiaInputFile.load(sc, sqlContext, big_file)
    val asia_df = src_asia_df
      .orderBy(src_asia_df("match_id").asc, src_asia_df("company_id").asc, src_asia_df("match_time").asc)
      .select("match_id","company_id","match_time","home_match_result","away_match_result","curr_home_result","curr_away_result","init_odds","curr_odds","host_id","away_id")

    // 3. Load the odds of the matches that need a prediction.
    val src_need_calculate_match_df = AllAsiaInputFile.load_match_file(sc, sqlContext, matchid_big_file)
    val need_calculate_match_df = src_need_calculate_match_df
      .orderBy(src_need_calculate_match_df("match_id").asc, src_need_calculate_match_df("company_id").asc, src_need_calculate_match_df("match_time").asc)
      .select("match_id","company_id","init_odds","curr_odds","host_id","away_id")


    // 4. Compute: once for the home side (type 1), once for the away side (type 0).
    // Home team: build "same team + same company + same init/final line" history,
    // then match it against the matches to predict and save to HDFS.
    val home_type = 1
    val home_result_init_curr_df = calculate_asia_init_curr_odds_data(asia_df, sqlContext, home_type)
    calculate_match_team_asia_range_data(home_result_init_curr_df, need_calculate_match_df, min_match_result_cnt.toInt, home_type, output_home_file)

    // Away team: identical pipeline keyed on away_id.
    val away_type = 0
    val away_result_init_curr_df = calculate_asia_init_curr_odds_data(asia_df, sqlContext, away_type)
    calculate_match_team_asia_range_data(away_result_init_curr_df, need_calculate_match_df, min_match_result_cnt.toInt, away_type, output_away_file)


    // Mark the job/spark status as finished; sqoop then exports the HDFS
    // result files into the database.
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)


    sc.stop()
  }



  /**
   * Builds per-(team, company, initial line, final line) historical statistics.
   *
   * Each odds row is keyed by "teamId_companyId_initOdds_currOdds"; rows sharing
   * a key are grouped, sorted chronologically, and concatenated into sequences.
   *
   * @param asia_df    odds DataFrame with columns (in order): match_id, company_id,
   *                   match_time, home_match_result, away_match_result,
   *                   curr_home_result, curr_away_result, init_odds, curr_odds,
   *                   host_id, away_id
   * @param sqlContext used to build the result DataFrame
   * @param team_type  1 = home-team statistics (keys on host_id, uses home results),
   *                   anything else = away-team statistics (away_id / away results)
   * @return DataFrame with all-StringType columns: bteam_id, bcompany_id,
   *         binit_odds, bcurr_odds, results (concatenated result chars),
   *         match_ids ('_'-joined), match_times ('_'-joined epoch millis),
   *         lresult (concatenated scores), result_size (number of matches)
   */
  def calculate_asia_init_curr_odds_data(asia_df: DataFrame, sqlContext: SQLContext, team_type: Int): DataFrame ={
    // NOTE: SimpleDateFormat is not thread-safe, but each Spark task executes
    // this closure single-threaded, so one instance per task closure is safe.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // 1. Key every row; the value carries ((matchResult, epochMillis),
    //    (matchId, epochMillis), score).
    val tuple_same_init_odds_rdd = asia_df.rdd.map{ p =>
      // Column indices: 0 match_id, 1 company_id, 2 match_time,
      // 3 home_match_result, 4 away_match_result, 5 curr_home_result,
      // 6 curr_away_result, 7 init_odds, 8 curr_odds, 9 host_id, 10 away_id
      val match_time_second = sdf.parse(p.getString(2)).getTime.toString

      if(team_type==1){
        // ( hostId_companyId_initOdds_currOdds, ((homeResult, t), (matchId, t), homeScore) )
        (s"${p.getString(9)}_${p.getString(1)}_${p.getString(7)}_${p.getString(8)}",
          ((p.getString(3), match_time_second), (p.getString(0), match_time_second), p.getString(5)))
      }else{
        // ( awayId_companyId_initOdds_currOdds, ((awayResult, t), (matchId, t), awayScore) )
        (s"${p.getString(10)}_${p.getString(1)}_${p.getString(7)}_${p.getString(8)}",
          ((p.getString(4), match_time_second), (p.getString(0), match_time_second), p.getString(6)))
      }
    }

    // 2. Group by key and sort each group by match time so the concatenated
    //    sequences are chronological. Compare as Long: lexicographic comparison
    //    of the stringified epoch millis mis-orders values with different digit
    //    counts (e.g. pre-2001 timestamps have 12 digits, later ones 13).
    val new_tuple_same_init_odds_index_order_rdd = tuple_same_init_odds_rdd.groupByKey().map{ p =>
      val sortArray = p._2.toArray.sortWith(_._1._2.toLong < _._1._2.toLong)
      (p._1, sortArray)
    }

    // 3. Reduce each group into concatenated sequences:
    //    (key, (resultSeq, matchIdSeq, matchTimeSeq, scoreSeq))
    //    e.g. (472_1700_..., (310, 321925_331925_305925, t1_t2_t3, 33_10_21))
    val new_tuple_same_init_odds_rdd = new_tuple_same_init_odds_index_order_rdd.map(p =>
      (p._1, (p._2.map(_._1._1).reduce(_+_),
              p._2.map(_._2._1).reduce(_+"_"+_),
              p._2.map(_._2._2).reduce(_+"_"+_),
              p._2.map(_._3).reduce(_+_)))
    )

    // 4. Flatten to CSV lines: teamId,companyId,initOdds,currOdds,results,
    //    matchIds,matchTimes,scores,resultSize. Each result is one character,
    //    so the length of the concatenated result string is the match count.
    val same_init_odds_map_rdd = new_tuple_same_init_odds_rdd.map { p =>
      val keys = p._1.split("_")
      val team_id = keys(0)
      val company_id = keys(1)
      val init_odds = keys(2)
      val curr_odds = keys(3)
      val result_size = p._2._1.length

      s"${team_id},${company_id},${init_odds},${curr_odds},${p._2._1},${p._2._2},${p._2._3},${p._2._4},${result_size}"
    }

    // 5. Convert to a DataFrame with an all-string schema and return
    //    (not persisted to HDFS; the join step consumes it directly).
    val schemaString = "bteam_id,bcompany_id,binit_odds,bcurr_odds,results,match_ids,match_times,lresult,result_size"
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val schema =
      StructType(
        schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    val rowRDD = same_init_odds_map_rdd.map(_.split(",")).map(p => Row(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8)))
    sqlContext.createDataFrame(rowRDD, schema)
  }


  /**
   * Joins the per-team handicap statistics against the matches to predict and
   * writes one CSV line per matching (match, company, line) row to HDFS.
   *
   * Output line format:
   *   FBA2017006,teamType,matchId,teamId,companyId,resultCnt,win,draw,loss,
   *   finalResult,caiqiuIndex,matchIds,0
   *
   * @param result_df            output of calculate_asia_init_curr_odds_data
   * @param match_df             matches needing prediction (match_id, company_id,
   *                             init_odds, curr_odds, host_id, away_id)
   * @param min_match_result_cnt only keep rows with more than this many
   *                             historical results
   * @param team_type            1 = home (join on host_id), else away (away_id)
   * @param output_file          HDFS path passed to saveAsTextFile
   */
  def calculate_match_team_asia_range_data(result_df: DataFrame, match_df: DataFrame, min_match_result_cnt: Int, team_type: Int, output_file: String): Unit ={

    // Select the join column according to which side we are computing.
    val host_id = if (team_type == 1) "host_id" else "away_id"

    // Same team + same bookmaker + same initial line + same final line,
    // with enough historical samples. result_size is a StringType column;
    // Spark casts it for the numeric comparison.
    val init_odds_df = result_df.join(match_df)
      .where(result_df("bteam_id") === match_df(host_id))
      .where(result_df("bcompany_id") === match_df("company_id"))
      .where(result_df("binit_odds") === match_df("init_odds"))
      .where(result_df("bcurr_odds") === match_df("curr_odds"))
      .where(result_df("result_size") > min_match_result_cnt)
      .orderBy(match_df(host_id).asc, match_df("company_id").asc)
      .select(host_id,"company_id","init_odds","curr_odds","match_ids","results","lresult","match_id")


    // Build one CSV output line per joined row. Only the plain win/draw/loss
    // counts are exported; the ratio and handicap-result values the original
    // pipeline computed were never written to the output, so they are omitted.
    val result_file_rdd = init_odds_df.rdd.map { p =>
      val team_id = p.getString(0)
      val company_id = p.getString(1)
      // Keep only the 10 most recent historical match ids (sequence is
      // chronological, so reverse puts the newest first).
      val match_ids = p.getString(4).split("_").reverse.slice(0, 10).mkString("_")
      val results = p.getString(5) // concatenated result chars: '3' win, '1' draw, '0' loss
      val match_id = p.getString(7)

      val result_cnt = results.length
      val win = results.count(_ == '3')
      val draw = results.count(_ == '1')
      val loss = results.count(_ == '0')

      // Helper returns "finalResult_caiqiuIndex".
      val items = PredictionUtils.calcultion_spf_result(win, draw, loss).split("_")
      val final_result = items(0)
      val caiqiu_index = items(1)

      Array("FBA2017006", team_type, match_id, team_id, company_id, result_cnt, win, draw, loss, final_result, caiqiu_index, match_ids, 0).mkString(",")
    }


    // Skip the write entirely when there is nothing to save (avoids creating
    // an empty output directory on HDFS).
    if(!result_file_rdd.isEmpty()) {
      result_file_rdd.saveAsTextFile(output_file)
    }


  }



}


