
//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/fb_FBA2017005.jar
//
//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.83:/data/caiqiu/prediction/jar/fb_FBA2017005.jar
//
//nohup ./bin/spark-submit --master spark://skn-pmukvrk0-spark-master:7077 --class caiqr.model.FBA2017005.FBA2017005 --jars /usr/local/spark/lib/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /data/caiqiu/console/jar/fb_FBA2017005.jar min_match_result_cnt=10 maxResultSize=6g big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_all_asia_500w_1.csv match_big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_match_all_asia_500w_1.csv save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 output_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBA2017005.csv job_id=8632 spark_id=11 > /data/caiqiu/log/a.log < /dev/null 2>&1
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBA2017005 --update-mode allowinsert --update-key "match_id,company_id,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_ret,curr_ret" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBA2017005.csv


//
//caiqr.model.FBA2017005.FBA2017005
//
//
//
//根据指定赛事的初盘口(含水位,返奖率)+即时盘口(含水位,返奖率)
//I.e.: predict by matching the specified fixtures' opening Asian-handicap line (incl. water levels and return rate) plus the current/live line (incl. water levels and return rate) against identical historical lines.

package caiqr.model.FBA2017005
//import com.redislabs.provider.redis._
//import caiqr.model.fb_asia_water_retbonus.AsiaWaterRetInputFile
import caiqr.utils.AllAsiaInputFile
import caiqr.utils.PredictionUtils
import caiqr.utils.PredictionDBUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException

object FBA2017005 {
  // Spark batch job "FBA2017005".
  // Matches each target fixture's Asian-handicap opening line (water levels + odds + return
  // rate) and current line against the historical result distribution of identical lines,
  // derives a win/draw/loss prediction, and saves the result rows to HDFS (later exported to
  // MySQL via sqoop — see the commands in the file header).

  def main(args: Array[String]){

    //////////////////////////////// parse arguments ////////////////////////////////
    // Convert "key=value" command-line arguments into a Map.
    // NOTE(review): split("=") assumes every argument is a well-formed key=value pair;
    // a value containing '=' would be truncated, and an argument without '=' throws
    // ArrayIndexOutOfBoundsException. Safe given the fixed submit command in the header,
    // but fragile — split("=", 2) plus a length check would be sturdier.
    val cmd_list = args.toList
    val cmd = cmd_list.map{p =>
      val items = p.split("=")
      (items(0),items(1))
    }
    val cmd_map = cmd.toMap


    val big_file_res1 = cmd_map.get("big_file") // big data file with all historical Asian-handicap lines
    val big_file = big_file_res1.getOrElse("")

    val matchid_big_file_res1 = cmd_map.get("match_big_file") // odds data file for the matches to be predicted
    val matchid_big_file = matchid_big_file_res1.getOrElse("")

    val save_db_info_res1 = cmd_map.get("save_db_info") // DB connection info ("host-db-user-password" per the header example)
    val save_db_info = save_db_info_res1.getOrElse("")

    val maxResultSize_res1 = cmd_map.get("maxResultSize") // spark.driver.maxResultSize; defaults to 4g
    val maxResultSize = maxResultSize_res1.getOrElse("4g")

    val min_match_result_cnt_res1 = cmd_map.get("min_match_result_cnt") // minimum historical sample size for a line to be used
    val min_match_result_cnt = min_match_result_cnt_res1.getOrElse("5")

    val job_id_res1 = cmd_map.get("job_id") // job row to mark finished in the DB
    val job_id = job_id_res1.getOrElse("")

    val model_id_res1 = cmd_map.get("spark_id") // spark/model row to mark finished in the DB
    val model_id = model_id_res1.getOrElse("")
    val output_file = cmd_map.get("output_file").getOrElse("") // HDFS output path for the prediction CSV


    // 1. Spark context / SQL context.
    val sc = new SparkContext(new SparkConf()
      .setAppName("FBA2017005")
      .set("spark.driver.maxResultSize", maxResultSize)
    )
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // 2. Load the historical Asian-handicap file from HDFS, returning a DataFrame.
    val src_asia_df = AllAsiaInputFile.load(sc, sqlContext, big_file)
    val asia_df = src_asia_df.
      orderBy(src_asia_df("match_id").asc, src_asia_df("company_id").asc, src_asia_df("match_time").asc)
      .select("match_id","company_id","match_time","home_match_result","curr_home_result","init_odds","curr_odds",
        "init_home_water","init_away_water","curr_home_water","curr_away_water", "init_ret", "curr_ret")

    // 3. Load the big data file with the odds of the matches that need a prediction.
    val src_need_calculate_match_df = AllAsiaInputFile.load_match_file(sc, sqlContext, matchid_big_file)
    val need_calculate_match_df = src_need_calculate_match_df.
      orderBy(src_need_calculate_match_df("match_id").asc, src_need_calculate_match_df("company_id").asc, src_need_calculate_match_df("match_time").asc).
      select("match_id","company_id","init_odds","curr_odds","init_home_water","init_away_water","curr_home_water","curr_away_water", "init_ret", "curr_ret")



    // 3.1 Aggregate historical results per (company, opening line + water, current line + water, return rates).
    val result_init_curr_df = calculate_asia_init_curr_ret_data(asia_df, sqlContext)
    //println(s"####result_init_curr_df=${result_init_curr_df.collect().size}")


    // 5.1 Join the target fixtures against the aggregated history (opening-line + current-line
    //     + water levels + return rates) and write prediction rows to HDFS.
    calculate_match_asia_range_data(result_init_curr_df, need_calculate_match_df, min_match_result_cnt.toInt, output_file)


    // Update job and spark status in the DB:
    // spark run finished, now waiting for the sqoop import into the DB.
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)


    sc.stop()
  }




  // Builds the historical result aggregation for Asian-handicap lines.
  //
  // Groups the historical records by the full line signature — company + opening
  // water/odds/water + current water/odds/water + opening/current return rate — and, in
  // kickoff-time order, concatenates the match results, match ids, kickoff times and scores
  // observed for each signature.
  //
  // Returns a DataFrame with columns:
  //   bcompany_id, binit_home, binit_odds, binit_away, bcurr_home, bcurr_odds, bcurr_away,
  //   binit_ret, bcurr_ret, match_ids, results, match_times, lresults, result_size
  // NOTE(review): the "match_ids" column actually holds the concatenated match RESULT digits
  // and the "results" column holds the underscore-joined match IDS — the names are swapped.
  // The consumer (calculate_match_asia_range_data) reads them with the same swap, so
  // behavior is consistent, but the schema names are misleading; renaming end-to-end is
  // recommended as a follow-up.
  def calculate_asia_init_curr_ret_data(asia_df: DataFrame, sqlContext: SQLContext): DataFrame ={
    //3. Opening-line -> current-line statistics.
    // Convert each row into a keyed tuple.
    // OUT:
    // (companyId_openingLine_currentLine_rets, ((match_result, match_time_ms), (match_id, match_time_ms), score))
    // NOTE(review): SimpleDateFormat is not thread-safe, but each Spark task deserializes its
    // own closure copy and runs sequentially, so this usage is safe.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val tuple_same_init_odds_rdd = asia_df.rdd.map{p =>
      //      column positions fixed by the select in main:
      //      0-4:  match_id, company_id, match_time, home_match_result, curr_home_result
      //      5-10: init_odds, curr_odds, init_home_water, init_away_water, curr_home_water, curr_away_water
      //      11,12: init_ret, curr_ret
      val match_time = p.getString(2)
      // parse throws ParseException on a malformed timestamp — assumes input data is clean.
      val match_time_second = sdf.parse(match_time).getTime

      // key:   company_id_init_home_water_init_odds_init_away_water_curr_home_water_curr_odds_curr_away_water_init_ret_curr_ret
      // value: ((match_result, match_time_ms), (match_id, match_time_ms), score)
      (s"${p.getString(1)}_${p.getString(7)}_${p.getString(5)}_${p.getString(8)}_${p.getString(9)}_${p.getString(6)}_${p.getString(10)}_${p.getString(11)}_${p.getString(12)}",((p.getString(3),match_time_second.toString()), (p.getString(0),match_time_second.toString()), p.getString(4)) )
    }
    //tuple_same_init_odds_rdd.collect().foreach(println)


    //4. Group by line signature (groupByKey) and sort each group's records by kickoff time,
    //   so the concatenated sequences below are chronological.
    //   The sort compares epoch-millisecond timestamps as STRINGS; they are all 13 digits
    //   for modern dates, so lexicographic order coincides with numeric order here.
    //OUT: (301925_487_1010_7500_16000,((3,0),(3,1),(3,2),...))
    val new_tuple_same_init_odds_index_order_rdd = tuple_same_init_odds_rdd.groupByKey().map{ p =>
      val sortArray = p._2.toArray.sortWith(_._1._2 < _._1._2)
      (p._1, sortArray)
    }
    //            new_tuple_same_init_odds_index_order_rdd.collect().foreach{p =>
    //              println(p._1)
    //              p._2.foreach(println)
    //            }


    //5. Reduce each group into concatenated sequences:
    //   (results concatenated, match ids joined by '_', kickoff times joined by '_', scores concatenated)
    //   Groups from groupByKey are never empty, so the reduce calls cannot fail.
    //OUT:
    //(472_1700_3100_4200_1200_4300_13000,(310,321925_331925_305925,2011-02-20 01:15:00_2011-01-13 04:45:00_2010-11-11 05:00:00, 33_10_21))
    val new_tuple_same_init_odds_rdd = new_tuple_same_init_odds_index_order_rdd.map(p =>
      (p._1, (p._2.map(p => p._1._1).reduce(_+_), p._2.map(p => p._2._1).reduce(_+"_"+_), p._2.map(p => p._2._2).reduce(_+"_"+_), p._2.map(p => p._3).reduce(_+_)))
    )
    //new_tuple_same_init_odds_rdd.collect().foreach(println)


    //6. Flatten each aggregated entry into one comma-separated line for the final result.
    //OUT:
    //(472,1700,3100,4200,1200,4300,13000,33,215024_214933,1255543200000_1255545900000,33_10_12)
    //(company, opening line, current line, result sequence, match-id sequence, kickoff sequence, score sequence)
    val same_init_odds_map_rdd = new_tuple_same_init_odds_rdd.map { p =>
      val keys = p._1.split("_")
      val company_id = keys(0)
      val init_home = keys(1)
      val init_odds = keys(2)
      val init_away = keys(3)
      val curr_home = keys(4)
      val curr_odds = keys(5)
      val curr_away = keys(6)
      val init_ret = keys(7)
      val curr_ret = keys(8)
      // Historical sample size behind this line: the result string carries one character per
      // match (results are single digits — see the count(_ == '3') logic in the consumer),
      // so its length is the number of matches. (.toString and .toInt are redundant here.)
      val result_size = p._2._1.toString.length.toInt

      s"${company_id},${init_home},${init_odds},${init_away},${curr_home},${curr_odds},${curr_away},${init_ret},${curr_ret},${p._2._1},${p._2._2},${p._2._3},${p._2._4},${result_size}"
    }


    // 7. Wrap the lines into a DataFrame and return it.
    // No longer saved to HDFS (saving used to take ~2.6 min).
    // NOTE(review): as flagged above, "match_ids" holds the result digits and "results"
    // holds the match ids — the consumer compensates positionally.
    val schemaString = "bcompany_id,binit_home,binit_odds,binit_away,bcurr_home,bcurr_odds,bcurr_away,binit_ret,bcurr_ret,match_ids,results,match_times,lresults,result_size"
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val schema =
      StructType(
        schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    val rowRDD = same_init_odds_map_rdd.map(_.split(",")).map(p => Row(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), p(9), p(10), p(11), p(12), p(13)))
    val all_asia_range_df = sqlContext.createDataFrame(rowRDD, schema)
    return all_asia_range_df
  }



  // Matches the target fixtures' Asian-handicap data against the aggregated history and
  // writes one prediction row per (match, bookmaker) combination to HDFS.
  //
  // result_df: output of calculate_asia_init_curr_ret_data (history per line signature)
  // match_df:  fixtures to predict, with their opening/current line, water levels and return rates
  // min_match_result_cnt: minimum historical sample size for a signature to be trusted
  // output_file: HDFS directory for saveAsTextFile
  def calculate_match_asia_range_data(result_df: DataFrame, match_df: DataFrame, min_match_result_cnt: Int, output_file: String): Unit ={

    // Equi-join on the full line signature; keep only signatures with enough history.
    // NOTE(review): result_size is a StringType column compared against an Int — Spark
    // applies an implicit cast, presumably numeric; verify this is not a lexicographic
    // comparison on the Spark version in use.
    val init_odds_df = result_df.join(match_df).
      where(result_df("bcompany_id") === match_df("company_id")).
      where(result_df("binit_home") === match_df("init_home_water")).
      where(result_df("binit_odds") === match_df("init_odds")).
      where(result_df("binit_away") === match_df("init_away_water")).
      where(result_df("bcurr_home") === match_df("curr_home_water")).
      where(result_df("bcurr_odds") === match_df("curr_odds")).
      where(result_df("bcurr_away") === match_df("curr_away_water")).
      where(result_df("binit_ret") === match_df("init_ret")).
      where(result_df("bcurr_ret") === match_df("curr_ret")).
      where(result_df("result_size") > min_match_result_cnt).
      orderBy(match_df("match_id").asc, match_df("company_id").asc).
      select("match_id","company_id","init_home_water","init_odds","init_away_water","curr_home_water","curr_odds","curr_away_water","init_ret","curr_ret","match_ids","results","match_times","lresults")
    //init_odds_df.collect().foreach(println)


    // Turn each joined row into prediction statistics.
    // Column positions fixed by the select above:
    //   0 match_id, 1 company_id, 2-4 opening line, 5-7 current line, 8-9 return rates,
    //   10 "match_ids" (actually the result digits), 11 "results" (actually the match ids),
    //   12 match_times (unused), 13 lresults (handicap results / scores)
    val result_rdd = init_odds_df.rdd.map{ p =>

      val match_id = p.getString(0)
      val company_id = p.getString(1)

      val init_home = p.getString(2)
      val init_odds = p.getString(3)
      val init_away = p.getString(4)
      val init = s"${init_home}_${init_away}"

      val curr_home = p.getString(5)
      val curr_odds = p.getString(6)
      val curr_away = p.getString(7)
      val curr = s"${curr_home}_${curr_away}"
      val odds = s"${init_odds}_${curr_odds}"

      val init_ret = p.getString(8)
      val curr_ret = p.getString(9)
      val ret = s"${init_ret}_${curr_ret}"

      // Historical result digits for this line signature (one char per match).
      val results = p.getString(10)
      // The 10 most recent match ids: the sequence is chronological, so reverse puts the
      // newest first before taking the first 10.
      val match_ids = p.getString(11).split("_").reverse.slice(0,10).mkString("_")
      val lresults = p.getString(13)

      // Win/draw/loss tallies over the historical results.
      // Result encoding: '3' = win, '1' = draw, '0' = loss — TODO(review) confirm against
      // the data dictionary. results is non-empty (groups were non-empty), so no div-by-zero.
      val result_cnt = results.length.toInt // W/D/L sample size (.toInt is redundant)
      val win = results.count(_ == '3')
      val draw = results.count(_ == '1')
      val loss = results.count(_ == '0')
      val win_ratio = (win.toDouble / result_cnt.toDouble * 100).round.toInt
      val draw_ratio = (draw.toDouble / result_cnt.toDouble * 100).round.toInt
      val loss_ratio = (loss.toDouble / result_cnt.toDouble * 100).round.toInt

      // Same tallies over the handicap ("l" = let-ball) results.
      val lresult_cnt = lresults.length.toInt
      val lwin = lresults.count(_ == '3') // handicap W/D/L results
      val ldraw = lresults.count(_ == '1')
      val lloss = lresults.count(_ == '0')
      val lwin_ratio = (lwin.toDouble / lresult_cnt.toDouble * 100).round.toInt
      val ldraw_ratio = (ldraw.toDouble / lresult_cnt.toDouble * 100).round.toInt
      val lloss_ratio = (lloss.toDouble / lresult_cnt.toDouble * 100).round.toInt

      (match_id,company_id,odds,init,curr,ret,result_cnt,win,draw,loss,win_ratio,draw_ratio,loss_ratio,lwin,ldraw,lloss,lwin_ratio,ldraw_ratio,lloss_ratio,results,lresults,match_ids)
    }


    // Format the prediction rows and save them to HDFS.
    val result_file_rdd = result_rdd.
      map { case (match_id,company_id,odds,init,curr,ret,result_cnt,win,draw,loss,win_ratio,draw_ratio,loss_ratio,lwin,ldraw,lloss,lwin_ratio,ldraw_ratio,lloss_ratio,results,lresults,match_ids) => {

//      val ret_items = ret.split("_")
//      val init_ret = ret_items(0)
//      val curr_ret = ret_items(1)
//
//      val init_items = init.split("_")
//      val init_home = init_items(0)
//      val init_away = init_items(1)
//
//      val curr_items = curr.split("_")
//      val curr_home = curr_items(0)
//      val curr_away = curr_items(1)
//
//      val odds_items = odds.split("_")
//      val init_odds = odds_items(0)
//      val curr_odds = odds_items(1)
//
//
//
//      Array(match_id,company_id,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_ret,curr_ret,result_cnt,win,draw,loss,win_ratio,draw_ratio,loss_ratio,lwin,ldraw,lloss,lwin_ratio,ldraw_ratio,lloss_ratio,results,lresults,match_ids).mkString(",")


      // Derive the predicted outcome and confidence ("caiqiu index") from the W/D/L counts;
      // calcultion_spf_result returns "result_index" as a single underscore-joined string.
      val result_str = PredictionUtils.calcultion_spf_result(win,draw,loss)
      val items = result_str.split("_")
      val final_result = items(0)
      val caiqiu_index = items(1)

      // CSV row: model id, match, bookmaker, sample size, W/D/L counts, prediction,
      // index, recent match ids, trailing 0 (placeholder/status flag — TODO confirm meaning).
      Array("FBA2017005", match_id,company_id,result_cnt,win,draw,loss,final_result,caiqiu_index,match_ids,0).mkString(",")


    }
    }


    // Only write when there is at least one prediction, avoiding an empty output directory.
    if(!result_file_rdd.isEmpty()) {
      result_file_rdd.saveAsTextFile(output_file)
    }




  }




}