//FBE2017004
//变盘分析
//1. 初盘范围+即时盘范围匹配
//2. 主要博彩公司
//  3. 例如: 1.95-3.25-5.45 匹配范围为: 1.90-3.20-5.40
//4. 结果输出: redis, hashset-FBE2017004

// caiqr.model.FBE2017004.FBE2017004

//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/fb_FBE2017004.jar
//
//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.83:/data/caiqiu/console/jar/fb_FBE2017004.jar
//
//nohup ./bin/spark-submit --master spark://skn-pmukvrk0-spark-master:7077 --class caiqr.model.FBE2017004.FBE2017004  --jars /usr/local/spark/lib/spark-redis-0.1.1.jar,/usr/local/spark/lib/mysql-connector-java-5.1.35.jar,/usr/local/spark/lib/jedis-2.7.0.jar  --executor-memory 4G  /data/caiqiu/console/jar/fb_FBE2017004.jar  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502  maxResultSize=4g  job_id=8439 spark_id=11  big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_all_europe_500w_2.csv  match_big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/football_match_all_europe_500w_1.csv  min_match_result_cnt=10  redis_server=172.16.0.67  redis_port=6379 output_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBE2017004.csv  > /data/caiqiu/log/a.log < /dev/null 2>&1  &
//
//
//sqoop export  --connect jdbc:mysql://172.16.4.17/prediction --username root --password Caiqiu502 --table FBE2017004 --update-mode allowinsert --update-key "match_id,company_id,init,curr" --fields-terminated-by ','  -export-dir hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/result/FBE2017004.csv



//欧盘-初盘范围+即时盘范围匹配
package caiqr.model.FBE2017004
import caiqr.utils.{PredictionUtils, PredictionDBUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException

object FBE2017004 {

  /**
   * Entry point. Parses "key=value" command-line arguments, loads the full
   * European-odds file and the odds of the matches to predict, aggregates
   * historical results per (company, init-range, current-range), writes the
   * per-match prediction lines to HDFS and finally marks the Spark stage as
   * finished in the prediction DB (sqoop imports the HDFS output afterwards).
   */
  def main(args: Array[String]): Unit = {

    //////////////////////////////// argument parsing ////////////////////////////////
    // Arguments arrive as "key=value" pairs. Split on the FIRST '=' only, so
    // values that themselves contain '=' (query strings, encoded tokens, ...)
    // are not truncated; the previous split("=") dropped everything after a
    // second '='. Malformed arguments now fail with a clear message instead of
    // an ArrayIndexOutOfBoundsException.
    val cmd_map = args.map { arg =>
      val items = arg.split("=", 2)
      if (items.length != 2) {
        throw new IllegalArgumentException(s"malformed argument, expected key=value: ${arg}")
      }
      (items(0), items(1))
    }.toMap

    val big_file = cmd_map.getOrElse("big_file", "")               // full odds-history data file
    val matchid_big_file = cmd_map.getOrElse("match_big_file", "") // odds of the matches to predict

    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")   // spark.driver.maxResultSize, default 4g

    val job_id = cmd_map.getOrElse("job_id", "")     // prediction job id (for status update)
    val model_id = cmd_map.getOrElse("spark_id", "") // model id (for status update)

    // DB connection info, dash-separated "host-db-user-password" (see run-book above).
    val save_db_info = cmd_map.getOrElse("save_db_info", "")
    val output_file = cmd_map.getOrElse("output_file", "") // HDFS path for the result file

    // Minimum number of historical matches a range combination must have;
    // combinations with fewer samples are dropped from the output.
    val min_match_result_cnt = cmd_map.getOrElse("min_match_result_cnt", "5")

    // 1. Spark context
    val sc = new SparkContext(new SparkConf()
      .setAppName("FBE2017004")
      .set("spark.driver.maxResultSize", maxResultSize)
    )
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // 2. Load the full European-odds file, keep only the columns needed below,
    // ordered by match, company and time.
    val src_europe_df = load(sc, sqlContext, big_file)
    val europe_df = src_europe_df.orderBy(src_europe_df("match_id").asc, src_europe_df("company_id").asc, src_europe_df("match_time").asc)
      .select("match_id","company_id","match_time","home_match_result","goal",
        "init_win_range","init_draw_range","init_loss_range",
        "curr_win_range","curr_draw_range","curr_loss_range")

    // 3. Load the odds of the matches that need a prediction.
    val src_need_calculate_match_df = load_match_file(sc, sqlContext, matchid_big_file)
    val need_calculate_match_df = src_need_calculate_match_df.
      orderBy(src_need_calculate_match_df("match_id").asc, src_need_calculate_match_df("company_id").asc, src_need_calculate_match_df("match_time").asc).
      select("match_id","company_id","init_win_range","init_draw_range","init_loss_range",
        "curr_win_range","curr_draw_range","curr_loss_range")

    // 4. Aggregate historical results per (company, init range, current range).
    val result_df = calculate_FBE2017004_data(europe_df, sqlContext)

    // 5. Join against the matches to predict and save the result to HDFS.
    calculate_match_FBE2017004_data(sc, result_df, need_calculate_match_df, min_match_result_cnt.toInt, output_file)

    // Spark work done; flag the job so the sqoop import into the DB can run.
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)

    sc.stop()
  }




  /**
   * Aggregates the odds history per (company, init-range triple, curr-range triple):
   * for every identical range-combination key it concatenates, in kickoff-time
   * order, the match results, match ids, match times and scores of all matches
   * that shared that combination.
   *
   * NOTE(review): in the returned schema the column NAMES "match_ids" and
   * "results" are swapped relative to their CONTENT — the "match_ids" column
   * holds the concatenated result characters (e.g. "310...") and "results"
   * holds the "_"-joined match ids. The downstream consumer
   * (calculate_match_FBE2017004_data) compensates positionally, so the names
   * are deliberately kept unchanged here for compatibility.
   *
   * @param europe_df  odds rows, columns: match_id, company_id, match_time,
   *                   home_match_result, goal, 3 init ranges, 3 curr ranges
   * @param sqlContext SQLContext used to build the result DataFrame
   * @return one row per range combination (all columns StringType)
   */
  def calculate_FBE2017004_data(europe_df: DataFrame, sqlContext: SQLContext): DataFrame = {
    // Key:   companyId_initWin_initDraw_initLoss_currWin_currDraw_currLoss
    // Value: ((homeMatchResult, matchTimeMillis), (matchId, matchTimeMillis), goal)
    // SimpleDateFormat is not thread-safe, but each Spark task deserializes its
    // own copy of the closure, so per-task use is single-threaded.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val tuple_same_init_odds_rdd = europe_df.rdd.map { p =>
      // columns: 0 match_id, 1 company_id, 2 match_time, 3 home_match_result,
      //          4 goal, 5-7 init ranges, 8-10 curr ranges
      val match_time_second = sdf.parse(p.getString(2)).getTime.toString
      (s"${p.getString(1)}_${p.getString(5)}_${p.getString(6)}_${p.getString(7)}_${p.getString(8)}_${p.getString(9)}_${p.getString(10)}",
        ((p.getString(3), match_time_second), (p.getString(0), match_time_second), p.getString(4)))
    }

    // Group identical range keys and sort each group by kickoff time. The epoch
    // millis are compared as strings; lexicographic order equals numeric order
    // as long as all timestamps have the same digit count (13 digits for dates
    // between 2001 and 2286 — assumed to hold for this data, TODO confirm).
    val grouped_rdd = tuple_same_init_odds_rdd.groupByKey().map { p =>
      (p._1, p._2.toArray.sortWith(_._1._2 < _._1._2))
    }

    // Concatenate each group with mkString (linear time) instead of the former
    // reduce(_+"_"+_) chains, which were O(n^2) in total string length.
    // Groups produced by groupByKey are never empty, so this is output-identical.
    val aggregated_rdd = grouped_rdd.map { case (key, rows) =>
      (key,
        (rows.map(_._1._1).mkString(""),  // result chars per match, e.g. "310..."
          rows.map(_._2._1).mkString("_"), // "_"-joined match ids
          rows.map(_._2._2).mkString("_"), // "_"-joined match times (epoch millis)
          rows.map(_._3).mkString("_")))   // "_"-joined scores
    }

    // Flatten each aggregate to a single comma-separated line. The fields are
    // digit/underscore strings only, so the split(",") below is safe.
    val same_init_odds_map_rdd = aggregated_rdd.map { case (key, values) =>
      val keys = key.split("_")
      // Number of historical matches = length of the result-character string.
      val result_size = values._1.length
      s"${keys(0)},${keys(1)},${keys(2)},${keys(3)},${keys(4)},${keys(5)},${keys(6)},${values._1},${values._2},${values._3},${values._4},${result_size}"
    }

    // Build the result DataFrame. Schema string kept byte-identical: the
    // consumer selects these columns by name (see the swap note above).
    val schemaString = "bcompany_id,binit_win,binit_draw,binit_loss,bcurr_win,bcurr_draw,bcurr_loss,match_ids,results,match_times,goals,result_size"
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val schema =
      StructType(
        schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    val rowRDD = same_init_odds_map_rdd.map(_.split(",")).map(p => Row(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), p(9), p(10), p(11)))
    sqlContext.createDataFrame(rowRDD, schema)
  }







  /**
   * Joins the aggregated range statistics against the matches to predict and
   * writes one CSV prediction line per (match, company) to `output_file` on HDFS.
   *
   * NOTE(review): in `result_df` the column NAMES "match_ids" and "results" are
   * swapped relative to their content (see how calculate_FBE2017004_data builds
   * its rows): column "match_ids" holds the concatenated result characters
   * ('3'/'1'/'0' per match) and column "results" holds the "_"-joined match
   * ids. The positional getString(8)/getString(9) below compensates for that
   * swap — do NOT "fix" either side independently.
   *
   * @param sc                   active SparkContext (currently unused here)
   * @param result_df            output of calculate_FBE2017004_data
   * @param match_df             odds rows of the matches to predict
   * @param min_match_result_cnt minimum historical sample count for a row to be kept
   * @param output_file          HDFS path passed to saveAsTextFile
   */
  def calculate_match_FBE2017004_data(sc: SparkContext, result_df: DataFrame, match_df: DataFrame, min_match_result_cnt: Int, output_file: String): Unit ={

    // Cross join then filter on equality of company and all six range columns.
    // NOTE(review): result_size is a StringType column compared against an Int;
    // this relies on Spark's implicit cast — confirm on the deployed Spark version.
    val new_result_df = result_df.join(match_df).
    where(result_df("bcompany_id") === match_df("company_id")).
      where(result_df("binit_win") === match_df("init_win_range")).
      where(result_df("binit_draw") === match_df("init_draw_range")).
      where(result_df("binit_loss") === match_df("init_loss_range")).
      where(result_df("bcurr_win") === match_df("curr_win_range")).
      where(result_df("bcurr_draw") === match_df("curr_draw_range")).
      where(result_df("bcurr_loss") === match_df("curr_loss_range")).
      where(result_df("result_size") > min_match_result_cnt).
      orderBy(match_df("match_id").asc, match_df("company_id").asc).
      select("match_id", "company_id", "binit_win", "binit_draw", "binit_loss", "bcurr_win", "bcurr_draw", "bcurr_loss", "match_ids","results","match_times","goals")

    //new_result_df.collect().foreach(println)

    val result_rdd = new_result_df.rdd.map{ p =>
      // Column 8 is NAMED "match_ids" but holds the result-character string (see note above).
      val results = p.getString(8)
      // Column 9 is NAMED "results" but holds the "_"-joined match ids; keep the 10 most recent.
      val match_ids = p.getString(9).split("_").reverse.slice(0,10).mkString("_")
      val goal =  p.getString(11)

      // One character per historical match: '3' = home win, '1' = draw, '0' = loss.
      val result_cnt = results.length.toInt
      val win = results.count(_ == '3')
      val draw = results.count(_ == '1')
      val loss = results.count(_ == '0')

      // Percentages (rounded). These ride along in the tuple but are not used
      // in the final output line below.
      val win_ratio = (win.toDouble / result_cnt.toDouble * 100).round.toInt
      val draw_ratio = (draw.toDouble / result_cnt.toDouble * 100).round.toInt
      val loss_ratio = (loss.toDouble / result_cnt.toDouble * 100).round.toInt

      // Combined two-outcome percentages (win-or-draw, win-or-loss, draw-or-loss).
      val ratio31 = ((win.toDouble+draw.toDouble) / result_cnt.toDouble * 100).round.toInt
      val ratio30 = ((win.toDouble+loss.toDouble) / result_cnt.toDouble * 100).round.toInt
      val ratio10 = ((draw.toDouble+loss.toDouble) / result_cnt.toDouble * 100).round.toInt


      //      ( s"${p.getString(0)}_${p.getString(1)}_${p.getString(2)}_${p.getString(3)}_${p.getString(4)}_${p.getString(5)}_${p.getString(6)}_${p.getString(7)}",
//        s"${result_cnt}-${win}-${draw}-${loss}-${win_ratio}-${draw_ratio}-${loss_ratio}-${results}-${goal}-${match_ids}")

      val match_id = p.getString(0)
      val company_id = p.getString(1)
      val init_win = p.getString(2)
      val init_draw = p.getString(3)
      val init_loss = p.getString(4)
      val curr_win = p.getString(5)
      val curr_draw = p.getString(6)
      val curr_loss = p.getString(7)

      (match_id,company_id,s"${init_win}_${init_draw}_${init_loss}",s"${curr_win}_${curr_draw}_${curr_loss}",result_cnt,win,draw,loss,win_ratio,draw_ratio,loss_ratio,ratio31,ratio30,ratio10,results,goal,match_ids)
    }

    // Format the final CSV lines and save to HDFS (later imported by sqoop).
    val result_file_rdd = result_rdd.
      map { case (match_id,company_id,init,curr,result_cnt,win,draw,loss,win_ratio,draw_ratio,loss_ratio,ratio31,ratio30,ratio10,results,goal,match_ids) => {


      // Derive the predicted outcome and its confidence index from the counts.
      // Returned as "result_index"; split into the two parts.
      val result_str = PredictionUtils.calcultion_spf_result(win,draw,loss)
      val items = result_str.split("_")
      val final_result = items(0)
      val caiqiu_index = items(1)

      Array("FBE2017004", match_id,company_id,result_cnt,win,draw,loss,final_result,caiqiu_index,match_ids,0).mkString(",")
    }
    }

    // saveAsTextFile fails on an existing path; also skip entirely-empty results.
    if(!result_file_rdd.isEmpty()) {
      result_file_rdd.saveAsTextFile(output_file)
    }


    //result_rdd.collect().foreach(println)
    //sc.toRedisHASH(result_rdd, s"FBE2017004", (redis_server, redis_port))
  }



  // Loads the full European-odds TSV file into a DataFrame of string columns.
  // Pass-through columns come straight from the file; the six odds columns
  // (TSV fields 12-17, odds in 1/1000 units) are truncated down to the lower
  // bound of their 100-wide bucket, e.g. 1995 -> 1900. TSV fields 11 and 18-19
  // are skipped — presumably unused columns of the export, TODO confirm
  // against the CSV layout.
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}

    val schemaString = "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,home_match_result,away_match_result,init_win_range,init_draw_range,init_loss_range,curr_win_range,curr_draw_range,curr_loss_range,goal"
    val schema = StructType(schemaString.split(",").map(name => StructField(name, StringType, nullable = true)))

    val rowRDD = sc.textFile(filename).map(_.split("\t")).map { cols =>
      // Truncate an odds value to the lower bound of its 100-wide range.
      def bucket(raw: String): String = ((raw.toInt / 100) * 100).toString

      Row(cols(0), cols(1), cols(2), cols(3), cols(4), cols(5), cols(6), cols(7), cols(8), cols(9), cols(10),
        bucket(cols(12)), bucket(cols(13)), bucket(cols(14)),
        bucket(cols(15)), bucket(cols(16)), bucket(cols(17)),
        cols(20))
    }

    sqlContext.createDataFrame(rowRDD, schema)
  }




  // Loads the odds file of the matches that need a prediction into a DataFrame
  // of string columns. The first 17 TSV fields are passed through unchanged;
  // the six range columns are derived from the raw odds (TSV fields 9-14,
  // in 1/1000 units) by truncating each value down to the lower bound of its
  // 100-wide bucket, e.g. 1995 -> 1900.
  def load_match_file(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}

    val schemaString = "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,init_win,init_draw,init_loss,curr_win,curr_draw,curr_loss,myear,mmonth,init_win_range,init_draw_range,init_loss_range,curr_win_range,curr_draw_range,curr_loss_range"
    val schema = StructType(schemaString.split(",").map(name => StructField(name, StringType, nullable = true)))

    val rowRDD = sc.textFile(filename).map(_.split("\t")).map { cols =>
      // Truncate an odds value to the lower bound of its 100-wide range.
      def bucket(raw: String): String = ((raw.toInt / 100) * 100).toString

      Row(cols(0), cols(1), cols(2), cols(3), cols(4), cols(5), cols(6), cols(7), cols(8),
        cols(9), cols(10), cols(11), cols(12), cols(13), cols(14), cols(15), cols(16),
        bucket(cols(9)), bucket(cols(10)), bucket(cols(11)),
        bucket(cols(12)), bucket(cols(13)), bucket(cols(14)))
    }

    sqlContext.createDataFrame(rowRDD, schema)
  }


}

