// 欧盘初盘线上模型(预测结果+大数据列表)
// 可以指定参数.
// caiqr.model.FBE2017001.FBE2017001
//
//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/fb_FBE2017001.jar
//
//nohup ./bin/spark-submit --master spark://skn-pmukvrk0-spark-master:7077 --class caiqr.model.FBE2017001.FBE2017001 --driver-class-path /usr/local/spark/lib/mysql-connector-java-5.1.35.jar --executor-memory 4G  /root/test.jar  match_id_list=616424_647763_628566_628562_577909_577908_577902_577904_577899_577903_577906_577907_577905_577901_578459_578456_578458_578454_578453_578451_578460_578457_578461_578452_578455_577900_578450_630290_632913_630291_631591_631615_631585_631621_628563_628561_574215_647690_647691_631597_631609   save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 continuous_same_total=10 max_continuous=1 recent_max_continuous=2 maxResultSize=4g  job_id=8414 spark_id=11  big_file=hdfs://skn-wf2zlrwn-hadoop-master:9000/data/caiqiu/csv/online_football_europe_500w_1.csv  > /data/caiqiu/log/a.log < /dev/null 2>&1  &
//
//
///参数格式:
// 必传参数(5个): match_id_list, save_db_info, job_id, spark_id, big_file
//1.match_id_list: "id_id_id_id"
//2.结果集保存DB信息: IP-DB名称-user-passwd
//3,4.连续相同赛果总数(默认大于"10"), 最大连续值(默认大于"1")
//5.最近连续场次: (默认大于"4")
//6.运行时结果集大小: 默认4g
//7.job_id
//8.model_id

package caiqr.model.FBE2017001

import caiqr.utils.PredictionUtils
import caiqr.utils.PredictionDBUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException

object FBE2017001 {

  /** Entry point: European initial-odds online model FBE2017001.
   *
   * Arguments are "key=value" strings. Required (5): match_id_list,
   * save_db_info, job_id, spark_id (used as the model id), big_file.
   * Optional with defaults: continuous_same_total ("10"), max_continuous
   * ("1"), recent_max_continuous ("4"), maxResultSize ("4g").
   *
   * Pipeline: load the odds file, find historical matches sharing each
   * on-sale match's initial odds, build per-odds result sequences, derive a
   * forecast from the win/draw/loss counts, attach the reference match-id
   * list, and persist everything to MySQL.
   */
  def main(args: Array[String]): Unit = {

    // At least 5 required key=value arguments:
    // match_id_list, save_db_info, job_id, spark_id, big_file
    if (args.length < 5) {
      // FIX: message was a plain literal, so "${args.length}" appeared
      // verbatim in the exception; it is now an interpolated string.
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }

    //////////////////////////////// argument parsing ////////////////////////////////
    // Convert "key=value" arguments into a Map. split with limit 2 keeps
    // values that themselves contain '=' intact (e.g. URLs with parameters).
    val cmd_map = args.toList.map { p =>
      val items = p.split("=", 2)
      (items(0), items(1))
    }.toMap

    // match_id_list: "id_id_id_id"
    val match_id_list_str = cmd_map.getOrElse("match_id_list", "")

    // Result DB info: IP-DBname-user-passwd
    val save_db_info = cmd_map.getOrElse("save_db_info", "")

    val job_id = cmd_map.getOrElse("job_id", "")

    // NOTE(review): the model id is intentionally read from the "spark_id"
    // key (see the sample spark-submit command at the top of the file).
    val model_id = cmd_map.getOrElse("spark_id", "")

    val big_file = cmd_map.getOrElse("big_file", "")

    if (match_id_list_str == "" || save_db_info == "" || job_id == "" || model_id == "" || big_file == "") {
      throw new IllegalArgumentException(
        s"Spark main args is error. ${args.length}: one of match_id_list/save_db_info/job_id/spark_id/big_file is missing")
    }

    // Minimum length of the identical-odds result sequence (default "10").
    val continuous_same_total = cmd_map.getOrElse("continuous_same_total", "10")

    // Minimum leading run length, exclusive (default "1").
    val max_continuous = cmd_map.getOrElse("max_continuous", "1")

    // Threshold for recording a "peak" run length (default "4").
    val recent_max_continuous = cmd_map.getOrElse("recent_max_continuous", "4")

    // Driver-side result collection limit (default "4g").
    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g")
    //////////////////////////////// argument parsing ////////////////////////////////


    // 1. Spark environment.
    val conf = new SparkConf().setAppName("FBE2017001").
      set("spark.driver.maxResultSize", maxResultSize)
    val sc = new SparkContext(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // 2. DataFrame holding the currently-on-sale match id list.
    val match_id_list_df = PredictionUtils.transation_match_id_list_to_df(match_id_list_str, sc)

    // 3. Load the big European odds file from HDFS as a DataFrame.
    val europe_df = FBE2017001InputFile.load(sc, sqlContext, big_file)

    // Initial odds and company id for every on-sale match.
    val init_odds_df = europe_df.join(match_id_list_df, europe_df("match_id") === match_id_list_df("sporttery_match_id")).
      orderBy(europe_df("match_id").asc, europe_df("company_id").asc).
      selectExpr("match_id as src_match_id", "company_id as src_company_id",
        "init_win as src_init_win", "init_draw as src_init_draw",
        "init_loss as src_init_loss", "match_time as src_match_time")

    // All finished historical matches (scores present and not the literal
    // string "NULL") that share a source match's initial odds for the same
    // company and took place earlier, ordered so same-odds rows are grouped
    // with the newest match first.
    val same_init_odds_df = europe_df.filter(europe_df("host_score").isNotNull).filter(europe_df("away_score").isNotNull)
      .filter(europe_df("host_score") !== "NULL").filter(europe_df("away_score") !== "NULL")
      .join(init_odds_df,
        europe_df("company_id") === init_odds_df("src_company_id")
          && europe_df("init_win") === init_odds_df("src_init_win")
          && europe_df("init_draw") === init_odds_df("src_init_draw")
          && europe_df("init_loss") === init_odds_df("src_init_loss")
          && europe_df("match_time") < init_odds_df("src_match_time")).
      orderBy(europe_df("init_win").asc, europe_df("init_draw").asc,
        europe_df("init_loss").asc, europe_df("company_id").asc, europe_df("match_time").desc).
      selectExpr("src_match_id", "match_id as find_match_id", "company_id",
        "init_win", "init_draw", "init_loss", "match_time", "match_result")

    // SimpleDateFormat is Serializable; every executor task deserializes its
    // own copy, so the (non-thread-safe) formatter is not shared.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // Tuples: (matchId_companyId_initWin_initDraw_initLoss,
    //          (match_result, (found_match_id, match_time_epoch_millis)))
    val tuple_same_init_odds_rdd = same_init_odds_df.rdd.map { p =>
      val match_time = p.getString(6)
      val match_time_second = sdf.parse(match_time).getTime
      (s"${p.getString(0)}_${p.getString(2)}_${p.getString(3)}_${p.getString(4)}_${p.getString(5)}",
        (p.getString(7), (p.getString(1), match_time_second.toString)))
    }

    // Build, per odds key, the ordered sequence of historical results.
    // 1) Attach a stable global index so the ordering produced by the
    //    orderBy above survives the groupByKey below.
    val tuple_same_init_odds_index_rdd = tuple_same_init_odds_rdd.zipWithIndex

    // 2) Re-shape to (key, ((match_result, index), (found_match_id, time))).
    val new_tuple_same_init_odds_index_rdd = tuple_same_init_odds_index_rdd.map(p =>
      (p._1._1, ((p._1._2._1, p._2), p._1._2._2))
    )

    // 3) Group by key and restore the original order inside each group using
    //    the attached index.
    val new_tuple_same_init_odds_index_order_rdd = new_tuple_same_init_odds_index_rdd.groupByKey().map { p =>
      val sortArray = p._2.toArray.sortWith(_._1._2 < _._1._2)
      (p._1, sortArray)
    }

    // 4) Reduce each group to
    //    (key, (result_sequence, "id_id_..", "time_time_..")).
    val new_tuple_same_init_odds_rdd = new_tuple_same_init_odds_index_order_rdd.map(p =>
      (p._1, (p._2.map(q => q._1._1).reduce(_ + _),
        p._2.map(q => q._2._1).reduce(_ + "_" + _),
        p._2.map(q => q._2._2).reduce(_ + "_" + _)))
    )

    // 5) Split the key back into its fields and compute the most recent
    //    result plus its leading run length. Row layout:
    //    (match_id, company_id, init_win, init_draw, init_loss,
    //     recent_result, recent_run_length, result_sequence,
    //     match_id_list, match_time_list)
    //    Filters: run length must exceed max_continuous and the sequence must
    //    be at least continuous_same_total long.
    val same_init_odds_map_rdd = new_tuple_same_init_odds_rdd.map(p =>
      (p._1.split("_")(0),
        p._1.split("_")(1),
        p._1.split("_")(2),
        p._1.split("_")(3),
        p._1.split("_")(4),
        p._2._1.toArray.head.toString,
        p._2._1.toArray.span(_ == p._2._1.toArray.head)._1.length.toString,
        p._2._1,
        p._2._2,
        p._2._3)).filter(p => p._7.toInt > max_continuous.toInt)
      .filter(p => p._8.length >= continuous_same_total.toInt)

    // Per row, record which result ("3" win / "1" draw / "0" loss) the most
    // recent run points at, keeping run lengths that reach the
    // recent_max_continuous threshold as "peak" strings.
    // OUT: (match_id, (win_cnt, draw_cnt, loss_cnt, win_peak, draw_peak, loss_peak))
    val result_list_rdd = same_init_odds_map_rdd.map(p => {
      var win_cnt = 0
      var draw_cnt = 0
      var loss_cnt = 0
      var win_peak_str = ""
      var draw_peak_str = ""
      var loss_peak_str = ""

      // NOTE(review): any recent result other than 3/1/0 throws a MatchError
      // here; match_result is assumed to only ever hold those digits.
      p._6.toInt match {
        case 3 =>
          win_cnt += 1
          if (p._7.toInt >= recent_max_continuous.toInt) {
            win_peak_str = p._7
          }
        case 1 =>
          draw_cnt += 1
          if (p._7.toInt >= recent_max_continuous.toInt) {
            draw_peak_str = p._7
          }
        case 0 =>
          loss_cnt += 1
          if (p._7.toInt >= recent_max_continuous.toInt) {
            loss_peak_str = p._7
          }
      }
      (p._1, (win_cnt, draw_cnt, loss_cnt, win_peak_str, draw_peak_str, loss_peak_str))
    })

    // Merge the per-company rows of one match: sum the counters and join the
    // non-empty peak strings with '-'.
    val result_list_rdd2 = result_list_rdd.reduceByKey((x, y) => {
      var win_peak_str = y._4
      var draw_peak_str = y._5
      var loss_peak_str = y._6

      if (win_peak_str != "") {
        if (x._4 != "") {
          win_peak_str = x._4 + "-" + win_peak_str
        }
      } else {
        win_peak_str = x._4
      }

      if (draw_peak_str != "") {
        if (x._5 != "") {
          draw_peak_str = x._5 + "-" + draw_peak_str
        }
      } else {
        draw_peak_str = x._5
      }

      if (loss_peak_str != "") {
        if (x._6 != "") {
          loss_peak_str = x._6 + "-" + loss_peak_str
        }
      } else {
        loss_peak_str = x._6
      }

      (x._1 + y._1, x._2 + y._2, x._3 + y._3, win_peak_str, draw_peak_str, loss_peak_str)
    })

    // Replace empty peak strings with the "N" placeholder and serialize:
    // (match_id, "winCnt_drawCnt_lossCnt_winPeak_drawPeak_lossPeak")
    val result_list_rdd3 = result_list_rdd2.map(p => {
      var win_peak_str = p._2._4
      var draw_peak_str = p._2._5
      var loss_peak_str = p._2._6

      if (win_peak_str == "") {
        win_peak_str = "N"
      }
      if (draw_peak_str == "") {
        draw_peak_str = "N"
      }
      if (loss_peak_str == "") {
        loss_peak_str = "N"
      }

      (p._1, s"${p._2._1}_${p._2._2}_${p._2._3}_${win_peak_str}_${draw_peak_str}_$loss_peak_str")
    })

    // Derive the forecast ("3"/"1"/"0" or a two-way combination such as "31")
    // from the win/draw/loss counters. cnt_map collapses equal counters, so
    // cnt_map.size detects ties between the three counts.
    // OUT: (match_id, stats_string, forecast)
    val result_list_rdd4 = result_list_rdd3.map { p =>
      val items = p._2.split("_")
      val cnt_3 = items(0).toInt
      val cnt_1 = items(1).toInt
      val cnt_0 = items(2).toInt

      val cnt_map = Map((cnt_3, "3"), (cnt_1, "1"), (cnt_0, "0"))
      val cnt = List(cnt_3, cnt_1, cnt_0).sorted
      var final_match_result = ""

      // Tie cases (exactly two of the three counters are equal), e.g.
      // [11-6-11] [15-15-5] [4-11-11] [10-10-6] [0-0-4]
      if (cnt_map.size == 2) {
        if (cnt_3 == cnt_0) { // [11-6-11] [6-11-6] [0-11-0]
          if (cnt_3 == 0 || cnt_3 < 5) {
            final_match_result = "31"
          } else {
            final_match_result = "30"
          }
        } else if (cnt_3 == cnt_1) {
          if (cnt_3 == 0) {
            final_match_result = "0"
          } else if (cnt_3 > cnt_0) { // [15-15-5]
            final_match_result = "31"
          } else if (cnt_3 < cnt_0) { // [5-5-7]
            final_match_result = "10"
          }
        } else if (cnt_1 == cnt_0) {
          if (cnt_1 == 0) { // [4-0-0]
            final_match_result = "3"
          } else if (cnt_1 > cnt_3) { // [4-11-11]
            final_match_result = "10"
          } else if (cnt_1 < cnt_3) { // [10-5-5] [20-3-3] [9-3-3]
            if (cnt_1 > 4) {
              final_match_result = "31"
            } else if (cnt_3 > 10) {
              final_match_result = "3"
            } else {
              val cha = cnt_3 - cnt_1
              if (cha > 5) {
                final_match_result = "3"
              } else {
                final_match_result = "31"
              }
            }
          }
        }
      } else {
        // No two-way tie: start from the dominant result and optionally add
        // the runner-up.
        if (cnt(2) < 10) {
          if (cnt(2) - cnt(1) > 5)
            final_match_result = cnt_map(cnt(2))
          else {
            final_match_result = cnt_map(cnt(2))
            final_match_result += cnt_map(cnt(1))
          }
        } else {
          final_match_result = cnt_map(cnt(2))
          if (cnt(1) > 4) {
            final_match_result += cnt_map(cnt(1))
          }
        }

        if (final_match_result.length == 1) {
          if (final_match_result == "1") {
            final_match_result = "10"
          }
        }

        // All three counters equal collapses cnt_map to a single entry.
        if (cnt_map.size == 1) {
          final_match_result = "10"
        }
      }

      // Normalize digit order to "3" > "1" > "0" (e.g. "13" -> "31").
      final_match_result = final_match_result.toList.sortWith((a, b) => a > b).mkString

      (p._1, p._2, final_match_result)
    }

    // Attach the big-data reference match id list:
    // ("301925", "9_0_0_..._N_N", "3", "337889;337921;...")
    val final_match_result_array = get_big_match_info_list(sc, result_list_rdd4, same_init_odds_map_rdd)

    // Final rows (peak lists numerically sorted, ';' separators rewritten to
    // ','): (match_id, win_cnt, draw_cnt, loss_cnt, win_peak, draw_peak,
    // loss_peak, forecast_result, big_data_match_id_list)
    val final_match_result_rdd = sc.parallelize(final_match_result_array).map(p => {
      val items = p._2.split("_")
      var win_peak = items(3) // sort peak run lengths numerically
      var draw_peak = items(4)
      var loss_peak = items(5)
      if (win_peak != "N") win_peak = win_peak.split("-").map(_.toInt).toList.sorted.mkString("-")
      if (draw_peak != "N") draw_peak = draw_peak.split("-").map(_.toInt).toList.sorted.mkString("-")
      if (loss_peak != "N") loss_peak = loss_peak.split("-").map(_.toInt).toList.sorted.mkString("-")

      (p._1, items(0), items(1), items(2), win_peak, draw_peak, loss_peak, p._3, p._4.replaceAll(";", ","))
    })

    // FIX: collect once (the original collected twice — once for logging and
    // once for serialization).
    val final_rows = final_match_result_rdd.collect()
    final_rows.foreach(println)

    // Serialize: fields joined by '_', rows joined by ';'.
    val model_run_result = final_rows.map { p =>
      Seq(p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8, p._9).mkString("_")
    }.mkString(";")

    // Persist the run result to MySQL.
    PredictionDBUtils.save_result_to_mysql(model_run_result, save_db_info, job_id, model_id)

    sc.stop()
  }

  /** Collects, for every predicted match, the "big data" reference match-id
   *  list backing the prediction.
   *
   * @param sc Spark context (kept for interface compatibility; the final
   *           aggregation steps now run locally on the driver).
   * @param result_list_rdd4 predictions: (match_id, stats_string, forecast)
   * @param same_init_odds_map_rdd rows of
   *        (match_id, company_id, init_win, init_draw, init_loss,
   *         recent_result, recent_run_length, result_sequence,
   *         match_id_list, match_time_list)
   * @return array of (match_id, stats_string, forecast, matched_id_list_str)
   */
  def get_big_match_info_list(sc: SparkContext,
                              result_list_rdd4: RDD[(String,String,String)],
                              same_init_odds_map_rdd:
                              RDD[(String,String,String,String,String,String,String,String,String,String)]):
  Array[(String, String, String, String)] = {

    // 1. Per row: key "matchId_recentResult" -> list of "foundId_matchTime"
    //    entries limited to the recent run length. A Set collapses duplicate
    //    (id, time) pairs.
    //    e.g. (301925_3, List(321929_1319058000000, 321926_1310058000000, ...))
    val big_data_match_info_rdd1 = same_init_odds_map_rdd.map { p =>
      val match_id = p._1
      val recent = p._6
      val recent_cnt = p._7
      val match_id_list = p._9.split("_")
      val match_time_list = p._10.split("_")

      // recent_cnt is the leading-run length of the result sequence, which is
      // built from the same rows as match_id_list, so the index stays in range.
      var match_info_set: Set[String] = Set(match_id_list(0) + "_" + match_time_list(0))
      var i = 1
      while (i < recent_cnt.toInt) {
        match_info_set += match_id_list(i) + "_" + match_time_list(i)
        i += 1
      }

      (match_id + "_" + recent, match_info_set.toList)
    }

    // 2. Merge the found-match lists per (match, result) key and sort each
    //    list by match time, newest first.
    //    FIX: the per-key sort and the list->map conversion previously
    //    round-tripped driver-local data through sc.parallelize(...); they
    //    are plain local operations now, with the same ordering semantics
    //    (descending lexicographic comparison of the time strings).
    val big_data_match_info_map: Map[String, Array[(String, String)]] =
      big_data_match_info_rdd1.reduceByKey(_ ::: _).collect().map { p =>
        val pairs = p._2.map { s =>
          val items = s.split("_")
          (items(0), items(1))
        }
        (p._1, pairs.sortWith(_._2 > _._2).toArray)
      }.toMap

    // 3. For each prediction, fetch the win/draw/loss reference lists and
    //    merge them according to the observed result ratio.
    val final_result_list = result_list_rdd4.collect().map { p =>
      val items = p._2.split("_")
      val cnt_3 = items(0).toInt
      val cnt_1 = items(1).toInt
      val cnt_0 = items(2).toInt

      val match_id = p._1

      // Missing keys simply contribute an empty list.
      val cnt_3_match_list = big_data_match_info_map.getOrElse(match_id + "_3", Array[(String, String)]())
      val cnt_1_match_list = big_data_match_info_map.getOrElse(match_id + "_1", Array[(String, String)]())
      val cnt_0_match_list = big_data_match_info_map.getOrElse(match_id + "_0", Array[(String, String)]())

      // Build the proportional, de-duplicated, time-ordered id list string.
      val big_match_id_list_str = get_cnt_ratio(cnt_3, cnt_1, cnt_0, cnt_3_match_list, cnt_1_match_list, cnt_0_match_list)

      (p._1, p._2, p._3, big_match_id_list_str)
    }

    final_result_list
  }


  /** Builds the "big data" reference match-id list for one prediction.
   *
   * Each candidate list (win/draw/loss) contributes a slice proportional to
   * its counter's share of the total, scaled to 10 slots; the merged ids are
   * de-duplicated and ordered by match time, newest first.
   *
   * @return semicolon-separated match ids, e.g. "1001;1002;2001" (the caller
   *         later rewrites ';' to ','), or "" when all counters are zero.
   */
  def get_cnt_ratio(cnt_3: Int, cnt_1: Int, cnt_0: Int,
                    cnt_3_match_list: Array[(String,String)],
                    cnt_1_match_list: Array[(String,String)],
                    cnt_0_match_list: Array[(String,String)] ): String = {

    if (cnt_3 == 0 && cnt_1 == 0 && cnt_0 == 0) {
      ""
    } else {
      val total = cnt_3 + cnt_1 + cnt_0

      // Share of the 10 slots for one counter, rounded half-up.
      def slots(cnt: Int): Long = ((cnt.toDouble / total) * 10).round

      val ratio_3 = slots(cnt_3)
      val rawRatio1 = slots(cnt_1)

      // Rounding can overshoot: when win+draw already exceed 10 slots, drop
      // the loss share entirely and clamp the draw share.
      val (ratio_1, ratio_0) =
        if (ratio_3 + rawRatio1 > 10) (10 - ratio_3, 0L)
        else (rawRatio1, 10 - ratio_3 - rawRatio1)

      // Take the first N distinct entries of each list per its slot count
      // (take on a zero/negative count yields nothing).
      val picked =
        cnt_3_match_list.distinct.take(ratio_3.toInt) ++
        cnt_1_match_list.distinct.take(ratio_1.toInt) ++
        cnt_0_match_list.distinct.take(ratio_0.toInt)

      // De-duplicate across lists, then order by match time, newest first.
      picked.distinct
        .sortWith(_._2.toLong > _._2.toLong)
        .map(_._1)
        .mkString(";")
    }
  }

  //val str = get_cnt_ratio(10,5,4, cnt_3_match_list,cnt_1_match_list,cnt_0_match_list)

  /** Rounds cnt/total scaled to 10 slots, half-up (e.g. 1/4 -> 3). */
  def get_cnt_round(cnt: Int, total: Int): Long = {
    val share = cnt.toDouble / total
    math.round(share * 10)
  }

//
//  def transation_match_id_list_to_df(match_id_list_str: String, sc: SparkContext): DataFrame ={
//    // 1. Array -> rdd
//    val match_id_list_rdd = sc.parallelize(match_id_list_str.split("_"))
//
//    //2. rdd -> DF
//    import org.apache.spark.sql.Row
//    val new_match_id_list_rdd = match_id_list_rdd.map(p => Row(p))
//
//    import org.apache.spark.sql.types.{StringType, StructField, StructType}
//    val newSchemaString = "sporttery_match_id"
//    val newSchema =
//      StructType(
//        newSchemaString.split(",").map(fieldName => StructField(fieldName, StringType)))
//
//    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
//    val match_id_list_df = sqlContext.createDataFrame(new_match_id_list_rdd, newSchema)
//    match_id_list_df
//  }
//
//



}



object FBE2017001InputFile {

  /** Loads the big European-odds TSV file into a DataFrame.
   *
   * Every column is read as a nullable String. Each line must provide at
   * least 21 tab-separated fields; extra fields are ignored, and a short
   * line fails at action time, exactly as before.
   *
   * @param sc         active Spark context
   * @param sqlContext SQL context used to build the DataFrame
   * @param filename   path (e.g. HDFS) of the odds file
   * @return DataFrame with the 21-column odds schema
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}

    val columnNames = Seq(
      "match_id", "company_id", "match_time", "match_month", "match_week",
      "season_id", "league", "host_score", "away_score", "match_result",
      "init_win", "init_draw", "init_loss", "init_time",
      "curr_win", "curr_draw", "curr_loss", "curr_time",
      "win_differ", "draw_differ", "loss_differ")

    val schema = StructType(columnNames.map(StructField(_, StringType, nullable = true)))

    // Exactly the first 21 fields of each line become one Row.
    val rows = sc.textFile(filename)
      .map(_.split("\t"))
      .map(fields => Row.fromSeq((0 until 21).map(fields(_))))

    sqlContext.createDataFrame(rows, schema)
  }

}

