package caiqr.utils

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.rdd.RDD

object AllAsiaInputFile {

  import org.apache.spark.sql.Row
  import org.apache.spark.sql.types.{StringType, StructField, StructType}

  // ---------------------------------------------------------------------------
  // Column layouts for each tab-separated input file. Every column is loaded as
  // a nullable StringType; numeric interpretation happens downstream.
  // ---------------------------------------------------------------------------

  // Big Asian-handicap (football) file: raw columns 0-30 plus 4 derived columns.
  private val AsiaFields =
    "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,home_match_result,away_match_result,score,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_home_result,curr_home_result,init_away_result,curr_away_result,init_home_water,init_away_water,curr_home_water,curr_away_water,init_ret,curr_ret,myear,mmonth,half_result,half_all_result,goal,rqresult_diff,sscore"

  // Same layout extended with the half-time scores of both teams.
  private val AsiaHalfScoreFields =
    AsiaFields + ",host_half_score,away_half_score"

  // Matches still to be predicted (no result columns yet).
  private val MatchFields =
    "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_home_water,init_away_water,curr_home_water,curr_away_water,init_ret,curr_ret"

  // Matches to predict, with the jingcai handicap (rangqiu) odds appended.
  private val MatchRangqiuFields = MatchFields + ",rq_odds"

  // Matches to predict, with a derived month column appended.
  private val MatchWithMonthFields = MatchFields + ",month"

  // Basketball point-spread (handicap) file: raw columns 0-19 plus 2 derived.
  private val BkAsiaFields =
    "match_id,company_id,host_id,away_id,match_time,match_year,match_month,season_id,league,host_score,away_score,match_result,init_home_result,curr_home_result,init_win,init_odds,init_loss,curr_win,curr_odds,curr_loss,total_score,final_score_diff"

  // Basketball over/under (total points) file: raw columns 0-19 plus 1 derived.
  private val BkOverUnderFields =
    "match_id,company_id,host_id,away_id,match_time,match_year,match_month,season_id,league,host_score,away_score,match_result,init_home_result,curr_home_result,init_win,init_odds,init_loss,curr_win,curr_odds,curr_loss,final_score_diff"

  // Basketball odds-only files (both the point-spread and the over/under
  // variants share this exact layout).
  private val BkMatchFields =
    "match_id,company_id,host_id,away_id,match_time,match_year,match_month,season_id,league,init_win,init_odds,init_loss,curr_win,curr_odds,curr_loss"

  // ---------------------------------------------------------------------------
  // Shared helpers
  // ---------------------------------------------------------------------------

  /** Builds an all-StringType schema from a comma-separated list of column names. */
  private def stringSchema(fieldNames: String): StructType =
    StructType(fieldNames.split(",").map(name => StructField(name, StringType, nullable = true)))

  /**
   * Shared simple load path: reads `filename` as a tab-separated file and maps
   * the first `schema.length` columns of each line onto `schema`.
   *
   * NOTE: `split("\t", -1)` is used (here and in every loader below) so that
   * trailing empty columns are preserved; plain `split("\t")` drops them and
   * caused ArrayIndexOutOfBoundsException on rows whose last field was empty.
   */
  private def loadTsv(sc: SparkContext, sqlContext: SQLContext,
                      filename: String, schema: StructType): DataFrame = {
    val width = schema.length
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map(fields => Row.fromSeq(fields.take(width)))
    sqlContext.createDataFrame(rows, schema)
  }

  /**
   * Derived jingcai (sports-lottery) fields for one football record.
   *
   * @param p raw tab-separated columns (layout of [[AsiaFields]])
   * @return (half/full-time combined result, capped total goals,
   *         home-minus-away goal difference, capped exact-score result)
   */
  private def footballDerived(p: Array[String]): (String, Int, Int, String) = {
    val score = p(11)
    // Assumes the score is encoded as two single-digit characters, e.g. "21"
    // for 2:1 — TODO confirm double-digit scores cannot occur in this feed.
    val homeScore = score(0).asDigit
    val awayScore = score(1).asDigit

    // Home minus away goals: used to derive the handicap (rangqiu) 1X2 result.
    val rqScore = homeScore - awayScore

    // Half-time result concatenated with full-time result.
    val halfAllResult = p(30) + p(9)

    // Exact-score market caps extreme results into "other" buckets:
    //   home win beyond 5:x or x:2 -> "90"
    //   draw beyond 3:3           -> "99"
    //   away win beyond 2:x or x:5 -> "09"
    val newResult =
      if (homeScore > awayScore) {
        if (homeScore > 5 || awayScore > 2) "90" else score
      } else if (homeScore == awayScore) {
        if (homeScore > 3) "99" else score
      } else {
        if (homeScore > 2 || awayScore > 5) "09" else score
      }

    // Total-goals market groups 7 or more goals into a single "7+" bucket.
    val goal = math.min(homeScore + awayScore, 7)

    (halfAllResult, goal, rqScore, newResult)
  }

  /**
   * Encodes the basketball final-score margin as the lottery "win margin" code.
   *
   * Home win:  1-5 -> "301", 6-10 -> "306", 11-15 -> "311",
   *            16-20 -> "316", 21-25 -> "321", 26+ -> "326"
   * Away win:  same buckets with prefix "0" ("001" ... "026").
   * A tie is encoded with the away-win prefix "0" — NOTE(review): confirm ties
   * are impossible (or intended to count as away) in this feed.
   */
  private def basketballScoreDiff(homeScore: Int, awayScore: Int): String = {
    val diffScore = homeScore - awayScore
    val absDiff = diffScore.abs
    val bucket =
      if (absDiff <= 5) "01"
      else if (absDiff <= 10) "06"
      else if (absDiff <= 15) "11"
      else if (absDiff <= 20) "16"
      else if (absDiff <= 25) "21"
      else "26"
    (if (diffScore > 0) "3" else "0") + bucket
  }

  // ---------------------------------------------------------------------------
  // Public loaders (signatures unchanged for existing callers)
  // ---------------------------------------------------------------------------

  /**
   * Loads the big Asian-handicap (football) file and returns it as a DataFrame.
   * Appends four lottery columns derived from the raw record: half/full-time
   * combined result, capped total goals, handicap goal difference, and the
   * capped exact-score result.
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schema = stringSchema(AsiaFields)
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map { p =>
        val (halfAllResult, goal, rqScore, newResult) = footballDerived(p)
        Row.fromSeq(p.take(31) ++ Seq(halfAllResult, goal.toString, rqScore.toString, newResult))
      }
    sqlContext.createDataFrame(rows, schema)
  }

  /** Loads the football match file that still needs prediction (odds only). */
  def load_match_file(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename, stringSchema(MatchFields))

  /**
   * Loads the football match file that still needs prediction, including the
   * jingcai handicap (rangqiu) 1X2 odds column.
   */
  def load_match_file_rangqiu(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename, stringSchema(MatchRangqiuFields))

  /**
   * Loads the football match file that still needs prediction and appends a
   * derived `month` column parsed from `match_time`.
   */
  def load_add_match_file(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schema = stringSchema(MatchWithMonthFields)
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map { p =>
        // match_time looks like "yyyy-MM-dd ..."; the second dash-separated
        // token is the month. The toInt/toString round-trip strips any leading
        // zero ("05" -> "5"), matching how downstream code compares months.
        val month = p(2).split("-")(1).toInt
        Row.fromSeq(p.take(21) :+ month.toString)
      }
    sqlContext.createDataFrame(rows, schema)
  }

  /**
   * Same as [[load]] but for the file variant that additionally carries the
   * half-time scores of both teams (passed through unchanged as the last two
   * columns).
   */
  def load_asia_and_half_score(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schema = stringSchema(AsiaHalfScoreFields)
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map { p =>
        val (halfAllResult, goal, rqScore, newResult) = footballDerived(p)
        Row.fromSeq(p.take(31) ++
          Seq(halfAllResult, goal.toString, rqScore.toString, newResult, p(31), p(32)))
      }
    sqlContext.createDataFrame(rows, schema)
  }

  /**
   * Loads the big basketball point-spread file and appends two derived columns:
   * the total points scored and the encoded win-margin bucket.
   */
  def load_bk_asia(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schema = stringSchema(BkAsiaFields)
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map { p =>
        val homeScore = p(9).toInt
        val awayScore = p(10).toInt
        val totalScore = homeScore + awayScore // over/under (total points) market
        val finalScoreDiff = basketballScoreDiff(homeScore, awayScore)
        Row.fromSeq(p.take(20) ++ Seq(totalScore.toString, finalScoreDiff))
      }
    sqlContext.createDataFrame(rows, schema)
  }

  /**
   * Loads the big basketball over/under file and appends the encoded
   * win-margin bucket as the last column.
   */
  def load_bk_over_under(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schema = stringSchema(BkOverUnderFields)
    val rows = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map { p =>
        val finalScoreDiff = basketballScoreDiff(p(9).toInt, p(10).toInt)
        Row.fromSeq(p.take(20) :+ finalScoreDiff)
      }
    sqlContext.createDataFrame(rows, schema)
  }

  /** Loads the basketball point-spread odds file for matches to be predicted. */
  def load_match_file_bk_all_asia(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename, stringSchema(BkMatchFields))

  /** Loads the basketball over/under odds file for matches to be predicted. */
  def load_match_file_bk_all_over_under(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename, stringSchema(BkMatchFields))
}
