package caiqr.utils

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.rdd.RDD

object AllAsiaInputFile {

  import org.apache.spark.sql.Row
  import org.apache.spark.sql.types.{StringType, StructField, StructType}

  /** Builds an all-StringType, nullable schema from a comma-separated field-name list. */
  private def schemaFrom(schemaString: String): StructType =
    StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))

  /**
   * Generic loader: reads a tab-separated file and maps the first N columns of each
   * line onto the given schema, where N is the schema's field count.
   *
   * Note: `split("\t", -1)` keeps trailing empty columns; the default `split("\t")`
   * silently drops them, which would make rows with empty last fields throw
   * ArrayIndexOutOfBoundsException when the RDD is evaluated.
   */
  private def loadTsv(sc: SparkContext, sqlContext: SQLContext,
                      filename: String, schemaString: String): DataFrame = {
    val schema = schemaFrom(schemaString)
    val fieldCount = schema.length
    val rowRDD = sc.textFile(filename)
      .map(_.split("\t", -1))
      .map(parts => Row.fromSeq((0 until fieldCount).map(parts(_))))
    sqlContext.createDataFrame(rowRDD, schema)
  }

  /**
   * Loads the big Asian-handicap file and returns it as a DataFrame, deriving four
   * extra columns per row: half_all_result, goal (total goals, capped), rqresult_diff
   * (home minus away score) and sscore (Jingcai capped score string).
   *
   * @param sc         active SparkContext used to read the text file
   * @param sqlContext SQLContext used to build the DataFrame
   * @param filename   path of the tab-separated input file
   * @return DataFrame with the 31 raw columns plus the 4 derived columns
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val schemaString = "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,home_match_result,away_match_result,score,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_home_result,curr_home_result,init_away_result,curr_away_result,init_home_water,init_away_water,curr_home_water,curr_away_water,init_ret,curr_ret,myear,mmonth,half_result,half_all_result,goal,rqresult_diff,sscore"
    val schema = schemaFrom(schemaString)

    val rowRDD = sc.textFile(filename).map(_.split("\t", -1)).map { p =>
      // Score is assumed to be two concatenated single digits, e.g. "31" = 3:1.
      // NOTE(review): a side scoring 10+ goals would break this — TODO confirm upstream format.
      val score = p(11)
      val homeScore = score(0) - '0'
      val awayScore = score(1) - '0'
      // Home minus away goals; used downstream for the handicap (让球) W/D/L result.
      val handicapDiff = homeScore - awayScore
      // Half-time/full-time result string: half_result column + home_match_result column.
      val halfAllResult = p(30) + p(9)

      // Jingcai score market folds lopsided results into "other" buckets:
      // "90" = home-win-other, "99" = draw-other, "09" = away-win-other.
      val cappedScore =
        if (homeScore > awayScore) {
          if (homeScore > 5 || awayScore > 2) "90" else score
        } else if (homeScore == awayScore) {
          if (homeScore > 3) "99" else score
        } else {
          if (homeScore > 2 || awayScore > 5) "09" else score
        }

      // Jingcai total-goals market tops out at 7, meaning "7 or more".
      val totalGoals = math.min(homeScore + awayScore, 7)

      // First 31 raw columns (indices 0..30) followed by the 4 derived columns,
      // in the same order as the trailing schema fields.
      Row.fromSeq((0 to 30).map(p(_)) ++
        Seq(halfAllResult, totalGoals.toString, handicapDiff.toString, cappedScore))
    }

    sqlContext.createDataFrame(rowRDD, schema)
  }

  /**
   * Loads the match file to be scored (odds columns only, no result columns).
   *
   * @param sc         active SparkContext used to read the text file
   * @param sqlContext SQLContext used to build the DataFrame
   * @param filename   path of the tab-separated input file
   * @return DataFrame with 21 string columns
   */
  def load_match_file(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename,
      "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_home_water,init_away_water,curr_home_water,curr_away_water,init_ret,curr_ret")

  /**
   * Loads the match file to be scored, including the Jingcai handicap (让球胜平负)
   * line as an extra trailing column `rq_odds`.
   *
   * @param sc         active SparkContext used to read the text file
   * @param sqlContext SQLContext used to build the DataFrame
   * @param filename   path of the tab-separated input file
   * @return DataFrame with 22 string columns
   */
  def load_match_file_rangqiu(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame =
    loadTsv(sc, sqlContext, filename,
      "match_id,company_id,match_time,season_id,match_desc,season_pre,group_pre,host_id,away_id,init_home,init_odds,init_away,curr_home,curr_odds,curr_away,init_home_water,init_away_water,curr_home_water,curr_away_water,init_ret,curr_ret,rq_odds")

}
