package caiqr.model.fb_asia_water

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException

object AsiaWaterUtils {


  /**
   * Groups Asian-handicap rows by (company, initial handicap + waters, current
   * handicap + waters) and returns one row per combination holding the joined
   * per-match sequences, ordered chronologically by match time.
   *
   * Expected input columns (all read as String):
   *   0 match_id, 1 company_id, 2 match_time ("yyyy-MM-dd HH:mm:ss"),
   *   3 home_match_result, 4 curr_result,
   *   5 init_odds, 6 curr_odds, 7 init_home_water, 8 init_away_water,
   *   9 curr_home_water, 10 curr_away_water
   *
   * @param asia_df    source DataFrame with the column layout above
   * @param sqlContext context used to build the result DataFrame
   * @return DataFrame with all-String columns (see schemaString below)
   */
  def calculate_asia_init_curr_range_data(asia_df: DataFrame, sqlContext: SQLContext): DataFrame = {
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // 1. Key each row by
    //    companyId_initHomeWater_initOdds_initAwayWater_currHomeWater_currOdds_currAwayWater
    //    and carry ((match_result, time_ms), (match_id, time_ms), curr_result).
    val keyed = asia_df.rdd.map { row =>
      val timeMs = sdf.parse(row.getString(2)).getTime.toString
      val key = s"${row.getString(1)}_${row.getString(7)}_${row.getString(5)}_${row.getString(8)}_${row.getString(9)}_${row.getString(6)}_${row.getString(10)}"
      (key, ((row.getString(3), timeMs), (row.getString(0), timeMs), row.getString(4)))
    }

    // 2. Group by key and order each group chronologically.
    //    FIX: compare epoch-millis numerically (toLong). The original
    //    lexicographic string compare mis-orders timestamps whose decimal
    //    representations differ in length (12-digit pre-Sep-2001 vs 13-digit).
    val grouped = keyed.groupByKey().map { case (key, values) =>
      (key, values.toArray.sortBy(_._1._2.toLong))
    }

    // 3. Build the result Rows directly. (The original serialized each record
    //    to a comma-joined string and immediately re-split it; constructing
    //    Row here avoids that round-trip and any breakage from embedded commas.)
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val rowRdd = grouped.map { case (key, matches) =>
      val parts = key.split("_")
      val results     = matches.map(_._1._1).mkString("")   // match results, concatenated (e.g. "310")
      val matchIds    = matches.map(_._2._1).mkString("_")
      val matchTimes  = matches.map(_._2._2).mkString("_")  // epoch-millis sequence
      val currResults = matches.map(_._3).mkString("")
      // result_size = character count of the concatenated results string; this
      // equals the number of matches only when every result is a single char.
      Row(parts(0), parts(1), parts(2), parts(3), parts(4), parts(5), parts(6),
        results, matchIds, matchTimes, currResults, results.length.toString)
    }

    // NOTE(review): column names kept as-is for caller compatibility, but the
    // "match_ids" column actually receives the concatenated results and
    // "results" the joined match ids — confirm against downstream consumers.
    val schemaString = "bcompany_id,binit_home,binit_odds,binit_away,bcurr_home,bcurr_odds,bcurr_away,match_ids,results,match_times,lresults,result_size"
    val schema = StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    sqlContext.createDataFrame(rowRdd, schema)
  }


  /**
   * Groups Asian-handicap rows by (company, initial handicap + initial waters)
   * only — the initial-line variant of the range aggregation — and returns one
   * row per combination with joined per-match sequences ordered by match time.
   *
   * Expected input columns (all read as String):
   *   0 match_id, 1 company_id, 2 match_time ("yyyy-MM-dd HH:mm:ss"),
   *   3 home_match_result, 4 curr_result,
   *   5 init_odds, 6 curr_odds, 7 init_home_water, 8 init_away_water,
   *   9 curr_home_water, 10 curr_away_water
   *
   * @param asia_df    source DataFrame with the column layout above
   * @param sqlContext context used to build the result DataFrame
   * @return DataFrame with all-String columns (see schemaString below)
   */
  def calculate_asia_init_range_data(asia_df: DataFrame, sqlContext: SQLContext): DataFrame = {
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // 1. Key each row by companyId_initHomeWater_initOdds_initAwayWater and
    //    carry ((match_result, time_ms), (match_id, time_ms), curr_result).
    val keyed = asia_df.rdd.map { row =>
      val timeMs = sdf.parse(row.getString(2)).getTime.toString
      val key = s"${row.getString(1)}_${row.getString(7)}_${row.getString(5)}_${row.getString(8)}"
      (key, ((row.getString(3), timeMs), (row.getString(0), timeMs), row.getString(4)))
    }

    // 2. Group by key and order each group chronologically.
    //    FIX: numeric (toLong) compare instead of the original lexicographic
    //    string compare, which mis-orders epoch-millis of differing lengths.
    val grouped = keyed.groupByKey().map { case (key, values) =>
      (key, values.toArray.sortBy(_._1._2.toLong))
    }

    // 3. Build the result Rows directly, skipping the original
    //    join-to-CSV-then-split round-trip.
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val rowRdd = grouped.map { case (key, matches) =>
      val parts = key.split("_")
      val results     = matches.map(_._1._1).mkString("")   // match results, concatenated
      val matchIds    = matches.map(_._2._1).mkString("_")
      val matchTimes  = matches.map(_._2._2).mkString("_")  // epoch-millis sequence
      val currResults = matches.map(_._3).mkString("")
      // result_size = char count of the concatenated results (== match count
      // only when each result is a single character).
      Row(parts(0), parts(1), parts(2), parts(3),
        results, matchIds, matchTimes, currResults, results.length.toString)
    }

    // NOTE(review): names kept for caller compatibility, but "match_ids"
    // actually holds the concatenated results and "results" the joined match
    // ids — confirm against downstream consumers.
    val schemaString = "bcompany_id,binit_home,binit_odds,binit_away,match_ids,results,match_times,lresults,result_size"
    val schema = StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    sqlContext.createDataFrame(rowRdd, schema)
  }



  /**
   * Groups Asian-handicap rows by (company, current handicap + current waters)
   * only — the closing-line variant of the range aggregation — and returns one
   * row per combination with joined per-match sequences ordered by match time.
   *
   * Expected input columns (all read as String):
   *   0 match_id, 1 company_id, 2 match_time ("yyyy-MM-dd HH:mm:ss"),
   *   3 home_match_result, 4 curr_result,
   *   5 init_odds, 6 curr_odds, 7 init_home_water, 8 init_away_water,
   *   9 curr_home_water, 10 curr_away_water
   *
   * @param asia_df    source DataFrame with the column layout above
   * @param sqlContext context used to build the result DataFrame
   * @return DataFrame with all-String columns (see schemaString below)
   */
  def calculate_asia_curr_range_data(asia_df: DataFrame, sqlContext: SQLContext): DataFrame = {
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // 1. Key each row by companyId_currHomeWater_currOdds_currAwayWater and
    //    carry ((match_result, time_ms), (match_id, time_ms), curr_result).
    val keyed = asia_df.rdd.map { row =>
      val timeMs = sdf.parse(row.getString(2)).getTime.toString
      val key = s"${row.getString(1)}_${row.getString(9)}_${row.getString(6)}_${row.getString(10)}"
      (key, ((row.getString(3), timeMs), (row.getString(0), timeMs), row.getString(4)))
    }

    // 2. Group by key and order each group chronologically.
    //    FIX: numeric (toLong) compare instead of the original lexicographic
    //    string compare, which mis-orders epoch-millis of differing lengths.
    val grouped = keyed.groupByKey().map { case (key, values) =>
      (key, values.toArray.sortBy(_._1._2.toLong))
    }

    // 3. Build the result Rows directly, skipping the original
    //    join-to-CSV-then-split round-trip.
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val rowRdd = grouped.map { case (key, matches) =>
      val parts = key.split("_")
      val results     = matches.map(_._1._1).mkString("")   // match results, concatenated
      val matchIds    = matches.map(_._2._1).mkString("_")
      val matchTimes  = matches.map(_._2._2).mkString("_")  // epoch-millis sequence
      val currResults = matches.map(_._3).mkString("")
      // result_size = char count of the concatenated results (== match count
      // only when each result is a single character).
      Row(parts(0), parts(1), parts(2), parts(3),
        results, matchIds, matchTimes, currResults, results.length.toString)
    }

    // NOTE(review): names kept for caller compatibility, but "match_ids"
    // actually holds the concatenated results and "results" the joined match
    // ids — confirm against downstream consumers.
    val schemaString = "bcompany_id,bcurr_home,bcurr_odds,bcurr_away,match_ids,results,match_times,lresults,result_size"
    val schema = StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    sqlContext.createDataFrame(rowRdd, schema)
  }









  /**
   * Handicap-result ("rq") variant: groups rows by (company, initial handicap +
   * waters, current handicap + waters) and returns one row per combination with
   * "_"-joined result-diff and match-id sequences ordered by match time.
   *
   * Expected input columns (all read as String) — note the layout differs from
   * the other methods in this object:
   *   0 match_id, 1 company_id, 2 match_time ("yyyy-MM-dd HH:mm:ss"),
   *   3 rqresult_diff,
   *   4 init_odds, 5 curr_odds, 6 init_home_water, 7 init_away_water,
   *   8 curr_home_water, 9 curr_away_water
   *
   * @param asia_df    source DataFrame with the column layout above
   * @param sqlContext context used to build the result DataFrame
   * @return DataFrame with all-String columns (see schemaString below)
   */
  def calculate_asia_init_curr_range_rq_data(asia_df: DataFrame, sqlContext: SQLContext): DataFrame = {
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    // 1. Key each row by
    //    companyId_initHomeWater_initOdds_initAwayWater_currHomeWater_currOdds_currAwayWater
    //    and carry ((rqresult_diff, time_ms), (match_id, time_ms)).
    val keyed = asia_df.rdd.map { row =>
      val timeMs = sdf.parse(row.getString(2)).getTime.toString
      val key = s"${row.getString(1)}_${row.getString(6)}_${row.getString(4)}_${row.getString(7)}_${row.getString(8)}_${row.getString(5)}_${row.getString(9)}"
      (key, ((row.getString(3), timeMs), (row.getString(0), timeMs)))
    }

    // 2. Group by key and order each group chronologically.
    //    FIX: numeric (toLong) compare instead of the original lexicographic
    //    string compare, which mis-orders epoch-millis of differing lengths.
    val grouped = keyed.groupByKey().map { case (key, values) =>
      (key, values.toArray.sortBy(_._1._2.toLong))
    }

    // 3. Build the result Rows directly, skipping the original
    //    join-to-CSV-then-split round-trip. Unlike the other methods here,
    //    result diffs are "_"-joined (they may be multi-character), and
    //    result_size counts the joined elements, i.e. the number of matches.
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val rowRdd = grouped.map { case (key, matches) =>
      val parts = key.split("_")
      val resultDiffs = matches.map(_._1._1).mkString("_")
      val matchIds    = matches.map(_._2._1).mkString("_")
      Row(parts(0), parts(1), parts(2), parts(3), parts(4), parts(5), parts(6),
        resultDiffs, matchIds, matches.length.toString)
    }

    val schemaString = "bcompany_id,binit_home,binit_odds,binit_away,bcurr_home,bcurr_odds,bcurr_away,rqresult_diff,match_ids,result_size"
    val schema = StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    sqlContext.createDataFrame(rowRdd, schema)
  }



}

