//
//  欧盘变盘分析:
//
//
//
//cp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar /data/caiqiu/prediction/jar/fb_CFBE2017007001.jar
//
//scp FirstSparkApp/out/artifacts/FirstSparkAppJar/firstsparkapp.jar root@172.16.0.71:/root/fb_CFBE2017007001.jar
//
//
//#TEST
//nohup ./bin/spark-submit --master spark://skn-rqg382b1-spark-master:7077 --class caiqr.model.CFBE2017007001.CFBE2017007001 --jars /usr/local/spark/jars/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /root/fb_CFBE2017007001.jar maxResultSize=6g  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 big_file_1=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_change_all_europe2.csv big_file_2=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_europe_change_increase_2.csv company_ids=1_3 output_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/fb_CFBE2017007001.csv big_file_3=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_500w_4.csv job_id=8632 spark_id=11  > /root/a.log < /dev/null 2>&1  &
//
//
//#ALL
//nohup ./bin/spark-submit --master spark://skn-rqg382b1-spark-master:7077 --class caiqr.model.CFBE2017007001.CFBE2017007001 --jars /usr/local/spark/jars/mysql-connector-java-5.1.35.jar --executor-memory 6G --driver-memory 4G /root/fb_CFBE2017007001.jar maxResultSize=6g  save_db_info=172.16.4.17-prediction-caiqiu-Caiqiu502 big_file_1=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_change_all_europe.csv big_file_2=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_europe_change_increase_2.csv company_ids=1 output_file=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/fb_CFBE2017007001.csv big_file_3=hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_500w_3.csv job_id=8632 spark_id=11  > /root/a.log < /dev/null 2>&1  &
//
//
//
//cd /usr/local/hadoop; bin/hadoop fs -get hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_change_all_europe2.csv ~/
//
//cd /usr/local/hadoop; bin/hadoop fs -get hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/fb_europe_change_increase_2.csv ~/
//
//cd /usr/local/hadoop; bin/hadoop fs -get hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/fb_CFBE2017007001* ~/
//
//cd /usr/local/hadoop; bin/hadoop fs -get hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_500w_1.csv ~/
//
//cd /usr/local/hadoop; bin/hadoop distcp file:///root/football_match_500w_4.csv hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/csv/football_match_500w_4.csv
//
//cd /usr/local/hadoop; bin/hadoop fs -rmr -skipTrash hdfs://skn-qcqegnt5-hadoop-master:9000/data/caiqiu/result/*
//
//
//
//



// caiqr.model.CFBE2017007001.CFBE2017007001


//欧盘-初盘范围+即时盘范围(标准赔率)+返奖率匹配
package caiqr.model.CFBE2017007001
//import com.redislabs.provider.redis._
import caiqr.utils.{AllFBMatchInputFile, PredictionUtils, PredictionDBUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, DataFrame}
import java.text.SimpleDateFormat
import java.sql.DriverManager
import java.lang.IllegalArgumentException


object CFBE2017007001 {

  /**
   * Entry point for the European-odds turning-point analysis job.
   *
   * Arguments are `key=value` pairs, e.g.:
   *   maxResultSize=6g save_db_info=host-db-user-pass job_id=8632 spark_id=11
   *   big_file_1=<full odds file> big_file_2=<incremental odds file>
   *   big_file_3=<match table file> company_ids=1_3 output_file=<hdfs path>
   *
   * Pipeline:
   *   1. load the full and incremental odds-change files, keep only the given companies
   *   2. join with the match table to obtain each match's kickoff time
   *   3. generate fixed sampling points: every 10 minutes within 4 hours of
   *      kickoff (25 points incl. kickoff) plus every hour from 5h to 48h before
   *   4. forward-fill the most recent real odds onto each generated point
   *   5. save the result to HDFS and mark the job done in the DB
   */
  def main(args: Array[String]){

    //////////////////////////////// parse arguments ////////////////////////////////
    // Turn "key=value" style args into a Map.
    val cmd_map = args.toList.map { p =>
      val items = p.split("=")
      (items(0), items(1))
    }.toMap

    val maxResultSize = cmd_map.getOrElse("maxResultSize", "4g") // spark.driver.maxResultSize, default 4g
    val job_id = cmd_map.getOrElse("job_id", "")                 // prediction job id
    val model_id = cmd_map.getOrElse("spark_id", "")             // model id
    val save_db_info = cmd_map.getOrElse("save_db_info", "")     // DB connection info
    val output_file = cmd_map.getOrElse("output_file", "")       // HDFS output path

    val all_big_file = cmd_map.getOrElse("big_file_1", "")       // full odds-change file
    val increase_big_file = cmd_map.getOrElse("big_file_2", "")  // incremental odds-change file
    val company_ids = cmd_map.getOrElse("company_ids", "")       // company id list, format: 1_3
    val fb_match_file = cmd_map.getOrElse("big_file_3", "")      // match table, source of match_time

    // Fail fast on missing arguments.
    // FIX: both messages previously lacked the `s` interpolator, so the literal
    // text "${args.length}" was thrown instead of the actual argument count.
    if (maxResultSize=="" || save_db_info=="" || job_id=="" || model_id=="" || output_file=="") {
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }
    if (all_big_file=="" || increase_big_file=="" || company_ids=="" || fb_match_file=="") {
      throw new IllegalArgumentException(s"Spark main args is error. ${args.length}")
    }


    // 1. Spark context / SQL context
    val sc = new SparkContext(new SparkConf()
      .setAppName("CFBE2017007001")
      .set("spark.driver.maxResultSize", maxResultSize)
    )
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._


    // 2. Convert the "1_3"-style company id list into a DataFrame for joining.
    val company_id_list_df = PredictionUtils.transation_company_id_list_to_df(company_ids, sc)


    // 3. Load the European odds-change data.
    // 3.1 Match table: only match_id and match_time are needed; columns are
    //     renamed (mmatch_*) to avoid ambiguity in later joins.
    val match_df = AllFBMatchInputFile.load(sc, sqlContext, fb_match_file)
      .selectExpr("match_id as mmatch_id", "match_time as mmatch_time")


    // 3.2 Full data file (comma-separated), filtered to the requested companies.
    val read_all_odds_df = load(sc, sqlContext, all_big_file, ",")
    val src_all_odds_df = read_all_odds_df
      .join(company_id_list_df, read_all_odds_df("company_id") === company_id_list_df("sporttery_company_id"))
      .orderBy("match_id","company_id","odds_time")
      .select("match_id","company_id","odds_time","stand_win","stand_draw","stand_loss", "flag")


    // 3.3 Incremental data file (tab-separated), filtered the same way.
    val read_increase_odds_df = load(sc, sqlContext, increase_big_file, "\t")
    val increase_odds_df = read_increase_odds_df
      .join(company_id_list_df, read_increase_odds_df("company_id") === company_id_list_df("sporttery_company_id"))
      .orderBy("match_id","company_id","odds_time")
      .select("match_id","company_id","odds_time","stand_win","stand_draw","stand_loss", "flag")


    // 3.4 Merge full + incremental odds into one DataFrame.
    val all_odds_df = (src_all_odds_df union increase_odds_df).orderBy("match_id","company_id","odds_time")


    // 4. Clean odds per time window.
    // 4.1 Find every (match_id, company_id) pair with odds data and attach its
    //     kickoff time; groupBy is used only for de-duplication.
    val rinse_match_df = all_odds_df.groupBy("match_id","company_id").count
    val ready_match_df = rinse_match_df
      .join(match_df, rinse_match_df("match_id") === match_df("mmatch_id"))
      .select("match_id","company_id","mmatch_time")


    // Keep only odds rows whose match has a known match_time (inner join drops the rest).
    val new_all_odds_df = all_odds_df
      .join(match_df, all_odds_df("match_id") === match_df("mmatch_id"))
      .select("match_id","company_id","odds_time","stand_win","stand_draw","stand_loss", "flag")


    // Sampling offsets (minutes before kickoff):
    //  - minute stage: within 4 hours of kickoff, every 10 minutes
    //    (24 points + 1 at kickoff itself)
    val mins = sc.parallelize((0 to 24).toList).map(_ * 10)
    //  - hour stage: 5h .. 48h before kickoff, one point per hour (44 points)
    val hours = sc.parallelize((5 to 48).toList).map(_ * 60)
    val times = mins ++ hours


    // 4.2 Generate placeholder rows, one per (match, company) x sampling offset.
    //     Placeholder rows carry zero odds and flag "1"; real rows carry flag "0".
    import java.util.Date
    // NOTE(review): SimpleDateFormat is not thread-safe, but each Spark task
    // deserializes its own copy of the closure, so this instance is not shared.
    val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    val src_time_rdd = ready_match_df.rdd.map(p=>(p(0),p(1),p(2))).cartesian(times).map{ p =>
      // p is e.g. ((659062, 1, 2017-06-16 03:00:00), 10)
      val match_id = p._1._1.toString
      val company_id = p._1._2.toString
      val match_time = p._1._3.toString
      val time_diff = p._2.toInt
      val match_time_second = sdf.parse(match_time).getTime // kickoff in epoch millis
      val odds_time = sdf.format(new Date((match_time_second-(time_diff)*60*1000)))
      (match_id, company_id, odds_time, "0", "0", "0", "1")
    }


    // 4.3 Union placeholder rows with real odds rows, ordered by odds_time asc.
    println("src_all_time_odds_df...")
    val src_time_df = src_time_rdd.toDF("match_id","company_id","odds_time","stand_win","stand_draw","stand_loss", "flag")
    val src_all_time_odds_df = (new_all_odds_df union src_time_df).orderBy("match_id","company_id","odds_time")


    // 4.4 Group by (match_id, company_id) and sort each group by odds_time.
    //     String comparison is a valid chronological sort for the
    //     "yyyy-MM-dd HH:mm:ss" format.
    val group_time_odds_rdd = src_all_time_odds_df.rdd
      .map(p => (s"${p(0)}_${p(1)}", (p.getString(2), p.getString(3), p.getString(4), p.getString(5), p.getString(6))))
      .groupByKey()
      .map { p =>
        val sortArray = p._2.toArray.sortWith(_._1 < _._1)
        (p._1, sortArray)
      }

    // Forward-fill: every placeholder row (flag "1") takes the most recent real
    // odds (flag "0") seen before it; then only placeholder rows are kept.
    //IN:
    //    659062_1
    //    (2017-06-15 07:30:42,1.895,3.723,4.907,0)
    //    (2017-06-15 18:00:00,0,0,0,1)
    //    (2017-06-15 20:00:00,0,0,0,1)
    //    (2017-06-15 20:56:49,1.706,3.954,6.214,0)
    //    (2017-06-15 21:00:00,0,0,0,1)
    //    (2017-06-15 22:00:00,0,0,0,1)
    //OUT:
    //    659062_1
    //    (2017-06-15 18:00:00,1.895,3.723,4.907,1)
    //    (2017-06-15 20:00:00,1.895,3.723,4.907,1)
    //    (2017-06-15 21:00:00,1.706,3.954,6.214,1)
    //    (2017-06-15 22:00:00,1.706,3.954,6.214,1)
    val fill_time_odds_rdd = group_time_odds_rdd.map{ p =>
      val a = p._2

      // `tmp` carries the last real odds record forward through the scan.
      var tmp:(String, String, String, String, String) = ("","","","","")
      val b = for (i <- 0 until a.length)
        yield {
          var data:(String, String, String, String, String) = ("","","","","")
          if( i == 0 ){
            // NOTE(review): if the very first row is a placeholder, it is kept
            // with zero odds — there is nothing earlier to fill from.
            data = a(i)
            tmp = a(i)
          }else if( a(i)._5 == "1" ) {
            // placeholder: substitute the last real odds seen
            data = ( a(i)._1, tmp._2, tmp._3, tmp._4, "1" )
          }
          else if( a(i)._5 == "0" ) {
            // real odds: emit as-is and remember for later placeholders
            data = a(i)
            tmp = a(i)
          }
          data
        }

      // Keep only the (now filled) placeholder sampling points.
      val c = b.filter(_._5=="1").map(p=>(p._1,p._2,p._3,p._4))
      (p._1, c)
    }


    // 5. Format output lines: "match_id,company_id,odds_time,win,draw,loss",
    //    one record per line, grouped per (match, company).
    val c = fill_time_odds_rdd.map( p=>p._2.map{ x =>
      val items = p._1.toString.split("_") // key looks like "519102_1"
      Array(items(0),items(1),x._1,x._2,x._3,x._4).mkString(",")
    })
    val result_file_rdd = c.map(x => x.mkString("\n"))


    // Save the result file to HDFS.
    PredictionUtils.save_result_to_hdfs(result_file_rdd, output_file)


    // Mark the Spark stage done; a downstream sqoop job imports the file into the DB.
    PredictionDBUtils.update_job_spark_status(save_db_info, job_id, model_id)

    sc.stop()
  }


  /**
   * Load an odds-change text file into a DataFrame of all-string columns.
   *
   * Expected input columns (split by `split_str`):
   *   match_id, company_id, odds_time, win, draw, loss, ret
   * where win/draw/loss are odds scaled by 1000 and ret is the payout ratio
   * scaled by 100000 — TODO confirm scaling against the file producer.
   *
   * Output columns add the standardized odds (odds / payout ratio, formatted
   * to 3 decimals) and a constant flag "0" marking a real odds record.
   *
   * @param sc         active SparkContext
   * @param sqlContext active SQLContext
   * @param filename   HDFS (or local) path of the input file
   * @param split_str  field separator ("," for the full file, "\t" for the incremental one)
   * @return DataFrame with schema match_id..flag, all StringType
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String, split_str: String): DataFrame ={

    val lines = sc.textFile(filename)
    val schemaString = "match_id,company_id,odds_time,win,draw,loss,ret,stand_win,stand_draw,stand_loss,flag"
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val schema = StructType(schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))

    // Parse and standardize each row. The first three parses are deliberately
    // kept even though their results are unused: they make malformed rows fail
    // eagerly with a clear exception instead of producing bad output.
    val rowRDD = lines.map(_.split(split_str)).map{ p =>

      p(0).toInt                                          // match_id: format validation only
      p(1).toInt                                          // company_id: format validation only
      p(2).substring(0, "2010-09-29 12:50:07".size)       // odds_time must be at least 19 chars
      val win = p(3).toInt
      val draw = p(4).toInt
      val loss = p(5).toInt
      val ret = p(6).toInt // NOTE(review): ret == 0 would yield Infinity below — confirm upstream guarantees ret > 0

      // standardized odds = odds / payout ratio
      val stand_win = f"${(win.toDouble/1000)/(ret.toDouble/100000)}%3.3f"
      val stand_draw = f"${(draw.toDouble/1000)/(ret.toDouble/100000)}%3.3f"
      val stand_loss = f"${(loss.toDouble/1000)/(ret.toDouble/100000)}%3.3f"

      // Raw columns pass through unchanged; flag "0" = real odds record
      // (placeholder points generated in main() carry flag "1").
      Row(p(0), p(1), p(2), p(3), p(4), p(5), p(6), stand_win, stand_draw, stand_loss, "0")
    }

    sqlContext.createDataFrame(rowRDD, schema)
  }

}

