// Loads the Asian-handicap odds big-data file from HDFS
package caiqr.model.fb_asia_odds

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}

/**
 * Loads the Asian-handicap odds big-data file from HDFS and exposes it as a
 * Spark SQL [[DataFrame]].
 *
 * Created by zuojianhua on 15/6/19.
 */
object AsiaInitOrCurrInputFile {

  // Most recently loaded DataFrame, cached by load(). Mutable shared state:
  // callers must invoke load() before reading this field — nothing in the
  // visible code guards against a null read.
  var asia_df: DataFrame = null

  /**
   * Reads a tab-separated odds file into a DataFrame whose columns are all
   * strings, and caches the result in [[asia_df]].
   *
   * @param sc         SparkContext used to read the text file
   * @param sqlContext SQLContext used to build the DataFrame
   * @param filename   path of the TSV file (HDFS or any Spark-readable URI)
   * @return the DataFrame built from the file
   */
  def load(sc: SparkContext, sqlContext: SQLContext, filename: String): DataFrame = {
    val lines = sc.textFile(filename)
    val schemaString = "match_id,season_id,season_name_pre,season_pre,group_pre,match_time,match_year,match_month,match_week,league,host_score,away_score,match_result,company_id,init_home,init_odds,init_away,init_time,curr_home,curr_odds,curr_away,curr_time,init_home_range,init_away_range,curr_home_range,curr_away_range,asia_init_win_flag,asia_curr_win_flag,asia_change"
    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{StringType, StructField, StructType}
    val schema =
      StructType(
        schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))
    val columnCount = schema.length
    // split with limit -1 so trailing empty columns are preserved; the
    // default split("\t") drops trailing empty strings, which would make
    // rows whose last fields are blank come up short and throw
    // ArrayIndexOutOfBoundsException on the positional accesses below.
    // Indexing 0 until columnCount (instead of 29 hand-written p(i) calls)
    // keeps the row arity in sync with the schema; a genuinely short row
    // still fails fast with the same exception as before.
    val rowRDD = lines
      .map(_.split("\t", -1))
      .map(fields => Row.fromSeq((0 until columnCount).map(i => fields(i))))
    asia_df = sqlContext.createDataFrame(rowRDD, schema)
    asia_df
  }


}
