package cn.sheep.dmp.etl

import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Sheep.Old @ 64341393
  * Created 2018/3/28
  */
object Sql2Parquet1 {

  /**
    * Maps a log row's (requestmode, processnode) pair onto the three
    * request-stage counters: (original request, valid request, ad request).
    *
    * Semantics reproduced from the original branch chain:
    *   - requestmode == 1 && processnode == 1 -> (1, 0, 0)
    *   - requestmode == 1 && processnode == 2 -> (1, 1, 0)
    *   - requestmode == 1 && processnode == 3 -> (1, 1, 1)
    *   - anything else                        -> (0, 0, 0)
    *
    * @param requestMode value of the `requestmode` column
    * @param processNode value of the `processnode` column
    * @return three counters as a List[Double], in the order above
    */
  def requestCounters(requestMode: Int, processNode: Int): List[Double] =
    (requestMode, processNode) match {
      case (1, 1) => List[Double](1, 0, 0)
      case (1, 2) => List[Double](1, 1, 0)
      case (1, 3) => List[Double](1, 1, 1)
      case _      => List[Double](0, 0, 0)
    }

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf()
      .setAppName("日志转parquet文件") // runtime string kept as-is (app name shown in the Spark UI)
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") // Kryo for RDD serialization

    val sc = new SparkContext(sparkConf)
    // Ensure the SparkContext is always released, even if reading/processing fails.
    try {
      val sqlc = new SQLContext(sc)

      // Read the parquet dataset from the relative path "parquet".
      val parquet: DataFrame = sqlc.read.parquet("parquet")

      // Columns of interest:
      // REQUESTMODE  PROCESSNODE  ISEFFECTIVE  ISBILLING  ISBID  ISWIN  ADORDERID
      val requestMetrics = parquet.map { row =>
        val rmode = row.getAs[Int]("requestmode")
        val pnode = row.getAs[Int]("processnode")

        // NOTE(review): the fields below are read but not used yet — presumably
        // inputs for further metric columns (billing/bid/win). TODO: wire them in
        // or drop the reads.
        val eff   = row.getAs[Int]("iseffective")
        val isbl  = row.getAs[String]("isbilling")
        val isdd  = row.getAs[String]("isbid")
        val iswin = row.getAs[String]("iswin")
        val adoid = row.getAs[String]("adorderid")

        requestCounters(rmode, pnode)
      }

      // NOTE(review): Spark transformations are lazy — without an action
      // (count / reduce / save) `requestMetrics` never executes. The original
      // code discarded the map result entirely, so nothing ran. Left without an
      // action here to preserve observable behavior; TODO: aggregate and persist
      // the counters (e.g. requestMetrics.reduce(...)).
      requestMetrics
    } finally {
      sc.stop()
    }
  }

}



