package com.bj58.test

import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}
import util.SparkReadUtil

/**
  * Created by 6v on 2018/11/5.
  */
/**
  * Driver that reads ROI click logs through [[SparkReadUtil]], projects a
  * subset of columns, and prints a sample for inspection.
  */
object RoiClickRead {

  /** HDFS directory of the ROI click logs (kept public for external callers). */
  val URL = "/home/hdp_lbg_ectech/resultdata/adsp/dp/ods/roi/click/"

  /** Field separator inside a record (Hive's default \u0001 delimiter). */
  val SPLIT_WORD = '\u0001'

  /** Record (line) separator. */
  val SPLIT_LINE = '\n'

  /** Full input schema: column names in the order they appear in the raw file. */
  val COLUMN_SEQ = Seq(
    "sid", "psid", "sessionid", "cookieid", "sloc1", "sloc2", "sloc3", "sloc4",
    "scate1", "scate2", "scate3", "scate4", "scate5", "url", "ip", "imei",
    "xforward", "ua", "pf", "userid", "utmsrc", "spm", "params", "validst",
    "infoid", "infotype", "clickid", "clicktime", "clicktag", "clicktype",
    "sourcepage", "abtest")

  /** Columns projected into the output DataFrame. */
  val COLUMN_OUTPUT = Seq(
    "cookieid", "sloc1", "sloc2", "sloc3", "scate1", "scate2", "scate3",
    "ip", "imei", "ua", "userid", "infoid", "infotype", "clicktime",
    "clicktag", "sourcepage", "abtest")

  /** Fallback sample file, used only when no path argument is supplied. */
  private val DefaultPath = "C:\\Users\\lenovo\\Desktop\\1 (2).txt"

  /**
    * Entry point.
    *
    * @param args optional: `args(0)` overrides the input file path
    *             (previously hard-coded to a developer-machine path).
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("RoiClickRead")
      .master("local[4]") // local debug run; override via spark-submit --master in production
      .getOrCreate()

    // Take the input path from the command line when given; fall back to the
    // old hard-coded default so existing invocations keep working.
    val path = args.headOption.getOrElse(DefaultPath)
    println(path)

    try {
      val data = SparkReadUtil.readFromFile(
        spark, path, SPLIT_LINE, SPLIT_WORD,
        column_input = COLUMN_SEQ, COLUMN_OUTPUT)
      data.show(8)
    } finally {
      // Always release the SparkContext, even when reading fails.
      spark.stop()
    }
  }
}
