package com.bj58.test

import org.apache.spark.sql.SparkSession
import util.SparkReadUtil
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.functions.expr

/**
  * Created by 6v on 2018/11/5.
  */
/**
  * Reads a Hive-style delimited text file into a DataFrame and rewrites the
  * `uid` column to keep only the second comma-separated token.
  *
  * Created by 6v on 2018/11/5.
  */
object BeeHiveRead {

  // HDFS location of the production click data.
  // NOTE(review): not referenced by main(), which reads a local test file — confirm intent.
  val URL = "/home/hdp_lbg_ectech/resultdata/adsp/dp/ods/roi/click/"
  // Hive's default field delimiter (^A / \u0001) and record delimiter.
  val SPLIT_WORD = '\u0001'
  val SPLIT_LINE = '\n'

  // Input schema: column names in file order.
  val COLUMN_SEQ = Seq("uid","ts","os","usertype","adplaceid","taskid","tasktype","actiontype","platform")

  // Columns selected for output (currently identical to the input schema).
  val COLUMN_OUTPUT = Seq("uid","ts","os","usertype","adplaceid","taskid","tasktype","actiontype","platform")

  // Fallback input path used when no command-line argument is supplied,
  // preserving the original hard-coded behavior.
  private val DEFAULT_PATH = "C:\\Users\\lenovo\\Desktop\\3.txt"

  /**
    * Entry point.
    *
    * @param args optional: args(0) overrides the input file path; with no
    *             arguments the original local test file is used.
    */
  def main(args: Array[String]): Unit = {

    // NOTE(review): master is pinned to local[1] for local testing; remove or
    // make configurable before submitting to a cluster.
    val spark = SparkSession
      .builder()
      .appName("BeeHiveRead")
      .master("local[1]")
      .getOrCreate()

    try {
      val path = if (args.nonEmpty) args(0) else DEFAULT_PATH
      println(path)

      // Load the delimited file with the expected column layout, then keep
      // only the token after the first comma in `uid`
      // (e.g. "prefix,12345" -> "12345").
      val data = SparkReadUtil
        .readFromFileTest(spark, path, SPLIT_LINE, SPLIT_WORD, column_input = COLUMN_SEQ, COLUMN_OUTPUT)
        .withColumn("uid", expr("split(uid,',')[1]"))

      data.show(8)
    } finally {
      // Release the SparkContext even if reading or transforming fails.
      spark.stop()
    }
  }
}
