package test

import com.sun.org.apache.xalan.internal.xsltc.compiler.util.IntType
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable

/**
  * Created by liuwei on 2017/7/7.
  */
object TestRead {

  /**
    * Reads "index","word" pairs from a quoted CSV file, builds an index→word
    * lookup on the driver, and materializes the pairs as a DataFrame.
    *
    * Expected input lines look like: "0","someWord"
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("LDATest").setMaster("local[8]")
    val sc = new SparkContext(sparkConf)
    val spark = SparkSession.builder.getOrCreate()

    try {
      // Parse each CSV line into (index, word), stripping the surrounding quotes.
      // Read once and reuse for both the lookup map and the DataFrame rows.
      val pairs = sc.textFile("data/mllib/C0_segfeatures.txt")
        .map(_.split(","))
        .map(x => (x(0).replaceAll("\"", ""), x(1).replaceAll("\"", "")))

      // BUG FIX: the original code put entries into a driver-side
      // mutable.HashMap inside an RDD map() transformation. Transformations
      // are lazy — no action ever ran, so the map stayed empty — and even
      // with an action, on a real cluster the closure mutates executor-local
      // copies, never the driver's map. collectAsMap() runs the job and
      // returns the results to the driver correctly.
      val indexToWord: Map[Int, String] =
        pairs.map { case (idx, word) => (idx.toInt, word) }.collectAsMap().toMap

      println(indexToWord + "====")

      val rdd = pairs.map { case (idx, word) => Row(idx, word) }

      // Schema: both columns kept as nullable strings, matching the Row shape.
      val schema = StructType(
        Seq(
          StructField("index", StringType, true),
          StructField("word", StringType, true)
        )
      )

      // Maps an array of term indices to their words (None when the index is
      // missing from the lookup). NOTE(review): currently unused — kept for
      // the intended LDA topic-term post-processing; wire it into a select()
      // or remove it.
      val resultUDF = udf((termIndices: mutable.WrappedArray[Integer]) =>
        termIndices.map(i => indexToWord.get(i.toInt))
      )

      val dataset = spark.createDataFrame(rdd, schema)
      dataset.show(false)

      println(dataset.schema)
    } finally {
      // Always release Spark resources, even if the job fails.
      sc.stop()
    }
  }

}
