package spark.work

//import breeze.linalg.{max, sum}
//import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import com.databricks.spark.csv.CsvParser
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types.{DataType, DoubleType}
import org.apache.spark.sql.{Column, Row, SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import util.DataSampleUtils

import scala.collection.mutable.HashMap
//import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions.udf

import org.apache.spark.sql.functions.col
/**
  * Created by liuwei on 2017/11/8.
  */
object Sampling {

  def main(args: Array[String]): Unit = {
    // Local 4-core driver; app name looks copied from an earlier experiment ("RowToColumnTest").
    val sparkConf = new SparkConf().setAppName("RowToColumnTest").setMaster("local[4]")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits
    // Load the Adult Census Income CSV (hard-coded Windows path) with a header row and
    // schema inference, then spread the data over 6 partitions.
     val df = sqlContext.read.format("com.databricks.spark.csv").option("header","true").option("inferSchema",true.toString).load(("C:\\Users\\lenovo\\Desktop\\AdultCensusIncomeBinaryClassificationdataset.csv")).repartition(6)
    // The file starts with a UTF-8 BOM, so the first header is parsed as "\uFEFFage".
    // Re-create it as a Double-typed "age" column and drop the BOM-prefixed original.
    val df2 = df.withColumn("age",typeModifyUdf(col("\uFEFFage"))).drop("\uFEFFage")
    println(df2.schema)



    // Per-stratum sampling fractions, keyed by the strings generateRddKey produces
    // ("age:<value>-"). Every fraction is 0.0, so presumably no row is kept in the
    // sampled split and res.count below prints 0 — verify against DataSampleUtils.
        val  fractions: Map[String, Double]= Map("age:46.0-"-> 0.0, "age:71.0-"-> 0.0, "age:44.0-"-> 0.0, "age:36.0-"-> 0.0, "age:87.0-"-> 0.0, "age:54.0-"-> 0.0, "age:33.0-"-> 0.0, "age:43.0-"-> 0.0, "age:63.0-"-> 0.0, "age:70.0-"-> 0.0, "age:67.0-"-> 0.0, "age:56.0-"-> 0.0, "age:29.0-"-> 0.0, "age:38.0-"-> 0.0, "age:40.0-"-> 0.0, "age:59.0-"-> 0.0, "age:65.0-"-> 0.0, "age:72.0-"-> 0.0, "age:79.0-"-> 0.0, "age:35.0-"-> 0.0, "age:20.0-"-> 0.0, "age:47.0-"-> 0.0, "age:81.0-"-> 0.0, "age:74.0-"-> 0.0, "age:27.0-"-> 0.0, "age:25.0-"-> 0.0, "age:77.0-"-> 0.0, "age:39.0-"-> 0.0, "age:49.0-"-> 0.0, "age:34.0-"-> 0.0, "age:83.0-"-> 0.0, "age:88.0-"-> 0.0, "age:90.0-"-> 0.0, "age:22.0-"-> 0.0, "age:69.0-"-> 0.0, "age:66.0-"-> 0.0, "age:45.0-"-> 0.0, "age:19.0-"-> 0.0, "age:42.0-"-> 0.0, "age:57.0-"-> 0.0, "age:30.0-"-> 0.0, "age:31.0-"-> 0.0, "age:53.0-"-> 0.0, "age:78.0-"-> 0.0, "age:55.0-"-> 0.0, "age:37.0-"-> 0.0, "age:68.0-"-> 0.0, "age:51.0-"-> 0.0, "age:60.0-"-> 0.0, "age:80.0-"-> 0.0, "age:23.0-"-> 0.0, "age:75.0-"-> 0.0, "age:61.0-"-> 0.0, "age:32.0-"-> 0.0, "age:84.0-"-> 0.0, "age:64.0-"-> 0.0, "age:86.0-"-> 0.0, "age:73.0-"-> 0.0, "age:21.0-"-> 0.0, "age:58.0-"-> 0.0, "age:18.0-"-> 0.0, "age:17.0-"-> 0.0, "age:50.0-"-> 0.0, "age:48.0-"-> 0.0, "age:85.0-"-> 0.0, "age:82.0-"-> 0.0, "age:24.0-"-> 0.0, "age:52.0-"-> 0.0, "age:62.0-"-> 0.0, "age:41.0-"-> 0.0, "age:76.0-"-> 0.0, "age:26.0-"-> 0.0, "age:28.0-"-> 0.0)
    // Stratify on the "age" column only.
    val selectColumns = Seq("age")

    // Key each row by its stratum string so the sampler can look up its fraction.
    val initRdd = df2.rdd.map(row => generateRddKey(selectColumns, row) -> row)

    // Split into (sampled, remainder) with fixed seed 47L for reproducibility.
    // NOTE(review): exact split semantics live in project-local DataSampleUtils — confirm there.
    val (originRdd, otherRDD) = DataSampleUtils.BernoulliSampling(initRdd, fractions, 47L)


    // Rebuild a DataFrame from the sampled rows (schema unchanged) and report its size.
    val res = df2.sparkSession.createDataFrame(originRdd.map(_._2), df2.schema)
    println(res.count)

  }


  /**
    * UDF that converts the schema-inferred integer age column to Double.
    *
    * Uses boxed types instead of primitive `Int`: Spark substitutes the default
    * value (0) for null inputs of primitive-typed Scala UDF parameters, which
    * would silently turn a null age into 0.0. With `java.lang.Integer` the null
    * is visible and propagated as a null result.
    */
  private def typeModifyUdf = udf((age: java.lang.Integer) => {
    // Propagate null rather than coercing it to 0.0.
    if (age == null) null else java.lang.Double.valueOf(age.toDouble)
  })
  /**
    * Builds the stratification key for `row` from the values of `selectColumns`.
    *
    * Each selected column contributes `"name:value-"` (empty value for nulls);
    * a column missing from the row's schema contributes only the `"-"` separator.
    * Field names are compared after stripping a leading UTF-8 BOM ("\uFEFF"),
    * matching the BOM-polluted headers produced by the CSV reader in main.
    *
    * @param selectColumns column names (BOM-free) to include in the key, in order
    * @param row           the row whose values are keyed
    * @return concatenated key such as "age:46.0-"
    */
  private def generateRddKey(selectColumns: Seq[String], row: Row): String = {
    // Schema field names with any leading BOM removed so clean-name lookups succeed.
    // (Lambda parameter renamed from `row`, which shadowed the method parameter.)
    val fieldNames = row.schema.fieldNames.toList.map { field =>
      if (field.startsWith("\uFEFF")) field.replace("\uFEFF", "") else field
    }
    val key = new StringBuilder
    selectColumns.foreach { name =>
      fieldNames.indexOf(name) match {
        case -1 => // column absent from schema: contribute only the trailing separator
        case i  => key.append(s"$name:").append(if (row.isNullAt(i)) "" else row.get(i).toString)
      }
      key.append("-")
    }
    key.toString
  }






}
