package spark.work

//import breeze.linalg.{max, sum}
//import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer
//import org.apache.spark.sql.functions._

/**
  * Created by liuwei on 2017/11/8.
  */
/**
  * Demo job: for each configured source column, appends a new StringType column
  * containing the MD5 (or SHA-1) hex digest of the cell value, upper- or
  * lower-cased, optionally dropping the original columns afterwards.
  *
  * Created by liuwei on 2017/11/8.
  */
object ColumnEncrypt {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("ClearingRepeat").setMaster("local[4]")
    // One SparkSession is enough; creating a separate SparkContext/SQLContext
    // first is redundant and the conf was never handed to the builder before.
    val ss = SparkSession.builder.config(sparkConf).getOrCreate()

    val df = ss.createDataFrame(Seq(
      ("afffDD", "sssSSS", "数学", 88, 15, ""),
      ("", "女", "语文", 70, 12, "李老师"),
      ("张三", "女", "物理", 50, 22, "张老师"),
      ("王二", "男", "数学", 50, 16, "赵老师"),
      ("王二", "男", "语文", 60, 20, "孙老师"),
      ("王二", "男", "历史", 50, 22, "李老师")
    )).toDF("name", "sex", "科目", "成绩", "排名", "监考老师")

    val titleNames = Seq("name", "sex", "科目", "成绩", "排名", "监考老师")
    // Columns to hash; upperOrLowers/encryptTypes are parallel to columnNames.
    val columnNames = Seq("成绩", "监考老师")
    val upperOrLowers = Seq("upCase", "upCase")
    val encryptTypes = Seq("MD5", "MD5")
    val retainOldColumn = true

    // Resolve each target column's position once, on the driver, and fail fast
    // on an unknown name instead of letting rowArr(-1) throw inside executors.
    val targetIdxs = columnNames.map { cn =>
      val i = titleNames.indexOf(cn)
      require(i >= 0, s"column '$cn' not found in $titleNames")
      i
    }

    // For every row, append one hashed value per target column, in
    // columnNames order (the same order used to extend the schema below).
    val outputRDD = df.rdd.map { row =>
      val base = row.toSeq
      val hashed = columnNames.indices.map { index =>
        // Option(...) maps a null cell to "" so the digest never NPEs.
        val cell = Option(base(targetIdxs(index))).map(_.toString).getOrElse("")
        val digest = encryptTypes(index) match {
          case "MD5" => DigestUtils.md5Hex(cell)
          case _     => DigestUtils.sha1Hex(cell)
        }
        upperOrLowers(index) match {
          case "upCase" => digest.toUpperCase
          case _        => digest.toLowerCase
        }
      }
      Row.fromSeq(base ++ hashed)
    }

    // BUG FIX: the new StructFields must be appended in columnNames order so
    // they line up with the values appended to each row above. The previous
    // code iterated df.schema instead, which silently misaligns data and
    // column names whenever columnNames is not given in schema order.
    val newSchema = columnNames.indices.foldLeft(df.schema) { (schema, index) =>
      schema.add(StructField(columnNames(index) + "_" + encryptTypes(index), StringType, nullable = true))
    }

    var outputDf = ss.createDataFrame(outputRDD, newSchema)
    if (!retainOldColumn) outputDf = outputDf.drop(columnNames: _*)

    println(newSchema)
    // show(false): don't truncate — hex digests are 32/40 chars wide, and
    // show(true) would clip them at 20 characters.
    outputDf.show(false)

    ss.stop()
  }
}
