package com.chenjj.bigdata.spark.scala.sparkSaveDemo

import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of a data-unload (export) job:
  * converts a delimited database file into a fixed-width output file.
  *
  * References:
  *   https://www.cnblogs.com/Gxiaobai/p/10705712.html -- saveAsHadoopFile
  *   https://www.jianshu.com/p/2509e77d1c7f -- map & mapPartitions
  *   https://www.jianshu.com/p/5e992ae1df1a -- RDD & Pair RDD
  */
object Application {

  /**
    * Entry point: reads a delimited CSV file, filters rows whose first column
    * is greater than 5, converts each row into a fixed-width line re-encoded
    * via GBK, and writes the pairs out through a custom Hadoop OutputFormat.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName(this.getClass.getName)
      .setMaster("local[4]")
      .set("spark.testing.memory", "1024000000") // raise the testing-memory floor for local runs
    val sc = new SparkContext(conf)
    val sqlContext = SparkSession.builder().config(conf).getOrCreate().sqlContext

    // header=false: the first CSV row is data, not column names.
    // inferSchema=true: let Spark infer the column types automatically.
    val df = sqlContext.read.format("com.databricks.spark.csv")
      .option("header", "false")
      .option("inferSchema", true.toString)
      .load("D:\\Code\\Gitee\\bigdata\\spark\\src\\main\\resources\\data.csv")

    df.printSchema()

    df.filter(df("_c0") > 5).rdd.map { x =>
      // Turn the delimited record into a fixed-width line.
      val tmpline = x(0) + "      中文       " + x(1)
      // BUG FIX: the original passed tmpline.length (the *character* count) as
      // the byte count. With multi-byte Chinese characters the UTF-8 byte array
      // is longer than the char count, so every record was silently truncated.
      // Use the full byte array instead.
      val utf8Bytes = tmpline.getBytes("UTF-8")
      // NOTE(review): decoding UTF-8 bytes as GBK does not transcode — it
      // produces mojibake. If the goal is a GBK-encoded output file, the
      // encoding should be applied when writing bytes (e.g. inside
      // CustomOutputFormat via tmpline.getBytes("GBK")) — confirm the intent.
      val line = new String(utf8Bytes, 0, utf8Bytes.length, "GBK")
      ("row", line)
    }
    .saveAsHadoopFile("file:///D:/tmp//sparkdata", classOf[String], classOf[String], classOf[CustomOutputFormat])
  }
}
