package com.chenjj.bigdata.sparksql.scala

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources.csv.MultiLineCSVDataSource

import java.io.InputStream

object CsvReader {
  /**
   * Batch job: (1) reads a GBK-encoded, pipe-delimited CSV into a cached RDD,
   * normalizes its separators/newlines, and (2) re-encodes a GBK text file,
   * writing the result to /tmp/bbbb. Paths are currently hard-coded.
   */
  def main(args: Array[String]): Unit = {
    // Build the Spark session; appName shows up in the Spark UI / history server.
    val spark = SparkSession
      .builder()
      .appName("CsvReader")
      .getOrCreate()

    val sc = spark.sparkContext

    // GBK-encoded, '|'-delimited file, no header. "multiline" lets quoted
    // fields span line breaks; empty strings are read as null.
    val csvRdd = spark.read
      .option("encoding", "gbk")
      .option("multiline", value = true)
      .option("header", "false")
      .option("quote", "\"")
      .option("delimiter", "|")
      .option("nullValue", "")
      .csv("/tmp/deposit_trans4spark.txt")
      .rdd

    csvRdd.cache()
    csvRdd.count() // action to materialize the cache up front

    // Normalize each row: ',' -> \001 (Hive default field separator) and
    // embedded newlines -> a "<br>" marker.
    // BUG FIX: the original discarded this map() result — transformations are
    // lazy, so the code never ran. Keep the RDD and trigger it below.
    // (The original also round-tripped the string through GBK bytes and back,
    // which is an identity transform and has been dropped.)
    val normalized = csvRdd.map { row =>
      row.toString
        .replace(",", "\001")
        .replace("\n", "<br>")
    }
    normalized.count() // force execution of the normalization

    // Re-encode a GBK text file and write it back out in 5 partitions.
    // BUG FIX: Text.getBytes returns the *backing array*, which Hadoop recycles
    // and which may be longer than the valid content; decode only the first
    // getLength bytes, otherwise stale trailing bytes corrupt the output.
    sc.hadoopFile("/tmp/xxx", classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
      .map { case (_, text) => new String(text.getBytes, 0, text.getLength, "gbk") }
      .repartition(5)
      .saveAsTextFile("/tmp/bbbb")

    // Debug dump of the cached rows. NOTE: this prints on the executors'
    // stdout, not the driver console, when running on a cluster.
    // (Removed the original's bare references to ChildFirstURLClassLoader —
    // a class, not a term, so it did not even compile — and to the Spark-internal
    // MultiLineCSVDataSource; both were no-op dead code.)
    csvRdd.foreach(row => print(row.mkString("|||")))

    spark.stop() // release cluster resources
  }
}
