package com.owen.spark.file

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Joins a "main" CSV of user records with an "other" CSV keyed by phone number
 * and writes the merged rows out as a single CSV file.
 *
 * Usage: FileStatistic [mainPath] [otherPath] [outputPath]
 * Missing arguments fall back to the original hard-coded defaults.
 */
object FileStatistic {

  // Original hard-coded locations, kept as defaults for backward compatibility.
  private val DefaultMainPath   = "G:/a/1.csv"
  private val DefaultOtherPath  = "G:/a/2.csv"
  private val DefaultOutputPath = "G:/a/data"

  /**
   * Parses one line of the main file into (phone, details).
   *
   * Column layout (0-based): 1=name, 2=position, 3=username, 4=phone,
   * 5=status, 6=createTime, and optionally 7=lan, 8=lng.
   * NOTE(review): column meanings inferred from the join below — confirm against the data source.
   *
   * Lines without the optional coordinate columns get "0"/"0" placeholders.
   * Assumes every line has at least 7 fields; malformed shorter lines will
   * still throw, matching the original behavior.
   */
  private def parseMainLine(line: String): (String, (String, String, String, String, String, String, String)) = {
    // limit = -1 keeps trailing empty fields; the default limit drops them,
    // which would make a line ending in "," appear to have fewer columns.
    val f = line.split(",", -1)
    // The full branch reads index 8, so it needs at least 9 fields.
    // (Original guard was <= 7, which crashed on exactly-8-field lines.)
    if (f.length <= 8) {
      (f(4), (f(1), f(2), f(3), f(5), f(6), "0", "0"))
    } else {
      (f(4), (f(1), f(2), f(3), f(5), f(6), f(7), f(8)))
    }
  }

  /** Parses one line of the other file into (phone, (r, p, t)). */
  private def parseOtherLine(line: String): (String, (String, String, String)) = {
    val f = line.split(",", -1)
    (f(1), (f(3), f(4), f(5)))
  }

  /**
   * Formats one joined record as a CSV row. The position fields may contain
   * semicolons standing in for commas; they are restored and the field quoted
   * so the embedded commas survive in the output CSV.
   */
  private def formatRow(phone: String,
                        main: (String, String, String, String, String, String, String),
                        other: (String, String, String)): String = {
    val (name, position, username, status, createTime, lan, lng) = main
    val (r, p, t) = other
    s"""$name,"${position.replace(";", ",")}",$username,$phone,$status,$createTime,$lan,$lng,$r,"${p.replace(";", ",")}",$t"""
  }

  def main(args: Array[String]): Unit = {
    // Optional positional overrides for input/output locations.
    val mainPath   = if (args.length > 0) args(0) else DefaultMainPath
    val otherPath  = if (args.length > 1) args(1) else DefaultOtherPath
    val outputPath = if (args.length > 2) args(2) else DefaultOutputPath

    // stripSuffix("$") removes the companion-object marker from the class name
    // (getSimpleName on an object's class yields e.g. "FileStatistic$").
    val conf = new SparkConf().setAppName(getClass.getSimpleName.stripSuffix("$"))
    val sc = new SparkContext(conf)
    try {
      val mainRdd  = sc.textFile(mainPath).map(parseMainLine)
      val otherRdd = sc.textFile(otherPath).map(parseOtherLine)

      mainRdd
        .join(otherRdd)
        .map { case (phone, (mainFields, otherFields)) => formatRow(phone, mainFields, otherFields) }
        // coalesce(1) merges partitions without the full shuffle repartition(1) forces.
        .coalesce(1)
        .saveAsTextFile(outputPath)
    } finally {
      // Always release the Spark context, even if the job fails.
      sc.stop()
    }
  }

}
