package org

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.util.control.NonFatal

object ScalaUtil {

  /**
   * Runs a fixed per-device fault-count query over the whitespace-delimited
   * data file at `args` and returns the result rows as a single buffer.
   *
   * NOTE(review): the name `getDate` is kept for caller compatibility,
   * although it was presumably meant to be `getData`.
   *
   * @param args path of the input data file; must not be null
   * @return a StringBuffer containing one "field===field===..." entry per
   *         `_c7` group, entries separated by "       &&&&         "
   */
  def getDate(args: String): StringBuffer = {
    // Fail fast instead of letting textFile(null) throw deep inside Spark.
    require(args != null, "input path must not be null")

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("SparkSQLWordCount")
      .getOrCreate()

    try {
      val frame: DataFrame = readData(spark, args)
      frame.createOrReplaceTempView("people")

      // Count 'fault'/'start' rows per device (_c7 in ICM1..ICM4) for tag
      // WFI101CV011 on '-monitoring' records.
      val resultRows = spark.sql(
        """select _c7 as c7, count(1) as count
          |from (select * from people
          |      where _c1 == 'S'
          |        and (_c7 == 'ICM1' or _c7 == 'ICM2' or _c7 == 'ICM3' or _c7 == 'ICM4')
          |        and _c9 == 'WFI101CV011'
          |        and _c12 == 'fault'
          |        and _c13 == 'start'
          |        and _c18 like '%-monitoring%')
          |group by _c7""".stripMargin
      ).rdd.collect()

      val stringBuffer = new StringBuffer()
      // foreach, not map: we only append for the side effect (the original
      // collected a useless Array[Unit]).
      resultRows.foreach { row =>
        val line = row.mkString("===")
        println("tx", line)
        stringBuffer.append(line + "       &&&&         ")
      }
      stringBuffer
    } finally {
      // Release the local Spark context even if the query fails.
      spark.stop()
    }
  }

  /**
   * Reads a whitespace-delimited text file and returns it as a DataFrame of
   * all-string columns (_c0, _c1, ...).
   *
   * Runs of whitespace are collapsed to a single comma, the normalized text
   * is staged in `tmpPathStr`, and the staging directory is re-read as CSV.
   *
   * @param spark      active SparkSession
   * @param path       input text file path
   * @param tmpPathStr staging directory, deleted first if it already exists;
   *                   defaults to the previously hard-coded location
   * @return the staged data with every column cast to StringType
   */
  def readData(spark: SparkSession, path: String,
               tmpPathStr: String = "C:\\tmp1234"): DataFrame = {

    // Collapse each run of whitespace into "," so the file parses as CSV.
    val tmpRdd = spark.sparkContext.textFile(path).map(_.replaceAll("\\s+", ","))

    // Remove a stale staging directory; saveAsTextFile fails if it exists.
    val tmpPath: Path = new Path(tmpPathStr)
    val fs: FileSystem = tmpPath.getFileSystem(new Configuration())
    if (fs.exists(tmpPath)) {
      fs.delete(tmpPath, true)
    }

    // Stage the normalized text. Log and rethrow instead of silently
    // swallowing — the CSV read below cannot succeed if staging failed.
    try {
      tmpRdd.saveAsTextFile(tmpPathStr)
    } catch {
      case NonFatal(ex) =>
        println(s"failed to stage data at $tmpPathStr: ${ex.getMessage}")
        throw ex
    }

    // Re-read the staged data as CSV and force every column to StringType.
    val df = spark.read.csv(tmpPathStr)
    val cols = df.columns.map(f => col(f).cast(StringType))
    df.select(cols: _*)
  }

  /** Entry point: expects the input data file path as the first argument. */
  def main(args: Array[String]): Unit = {
    // The original passed null and crashed inside Spark; require a real path.
    args.headOption match {
      case Some(path) =>
        val buffer: StringBuffer = getDate(path)
        println(123)
        println("aaa", buffer)
      case None =>
        println("usage: ScalaUtil <input-path>")
    }
  }
}
