package edu.csl.study.spark.basic

import java.text.SimpleDateFormat
import java.util.Date

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, InputSplit, TextInputFormat}
import org.apache.spark.rdd.{HadoopRDD, RDD}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Log analysis.
 * (1) GBK-encoded files: `textFile` always decodes with UTF-8, which garbles
 *     the content, so the raw bytes must be re-decoded with the correct charset.
 */
object Core_LogAnalysis_sinosoft {

  /**
   * Reads a text file as an RDD of lines, decoding each line with the given
   * charset instead of the UTF-8 that `SparkContext.textFile` hard-codes.
   *
   * @param sc            the Spark context
   * @param path          file path to read
   * @param encoding      charset name used to decode each line (e.g. "GBK")
   * @param minPartutuibs minimum number of partitions
   *                      (sic — name kept for call-site compatibility; it means "minPartitions")
   * @return RDD of decoded lines
   */
  def transfer(sc: SparkContext, path: String, encoding: String, minPartutuibs: Int): RDD[String] = {
    // Text#getBytes returns the reused backing buffer, so only the first
    // getLength bytes are valid — decode exactly that slice.
    sc.hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartutuibs)
      .map(p => new String(p._2.getBytes, 0, p._2.getLength, encoding))
  }

  /**
   * Loads files as an RDD of (file name, line content, 1), decoding lines with
   * the given charset. The file name is recovered from each partition's
   * `FileSplit`, so lines from different input files stay distinguishable.
   *
   * @param sc       the Spark context
   * @param path     read path (may match several files)
   * @param encoding charset used to decode each line; defaults to "GBK"
   * @return RDD of (file name, decoded line, 1)
   */
  def loadFileToRdd(sc: SparkContext, path: String, encoding: String = "GBK"): RDD[(String, String, Int)] = {
    sc.hadoopFile[LongWritable, Text, TextInputFormat](path)
      .asInstanceOf[HadoopRDD[LongWritable, Text]]
      .mapPartitionsWithInputSplit((inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
        val file = inputSplit.asInstanceOf[FileSplit]
        iterator.filter(x => x._2 != null).map(x => {
          // Decode only the valid prefix of the reused Text buffer.
          (file.getPath.getName, new String(x._2.getBytes, 0, x._2.getLength, encoding), 1)
        })
      })
  }

  /**
   * Builds a local Spark context for this analysis.
   *
   * @return a SparkContext with log level lowered to WARN
   */
  def newContext: SparkContext = {
    // NOTE(review): "local[2]" was chosen by the original author with a note that
    // at least two threads are needed (one to receive data, one to process it) —
    // that constraint applies to streaming receivers; kept as-is.
    val conf: SparkConf = new SparkConf().setAppName("LogAnalysis").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("warn")
    sc
  }

  // Root directory for test files. FIX: File.separator instead of a hard-coded
  // "\\" so the path is also valid on non-Windows systems.
  val rootDir: String =
    System.getProperty("user.dir") + java.io.File.separator + "testFile" + java.io.File.separator

  /**
   * Entry point.
   *
   * @param args optional overrides: args(0) = log file path, args(1) = encoding.
   *             Defaults reproduce the original hard-coded behavior.
   */
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark context.
    val sc = newContext

    // 2. Input. textFile is hard-coded to UTF-8, so `transfer` picks the
    //    charset explicitly (the file analyzed here happens to be UTF-8).
    val logPath  = if (args.length > 0) args(0) else rootDir + "backend-car_20200427182652_out.log"
    val encoding = if (args.length > 1) args(1) else "UTF-8"
    val rdd: RDD[String] = transfer(sc, logPath, encoding, 1)
    println("原始行：" + rdd.count())

    // 3. Lines that carry a user tag such as [用户：12345].
    val regex_user_Str = "\\[用户：\\d*\\]"
    val userurl = rdd.filter(_.matches(".*" + regex_user_Str + ".*"))
    println("根据用户过滤的行：" + userurl.count())

    // 4. Per-user line counts, sorted ascending (file output currently disabled;
    //    `count`/`datetime` feed the commented-out save below).
    val pattern = regex_user_Str.r
    val user_str: RDD[(String, Int)] = userurl.map(item => ((pattern findFirstIn item).getOrElse(""), 1))
    val count: RDD[(String, Int)] = user_str.reduceByKey(_ + _).sortBy(_._2)
    val datetime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date())
    //count.coalesce(1).saveAsTextFile(rootDir + "41_company_count" + datetime + ".log")

    // 5. EDI servlet request (line :48) / response (line :61) entries, each
    //    prefixed with its extracted "type" field (output disabled).
    val pattern2 = "\"type\":\"\\d{4}_n\"".r
    val userurl2 = rdd
      .filter(item =>
        item.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:48.*")
          || item.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:61.*"))
      .map(item => (pattern2 findFirstIn item).getOrElse("") + " " + item)
    //userurl2.coalesce(1).saveAsTextFile(rootDir + "22_EdiInterfaceServlet" + datetime + ".log")

    // 6. Same selection, additionally including executor-thread lines.
    val userurl3 = rdd.filter(item =>
      item.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:48.*")
        || item.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:61.*")
        || item.matches(".*\\[SinoExcutor_\\d*\\].*"))
    val userurl4 = userurl3.map(item => (pattern2 findFirstIn item).getOrElse("") + " " + item)
    //userurl4.coalesce(1).saveAsTextFile(rootDir + "22_new_EdiInterfaceServlet" + datetime + ".log")

    // 7. Request/response counts per HTTP worker thread (output disabled).
    val pattern3 = "\\[http-nio-18020-exec-\\d{1,3}\\]".r
    val userurl5 = rdd
      .filter(_.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:48.*"))
      .map(item => ((pattern3 findFirstIn item).getOrElse(""), 1))
      .reduceByKey(_ + _)
    //userurl5.coalesce(1).saveAsTextFile(rootDir + "22_input" + datetime + ".log")
    val userurl6 = rdd
      .filter(_.matches(".*i\\.s\\.base\\.EdiInterfaceServlet:61.*"))
      .map(item => ((pattern3 findFirstIn item).getOrElse(""), 1))
      .reduceByKey(_ + _)
    //userurl6.coalesce(1).saveAsTextFile(rootDir + "22_output" + datetime + ".log")

    // 8. Latency distribution from "耗时：<millis>" markers.
    //    FIX: the original ran substring(3).toDouble on *every* line, throwing
    //    StringIndexOutOfBoundsException for lines without the marker (and
    //    NumberFormatException when \d* matched zero digits). Only lines that
    //    actually contain "耗时：" followed by at least one digit contribute now.
    val pattern7 = "耗时：(\\d+)".r
    val costRdd: RDD[Double] =
      rdd.flatMap(item => pattern7.findFirstMatchIn(item).map(_.group(1).toDouble))

    val sum = costRdd.sum
    val size = costRdd.count()
    println("总数：" + size)
    // FIX: min()/max() throw UnsupportedOperationException on an empty RDD,
    // and sum/size would be NaN — skip the stats when no sample matched.
    if (size > 0) {
      println("最小：" + costRdd.min())
      println("最大：" + costRdd.max())
      println("均值：" + sum / size)

      // Prints one latency bucket: its count plus its share of all samples.
      def printBucket(label: String, n: Long): Unit =
        println(label + n + "  " + n * 1.0 / size * 100.0 + "%")

      printBucket("大于20：", costRdd.filter(_ > 20000).count())
      printBucket("10到20：", costRdd.filter(item => item > 10000 && item <= 20000).count())
      printBucket("5到10：", costRdd.filter(item => item > 5000 && item <= 10000).count())
      printBucket("小于等于5：", costRdd.filter(_ <= 5000).count())
      printBucket("小于等于3：", costRdd.filter(_ <= 3000).count())
      printBucket("小于等于2：", costRdd.filter(_ <= 2000).count())
      printBucket("小于等于1：", costRdd.filter(_ <= 1000).count())
    }

    // Shut down the context.
    sc.stop()
  }
}
