package lhb.spark.sparkreduce

import java.sql.Timestamp

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/** One parsed alarm record from the exported CSV rows.
  *
  * All fields are kept as raw strings exactly as read from the source
  * file; timestamp parsing is deferred to the aggregation step.
  *
  * @param timein    time the alarm became active (key for grouping)
  * @param timeout   time the alarm cleared
  * @param tagname   alarm tag identifier
  * @param almvalue  value recorded at alarm time
  * @param almstatus alarm status code
  */
case class alarm(
    timein: String,
    timeout: String,
    tagname: String,
    almvalue: String,
    almstatus: String)

object sparkreduce {

  // We read CSV field indices 0, 1, 3, 5 and 9, so any line split into
  // fewer than 10 fields is malformed and must be skipped, otherwise a
  // single bad row aborts the whole job with ArrayIndexOutOfBoundsException.
  private val MinFields = 10

  /** Reads exported alarm CSV files from HDFS, keeps for each `timein`
    * the record with the latest `timeout`, and overwrites the result
    * into the `dbo.ALARMGROUP` SQL Server table.
    */
  def main(args: Array[String]): Unit = {
    // Master/deploy settings are expected to come from spark-submit.
    val spark = SparkSession.builder().appName("sparkreduce ").getOrCreate()

    import spark.implicits._

    try {
      // Parse raw lines into typed alarm records, dropping malformed rows.
      val frame: DataFrame = spark.sparkContext
        .textFile("hdfs://192.168.0.201:8020/spark/data/part-m-0000[0-3]")
        .map(_.split(","))
        .filter(_.length >= MinFields)
        .map(fields => alarm(
          fields(0).trim, fields(1).trim, fields(3).trim,
          fields(5).trim, fields(9).trim))
        .toDF()

      // Rows without a tag name carry no usable information — drop them.
      val nonull = frame.where("tagname <> ''")

      val rddnonull: RDD[Row] = nonull.rdd

      // Re-key by timein; the value is (timeout, tagname).
      val data: RDD[(String, (String, String))] = rddnonull.map { row =>
        (row.getAs[String]("timein"),
          (row.getAs[String]("timeout"), row.getAs[String]("tagname")))
      }

      // For each timein keep the record whose timeout is latest.
      // NOTE(review): assumes timeout strings are valid JDBC timestamps
      // ("yyyy-[m]m-[d]d hh:mm:ss[.f...]"); Timestamp.valueOf throws otherwise.
      val timegroup = data
        .reduceByKey { (a, b) =>
          if (Timestamp.valueOf(a._1).getTime < Timestamp.valueOf(b._1).getTime) b else a
        }
        .map { case (timein, (timeout, tagname)) => (timein, timeout, tagname) }
        // Name the columns explicitly: Spark's JDBC writer puts DataFrame
        // column names into its generated INSERT, and the default _1/_2/_3
        // would not match the existing table (truncate=True keeps its schema).
        // TODO(review): confirm these names against the dbo.ALARMGROUP DDL.
        .toDF("timein", "timeout", "tagname")

      // SECURITY: credentials are hardcoded in source — move them to a
      // config file / secret store and pass via spark-submit --conf.
      timegroup.write.format("jdbc")
        .option("url", "jdbc:sqlserver://192.168.0.200:1433;DatabaseName=sjk")
        .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")
        .option("dbtable", "dbo.ALARMGROUP")
        .option("user", "sa")
        .option("password", "123456")
        // truncate + overwrite: empty the existing table but keep its schema.
        .option("truncate", "True")
        .mode("overwrite")
        .save()
    } finally {
      // Release driver/cluster resources even when the job fails.
      spark.stop()
    }
  }

}
