package com.darrenchan.spark.rdd

import org.apache.spark.{SparkConf, SparkContext}
import org.codehaus.jackson.map.ObjectMapper
import org.joda.time.DateTime

import scala.collection.JavaConversions._
import scala.util.control.NonFatal
import scala.util.matching.Regex

/**
 * Batch job that reads raw streaming log lines from "streaming.txt", extracts
 * per-row metadata (cluster, pipelet, offset, command_no, log time) with regexes,
 * parses the embedded JSON "data" payload with Jackson, and emits one tuple per
 * "nid" id found, keeping only rows with a valid log time and an id starting
 * with "dt".
 */
object SparkStreamingBaiduTest2 {
    def main(args: Array[String]): Unit = {
        val sparkConf = new SparkConf().setMaster("local[2]").setAppName("WordCountApp")
        val sc = new SparkContext(sparkConf)

        // Patterns for pulling metadata fields out of each raw log line.
        // The substring offsets used below are coupled to the literal prefix
        // lengths of these patterns ("cluster:" = 8, "piplet: " = 8,
        // "offset: " = 8, "\"command_no\":" = 13, "\"tm\":\"" = 6) — a pattern
        // and its offset must always change together.
        // NOTE(review): "piplet" (sic) presumably matches the log format
        // literally — confirm against an actual log line before "fixing" it.
        val clusterRegex: Regex = """cluster:\w+""".r
        val pipeletRegex: Regex = """piplet: \d+""".r
        val startPointRegex: Regex = """offset: \d+""".r
        val commandNoRegex: Regex = """\"command_no\":\d+""".r
        val logTimeReg: Regex = """\"tm\":\"\d{13}\"""".r

        val rdd = sc.textFile("streaming.txt")
            // Keep only rows that actually contain the `],"data":` payload separator,
            // so the split(...)(1) below is always safe.
            .filter(row => row.split("""\],"data":""").length > 1)
            .flatMap(row => {
                // Metadata extraction; each field falls back to a neutral default
                // ("" / 0) when its pattern is absent from the row.
                val cluster = clusterRegex.findFirstIn(row) match {
                    case Some(y) => y.substring(8) // drop "cluster:"
                    case _ => ""
                }
                val pipelet = pipeletRegex.findFirstIn(row) match {
                    case Some(y) => y.substring(8).toInt // drop "piplet: "
                    case _ => 0
                }
                val startPoint = startPointRegex.findFirstIn(row) match {
                    case Some(y) => y.substring(8).toLong // drop "offset: "
                    case _ => 0L
                }
                val commandNo = commandNoRegex.findFirstIn(row) match {
                    case Some(y) => y.substring(13) // drop "\"command_no\":"
                    case _ => ""
                }
                // tm is a 13-digit epoch-millis timestamp embedded as "tm":"<13 digits>".
                val tm = logTimeReg.findFirstIn(row) match {
                    case Some(y) => y.substring(6, 19).toLong // digits between the quotes
                    case _ => 0L
                }
                // Render tm as a yyyyMMdd day string; "" marks a missing/invalid tm
                // and is filtered out below.
                val logTime = if (tm > 0) {
                    new DateTime(tm).toString("yyyyMMdd")
                } else {
                    ""
                }

                // Payload is the text between `],"data":` and `,"logFrom`; wrap it in
                // brackets so Jackson parses it as a JSON array of item objects.
                // NOTE(review): ObjectMapper is built per row because it is not
                // serializable for closure capture; consider mapPartitions to
                // amortize construction if this becomes a bottleneck.
                val dataStr = row.split("""\],"data":""")(1).split(""","logFrom""")(0)
                val dataNode = new ObjectMapper().readTree(s"[${dataStr}]")

                // Fixed tag carried on every emitted tuple.
                val keyNo = 7

                // For each item, join its "id" array into a comma-separated string;
                // items without "id" (or that fail to parse) contribute "".
                val idsJsonList: List[String] = dataNode.toList.map(item => {
                    try {
                        if (item.has("id")) {
                            // NOTE(review): debug output — runs on executors; remove
                            // once the job is validated.
                            println(item.get("id").toString)
                            val nids = item.get("id").toList.map(node => node.asText())
                            nids.mkString(",")
                        } else {
                            ""
                        }
                    } catch {
                        // Best-effort per item: malformed entries degrade to "" rather
                        // than failing the task. NonFatal lets fatal errors propagate.
                        case NonFatal(_) => ""
                    }
                })

                // One output tuple per individual nid.
                idsJsonList.flatMap(_.split(",")).map(nid => (cluster, pipelet, startPoint, commandNo, logTime, tm, nid, keyNo))
            })
            // Keep only rows with a resolved log date and a non-empty "dt"-prefixed id.
            .filter(row => row._5 != "" && row._7 != "" && row._7.startsWith("dt")).cache()

        rdd.foreach(println)

        sc.stop()
    }
}
