package cn.cihon.stream.wordcount

import java.util

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer

/**
  * Created by eeexiu on 16-12-28.
  */
object OfflinePart {

  /**
    * Driver entry point.
    *
    * Reads previously persisted offset records — tab-separated
    * "topic \t partition \t offset" lines — from data/wordcount/offset.
    * When fewer than three records exist (but at least one), each line is
    * parsed into an [[OffsetMess]] and the records are re-parallelized as
    * an RDD. Prints a marker line when the offset directory is empty.
    */
  def main(args: Array[String]): Unit = {
    // NOTE(review): master is hard-coded to local[2]; in anything beyond
    // local testing, supply the master via spark-submit instead.
    val conf = new SparkConf().setAppName("wordcount").setMaster("local[2]")
    val sc = new SparkContext(conf)

    //val textRDD = sc.textFile("/var/testreadlocal")
    val textRDD = sc.textFile("data/wordcount/offset")

    // Collect once and reuse. The original code called collect() twice,
    // which re-ran the whole file read: once for the size check and again
    // for the parsing loop.
    val loadedLines = textRDD.collect()
    val loadDataSize = loadedLines.length

    if (loadDataSize < 3 && loadDataSize > 0) {
      // Parse each record directly with map instead of accumulating
      // through a mutable ListBuffer — same result, no mutation.
      val listOffset = loadedLines.toList.map { line =>
        val fields = line.split("\t")
        // Assumes well-formed 3-field records; a malformed line throws
        // (ArrayIndexOutOfBounds / NumberFormatException) — TODO harden.
        OffsetMess(fields(0), fields(1).toInt, fields(2).toLong)
      }
      // NOTE(review): loadDataRDD is built but never consumed (same as the
      // original code) — presumably the start of a reload path; confirm intent.
      val loadDataRDD = sc.parallelize(listOffset)
    }

    if (loadDataSize == 0) {
      println("******hello********world")
    }

    //println("********"+loadDataRDD.collect().size)

    // ---- experimental / superseded code retained from the original ----
/**
    if(textRDD.collect().size < 3){
      val offsetMessRDD = textRDD.map{f =>
        val str = f.split("\t")
        val offset = OffsetMess(str(0),str(1).toInt,str(2).toLong)
        offset
      }
      val getLastOffsetRDD = sc.textFile("data/wordcount/part-00001")
      val lastOffsetMessRDD = getLastOffsetRDD.map{f =>
        val str = f.split("\t")
        val offset = OffsetMess(str(0),str(1).toInt,str(2).toLong)
        offset
      }
      val allRDD = offsetMessRDD.union(lastOffsetMessRDD)
      val groupKeyRDD = allRDD.map(f => (f.partition,f))
      val collectRDD  = groupKeyRDD.groupByKey
      val valuesRDD = collectRDD.mapValues{f =>
        val list = f.toList
        val topicAndOffset = list match {
          case i if i.size == 2 => getMaxOffset(i)
          case _ => (list(0).topicName,list(0).offset)
        }
        topicAndOffset
      }
      val resultRDD = valuesRDD.map{f =>
        f._2._1+"\t"+f._1+"\t"+f._2._2
      }
      val offsetResultRDD = resultRDD.cache()
      DeletePath("data/wordcount/offset/")
      offsetResultRDD.saveAsTextFile("data/wordcount/offset/")
      //resultRDD.collect().foreach(f => println("**********"+f))
//      val getLastOffsetRDD = sc.textFile("data/wordcount/part-00001")
//      val lackRDD = getLastOffsetRDD.subtract(getLastOffsetRDD)
//      val updateRDD = lackRDD.union(textRDD)
//      updateRDD.collect().foreach(f => println("*********"+f))
    }
  */
    //println("**********"+offsetMessRDD.collect().size)
    /**
    val getLastOffsetRDD = sc.textFile("data/wordcount/part-00002")
    val lastOffsetMessRDD = getLastOffsetRDD.map{f =>
      val str = f.split("\t")
      val offset = OffsetMess(str(0),str(1).toInt,str(2).toLong)
      (offset.partition,offset)
    }
    val getJoinRDD = offsetMessRDD.join(lastOffsetMessRDD)
    val compareRDD = getJoinRDD.map { f =>
        if(f._2._1.offset >= f._2._2.offset){
          f._2._1
        }else{
          f._2._2
        }
    }
    compareRDD.collect().foreach(f => println("*********"+f))

      */
  }

  /**
    * Returns the (topicName, offset) pair of the record holding the
    * largest offset in `list`.
    *
    * Fixes two defects in the original implementation: it indexed
    * list(0)/list(1) directly (throwing on lists of any other size), and
    * its second branch paired list(0)'s topicName with list(1)'s offset —
    * an inconsistent tuple. `maxBy` handles any non-empty list and keeps
    * topic and offset from the same record. On ties the earliest record
    * wins, matching the original `>=` comparison.
    *
    * @param list non-empty list of offset records for one partition
    * @return the topic name and offset of the record with the max offset
    */
  def getMaxOffset(list: List[OffsetMess]): (String, Long) = {
    val best = list.maxBy(_.offset)
    (best.topicName, best.offset)
  }

  /**
    * Recursively deletes `path` on the default Hadoop FileSystem.
    * Used to clear the offset output directory before saveAsTextFile,
    * which fails if the target already exists.
    *
    * Name kept as `DeletePath` for caller compatibility, although Scala
    * convention would be lowerCamelCase `deletePath`.
    *
    * @param path filesystem path to remove (deleted recursively)
    */
  def DeletePath(path: String): Unit = {
    val fs: FileSystem = FileSystem.get(new Configuration)
    fs.delete(new Path(path), true)
  }
}

/** One persisted offset record: a (topic, partition, offset) triple parsed
  * from a tab-separated line of the offset file.
  */
case class OffsetMess(topicName: String, partition: Int, offset: Long)
