package com.algo.url

import java.text.SimpleDateFormat
import java.util.Calendar
import java.util.Date
import java.net.URLDecoder

import org.apache.spark._
import org.apache.spark.broadcast
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD

import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.LongWritable

import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap

import com.miaozhen.utils.MzSequenceFileInputFormat
import com.algo.utils.PanelUtils
import com.algo.utils.ContainerUtils
import com.algo.utils.InputFileList

object PanelFeature {

  // HDFS path constants.
  // NOTE(review): these point at a personal user directory ("lujianfeng");
  // presumably meant to be externalized to configuration — confirm before reuse.
  val panelFeatureDir = "/user/algo/lujianfeng/panelfeature"  // per-month cached features, written/read as objectFile
  val monitorEtlDir = "/tong/data/output/dailyMerger"  // daily-merged monitor ETL logs (sequence files)
  val panelDir = "/user/algo/lujianfeng/panel"  // panel membership input, one dir per month ("l" + yyyyMM)
  val panelLog = "/user/algo/lujianfeng/panellog"  // text output of generatePanelLog
 
  val numPartitions = 150  // partition count for repartition/groupByKey
  val word2VecDim = 200  // dimensionality of the word2vec embedding vectors

  // Currently only referenced by a commented-out filter in getWord2VecFeature.
  val effectSampleThreshold = 200 
  // Selects the feature-generation path in getPanelFeatureMid (0 or 2 accepted).
  // NOTE(review): mutable global; both accepted values call the same generator.
  var JobType = 0
  /**
   * Builds one fixed-length word2vec feature vector per panel member for the
   * given month range by summing word embeddings weighted by per-word weight.
   *
   * @param fromMonth start month, "yyyyMM" (inclusive)
   * @param toMonth   end month, "yyyyMM" (inclusive)
   * @param sc        active SparkContext
   * @return RDD of (uuid, (dense vector of length word2VecDim, label))
   */
  def getWord2VecFeature(fromMonth: String, toMonth: String, sc: SparkContext)
  : RDD[(String, (Array[Double], String))] = {
    val midFeatures = getPanelFeatureMid(fromMonth, toMonth, sc)
    // Broadcast the word -> embedding lookup table once per executor.
    val embeddingBc = sc.broadcast(Word2VecUtil.getWordVecMap("outofw2v", word2VecDim))
    midFeatures.mapPartitions { partition =>
      val embeddings = embeddingBc.value
      partition.map { case (uuid, (wordWeights, label)) =>
        // Accumulate weight * embedding over all known words into one vector.
        val summed = wordWeights.foldLeft(new Array[Double](word2VecDim)) { (vec, wordAndWeight) =>
          val emb = embeddings.get(wordAndWeight._1)
          // NOTE(review): the null check implies the lookup returns null on a
          // miss (Java-style map) — confirm against Word2VecUtil.
          if (emb != null) {
            var i = 0
            while (i < word2VecDim) {
              vec(i) += emb(i) * wordAndWeight._2
              i += 1
            }
          }
          vec
        }
        (uuid, (summed, label))
      }
    }
  }

  /**
   * Loads (or lazily generates) the per-month "mid" feature RDDs for every
   * month in [fromMonth, toMonth] and unions them into one RDD keyed by uuid.
   *
   * For each month: if a cached objectFile exists under panelFeatureDir/<yyyyMM>
   * it is loaded and the cursor advances; otherwise the feature is generated
   * first and the same month is re-checked on the next loop iteration.
   *
   * @param fromMonth start month "yyyyMM" (inclusive)
   * @param toMonth   end month "yyyyMM" (inclusive)
   * @param sc        active SparkContext
   * @return RDD of (uuid, (word -> weight map, label)); for multiple months the
   *         maps are merged per uuid by summing weights, keeping the left label
   * @throws RuntimeException when JobType is neither 0 nor 2
   */
  def getPanelFeatureMid(fromMonth: String, toMonth: String, sc: SparkContext):
	  RDD[(String, (scala.collection.mutable.HashMap[String, Double], String))] = {
    
    val simpleDateFormat = new SimpleDateFormat("yyyyMM")
    val fromCld = Calendar.getInstance()
    fromCld.setTime(simpleDateFormat.parse(fromMonth))
    val toCld = Calendar.getInstance
    toCld.setTime(simpleDateFormat.parse(toMonth))
    // i counts how many month files were actually loaded.
    var i = 0
    val hdfs = FileSystem.get(sc.hadoopConfiguration)
    val lines = new ArrayBuffer[RDD[(String, (scala.collection.mutable.HashMap[String,Double], String))]]()
    // NOTE(review): the cursor only advances when a file was loaded, so a
    // missing month triggers generation and is then re-checked. If generation
    // ever fails to produce the expected path, this loop never terminates —
    // consider failing explicitly after one generation attempt.
    while(fromCld.compareTo(toCld) <= 0){
      val time = simpleDateFormat.format(fromCld.getTime())
      val fileName = new StringBuilder(panelFeatureDir).append("/").append(time).toString
      if(hdfs.exists(new Path(fileName))){
    	  println("file found" + fileName)
    	  lines.append(sc.objectFile[(String, (scala.collection.mutable.HashMap[String,Double], String))](fileName))
    	  fromCld.add(Calendar.MONTH, 1)
    	  i = i + 1
      }
      else{
        println("begin generate feature")
        // NOTE(review): JobType 0 and 2 both dispatch to generateFeatureMid1 —
        // looks like a second generator was planned; confirm intent.
        if(JobType == 0)
          generateFeatureMid1(time, sc)
        else if(JobType == 2)
          generateFeatureMid1(time, sc)
        else throw new RuntimeException("error JobType")
        println("end generate feature")
      }
    }
    // Single month: no cross-month merge needed (assumes unique uuids within a
    // month's file). Multiple months: merge per-uuid maps by summing weights.
    if(i == 1) sc.union(lines)
    else
      sc.union(lines).reduceByKey((s1, s2) => 
        (ContainerUtils.combine(s1._1, s2._1, _+_), s1._2))
  }
  /**
   * Filters one month of raw monitor ETL logs down to the records that belong
   * to panel members and writes them as text to panelLog/<month>.
   *
   * Fix: the previous version called record.substring with positions derived
   * from indexOf without checking for -1, so any record missing a "uuid="
   * field (or a "^" terminator) threw StringIndexOutOfBoundsException and
   * killed the job. Malformed records are now simply dropped.
   *
   * @param month month to process, "yyyyMM"
   * @param sc    active SparkContext
   */
  def generatePanelLog(month: String, sc: SparkContext): Unit = {
    //step 1: read panel and cookie mapping data
    println("Start read panel")

    val panel = PanelUtils.getPanel(sc, panelDir + "/l" + month)
    // Broadcast the panel uuid set so each executor can filter locally.
    val uuids = sc.broadcast(panel.map(tp => tp._1).collect.toSet)

    //step 2: scan the month's logs and keep only panel members' records
    sc.newAPIHadoopFile(monitorEtlDir + "/" + month + "*/campaign*",
        classOf[MzSequenceFileInputFormat], classOf[LongWritable], classOf[Text])
      .mapPartitions(tp => {
        val uuidSet = uuids.value
        tp.map(_._2.toString()).filter(record => {
          // Extract the value of the "uuid=" field ("^"-delimited record).
          val keyPos = record.indexOf("uuid=")
          if (keyPos < 0) false  // no uuid field: drop instead of crashing
          else {
            val startPos = keyPos + 5
            val endPos = record.indexOf("^", startPos)
            // If there is no trailing "^", uuid is the last field — take the
            // rest of the record rather than throwing.
            val uuid =
              if (endPos >= 0) record.substring(startPos, endPos)
              else record.substring(startPos)
            uuidSet.contains(uuid)
          }
        })
      })
      .saveAsTextFile(panelLog + "/" + month)
  }
  /** Entry point: args(0) is the month ("yyyyMM") whose panel log to generate. */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    val month = args(0)
    generatePanelLog(month, sc)
  }
  /**
   * Generates the per-month "mid" feature for `month` and caches it under
   * panelFeatureDir/<month> as an objectFile.
   *
   * Pipeline: read panel members -> broadcast their uuid set plus a
   * url -> keywords lookup -> scan the month's monitor logs, keep panel
   * members' records, explode each record's page keywords (each weighted
   * 1/#keywords) -> aggregate per uuid into a keyword -> weight map -> join
   * back with the panel (attaching the label) -> save.
   *
   * @param month month to process, "yyyyMM"
   * @param sc    active SparkContext
   */
  def generateFeatureMid1(month: String, sc: SparkContext){
    
    //step 1: read panel and cookie mapping data
    println("Start read panel")
    
    val panel = PanelUtils.getPanel(sc, panelDir + "/l" + month).repartition(numPartitions)
    val total = panel
    
    //step 3: broadcast all unique uuids and read monitor record data
    val uuids = sc.broadcast(total.map(tp => tp._1).collect.toSet)
    println("uuids number = " + uuids.value.size)
    
    //broadcast url to key words map
    // NOTE(review): reads a hard-coded local CSV on the driver; the resulting
    // map is broadcast to executors. Confirm the path/key before reuse.
    val urlKeyWords  = sc.broadcast(new SpidKeyWordsMapFactory("/home/algo/lujianfeng/urltitle/htmlresult.csv", "keywords_201501_02").getSpidKeyWordsMap())
    
    val uuidSpid = sc.newAPIHadoopFile(monitorEtlDir + "/" + month + "*/campaign*",
        classOf[MzSequenceFileInputFormat], classOf[LongWritable], classOf[Text])
        .mapPartitions(tp => {
          val uuidSet = uuids.value
          val urlWords = urlKeyWords.value
          // Debug aid: log executor memory after an explicit GC.
          System.gc()
          val freeMem = Runtime.getRuntime().freeMemory()
          val totalMem = Runtime.getRuntime().totalMemory()
          println("map freeMem = " + freeMem + " totalMem = " + totalMem)
          // Parse each "^"-delimited record into key=value pairs.
          // NOTE(review): kv(1) throws on a field without '=' — assumes the
          // ETL format guarantees well-formed fields; confirm upstream.
          tp.flatMap(ts => {val items = ts._2.toString().split("\\^").map(s => {val kv = s.split("=")
    	  	(kv(0), kv(1))}).toMap
    	  	val uuid = items("uuid");
    	  	if(uuidSet.contains(uuid)){
			// Prefer the "c" field as the page url, falling back to "pr".
			val urlc = items.get("c")
			val urlpr = if(urlc.isEmpty) items.get("pr")
				else null
			// Re-escape stray '%' (not followed by two hex digits) so
			// URLDecoder.decode does not throw IllegalArgumentException.
    	  	  val url = if(urlc.isDefined)
    	  		  	URLDecoder.decode( urlc.get.replaceAll("%(?![0-9a-fA-F]{2})", "%25"), "UTF-8")
	  			  else if(urlpr.isDefined)
	  				  URLDecoder.decode( urlpr.get.replaceAll("%(?![0-9a-fA-F]{2})", "%25"), "UTF-8")
	  			  else null
			// NOTE(review): null check implies urlWords returns null on a
			// miss (Java-style map) — confirm against SpidKeyWordsMapFactory.
			val words = if(url != null)	urlWords.get(url)
				else null
			// Keywords are '|'-separated; keep only those longer than 1 char.
  			  val arrayBuf = if(words != null ){
  			    val keyWords = words.split("\\|");
  			    keyWords.foldLeft(new ArrayBuffer[String]())(
  			        (arr, elem) => {if(elem.size > 1) arr.append(elem); arr})
  			  }else new ArrayBuffer[String]()
    	  	  
    	  	  // Uniform weight per keyword; an empty buffer emits nothing, so
    	  	  // the division by size is never evaluated when size == 0.
    	  	  arrayBuf.map(s => (uuid, (s, 1.0/arrayBuf.size)))
    	  	}
    	  	else Iterator()})
        }).groupByKey(numPartitions)
    	// Sum weights per (uuid, keyword) into a mutable HashMap.
    	// NOTE(review): groupByKey shuffles full groups; aggregateByKey would
    	// combine map-side — consider if this stage is a bottleneck.
    	.map(tp => { val spidPv = tp._2.foldLeft(HashMap.empty[String, Double])(
    				(pv, p) => {
	    				if(pv.contains(p._1))
	    					pv.put(p._1, pv.get(p._1).get+p._2)
	    				else pv.put(p._1, p._2)
    	    			pv })
    		(tp._1, spidPv) })
    
    // Attach each uuid's label via the panel, then persist for reuse by
    // getPanelFeatureMid.
    val panelFeatureMid1 = uuidSpid.join(total)
    panelFeatureMid1.saveAsObjectFile(new StringBuilder(panelFeatureDir).append("/").append(month).toString)
  }
}
