package com.spark.graph

import org.ansj.splitWord.analysis.BaseAnalysis
import org.apache.spark.graphx.Graph
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.{SparseVector => SV}
import org.apache.spark.rdd.RDD

/**
  * Created by Administrator on 2017/7/27.
  */
object zjolGraph {

  /**
    * Entry point: loads tab-separated article rows from a local file, runs the
    * grouped clustering pipeline, and prints the elapsed time plus row counts.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val sim = 0.4 // NOTE(review): declared but never passed on to the clustering call
    val sparkConf = new SparkConf().setAppName("zjol").setMaster("local")
    val sc = new SparkContext(sparkConf)

    // Each input line is tab-separated; split it into its column array.
    val fields = sc.textFile("F:\\data\\data.txt").map(line => line.split("\t"))
    // Rows keyed by the numeric article id in column 0.
    val byId = fields.map(cols => (cols(0).toLong, cols))
    // Rows grouped by column 2, each group re-keyed by its first member's id.
    val byGroup = fields
      .map(cols => (cols(2), cols))
      .groupByKey()
      .map(_._2)
      .map(group => (group.head(0).toLong, group))

    val started = System.currentTimeMillis()
    //val noGroupData = cluster(sc, byId)
    val noGroupData = groupCluster(sc, byId)
    val finished = System.currentTimeMillis()

    println(finished - started)
    println(byId.count() + " = " + byGroup.count())
  }

  /**
    * Clusters articles after first collapsing rows that share the same
    * column-2 value into a single representative, then splices every member
    * of each collapsed group back into the resulting clusters.
    *
    * @param sc      active Spark context
    * @param initRDD articles keyed by id; value is the raw column array
    * @param sim     minimum cosine similarity for two articles to be linked
    * @param tfCount number of top TF-IDF features kept per article
    * @return RDD of (cluster title, summary fields, member rows)
    */
  def groupCluster(sc: SparkContext, initRDD: RDD[Tuple2[Long, Array[String]]], sim: Double = 0.6, tfCount: Int = 10) = {
    // Group identical articles; key each group by its first member's id.
    val groupRdd = initRDD.map(x => (x._2(2), x._2)).groupByKey().map(x => x._2).map(x => (x.head(0).toLong, x))
    // Only one representative row per group feeds the expensive clustering step.
    val groupInitRdd = groupRdd.map(x => (x._2.head(0).toLong, x._2.head))
    // BUG FIX: sim and tfCount were previously dropped here, so cluster()
    // always ran with its own defaults regardless of the caller's arguments.
    val data = cluster(sc, groupInitRdd, sim, tfCount)
    // Broadcast only the multi-member groups, keyed by id string so each
    // lookup below is O(1) instead of a linear scan of the collected array.
    val bGroup = sc.broadcast(
      groupRdd.filter(_._2.size > 1).map(g => (g._1.toString, g._2)).collect().toMap)
    val newData = data.map(x => {
      var resX = x._3.toList
      x._3.foreach(y => {
        // y(0) is a representative's id; splice its whole group back in.
        bGroup.value.get(y(0)).foreach(members => resX = resX ::: members.toList)
      })
      (x._1, x._2, resX)
    })
    newData
  }


  /**
    * Clusters articles by content similarity.
    *
    * Pipeline: ansj tokenization -> hashed TF -> TF-IDF -> keep each article's
    * top `tfCount` features -> inverted index to generate candidate pairs ->
    * cosine similarity filtered by `sim` -> connected components -> keep the
    * 50 largest clusters with at least 3 members.
    *
    * @param sc      active Spark context
    * @param initRDD articles keyed by id; columns: (0)=id, (1)=content, (2)=title
    * @param sim     minimum cosine similarity for an edge between two articles
    * @param tfCount number of top TF-IDF features retained per article
    * @return RDD of (representative title, summary fields, member id arrays)
    */
  def cluster(sc: SparkContext, initRDD: RDD[Tuple2[Long, Array[String]]], sim: Double = 0.6, tfCount: Int = 10) = {
    // Tokenize the content column with ansj; keep words, drop the POS tags.
    val documents = initRDD.map(a => {
      (a._1, BaseAnalysis.parse(a._2(1)).toStringWithOutNature(" "))
    }).map(a => (a._1, a._2.split(" ").toSeq))

    // Term frequencies hashed into a 2^18-dimensional feature space.
    val hashingTF = new HashingTF(Math.pow(2, 18).toInt)
    val tf_num_pairs = documents.map {
      case (num, seq) =>
        val tf = hashingTF.transform(seq)
        (num, tf)
    }

    // Fit IDF on the whole corpus, then build a sparse TF-IDF vector per article.
    val idf = new IDF().fit(tf_num_pairs.values)
    val num_idf_pairs = tf_num_pairs.mapValues(v => idf.transform(v)).map(a => (a._1, a._2.toSparse))

    // Keep only the tfCount highest-weighted feature ids: (articleId, [featureId]).
    val indexArray_pairs = num_idf_pairs.map(a => {
      val indices = a._2.indices
      val values = a._2.values
      val result = indices.zip(values).sortBy(-_._2).take(tfCount).map(_._1).toArray
      (a._1, result)
    })
    indexArray_pairs.cache()

    // Inverted index: featureId -> ids of articles whose top features include it.
    val index_idf_pairs = indexArray_pairs.flatMap(a => {
      a._2.map((_, a._1))
    }).groupByKey()

    // Broadcast the inverted index and the TF-IDF vectors to all executors.
    // NOTE(review): collect() materializes the whole corpus on the driver —
    // acceptable only for small data sets.
    val b_index_idf_pairs = sc.broadcast(index_idf_pairs.collect.toMap)
    val b_idf_parirs = sc.broadcast(num_idf_pairs.collect.toMap)

    // Candidate generation + cosine similarity: only articles sharing at least
    // one top feature are compared. NOTE(review): the candidate list still
    // contains the article itself (and duplicates when several features are
    // shared); self-edges are harmless for connected components.
    val docSims = indexArray_pairs.flatMap(a => {
      // All article ids that share one of this article's top features.
      val ids: List[Long] = a._2.toList.flatMap(b => b_index_idf_pairs.value(b).toList)
      // Their TF-IDF vectors.
      val idfs: List[(Long, SV)] = ids.map(b => (b, b_idf_parirs.value(b)))
      // This article's own TF-IDF vector.
      val sv1 = b_idf_parirs.value(a._1)
      import breeze.linalg._
      val bsv1 = new SparseVector[Double](sv1.indices, sv1.values, sv1.size)
      val result = idfs.map {
        case (id2, sv2) =>
          val bsv2 = new SparseVector[Double](sv2.indices, sv2.values, sv2.size)
          // Cosine similarity of the two TF-IDF vectors.
          val cosSim = bsv1.dot(bsv2) / (norm(bsv1) * norm(bsv2))
          // (article 1, article 2, similarity)
          (a._1, id2, cosSim)
      }
      result.filter(r => r._3 >= sim)
    })
    // Edge tuples between similar articles.
    val vertexrdd = docSims.map(a => {
      (a._2, a._1)
    })

    // Connected components assign a cluster id to every linked article.
    val graph = Graph.fromEdgeTuples(vertexrdd, 1)
    val graphots = Graph.graphToGraphOps(graph).connectedComponents().vertices
    // (clusterId, (row, articleId))
    val simrdd = initRDD.join(graphots).map(a => {
      (a._2._2, (a._2._1, a._1))
    })

    // Keep clusters with >= 3 members; take the 50 largest.
    val simrddtop = simrdd.groupByKey().filter(a => a._2.size >= 3).sortBy(-_._2.size).take(50)
    val simrdd2 = sc.parallelize(simrddtop, 18)

    val resultRDD = simrdd2.map(a => {
      val data = a._2.toList.take(10)
      // Choose the most representative title among the first 10 members.
      val titles = data.map(b => b._1(2)).toArray
      val str = mostSimilartyTitle(titles)
      val guids = a._2.map(b => Array(b._2 + "")).toArray
      // BUG FIX: the original loop guard was `item != null`, so the search for
      // the row matching the chosen title never executed and data(0) was
      // always used; now the matching row is actually looked up.
      val item: Array[String] =
        if (!"".equals(str)) data.find(d => d._1(2).equals(str)).map(_._1).getOrElse(data(0)._1)
        else data(0)._1
      (item(2), Array(item(0), item(1), item(2), a._2.size, item.length), guids)
    })
    resultRDD
  }


  /**
    * Levenshtein (minimum edit) distance between two strings.
    *
    * @param s first string
    * @param t second string
    * @return number of single-character insertions, deletions or
    *         substitutions needed to turn `s` into `t`
    */
  def ld(s: String, t: String): Int = {
    val sLen = s.length
    val tLen = t.length
    // NOTE: the original `if (sLen == 0) tLen` / `if (tLen == 0) sLen` lines
    // were dead code (their values were discarded); the DP below already
    // handles empty inputs via the first row/column initialization.
    val d = Array.ofDim[Int](sLen + 1, tLen + 1)
    for (i <- 0 to sLen) d(i)(0) = i
    for (j <- 0 to tLen) d(0)(j) = j
    for (i <- 1 to sLen; j <- 1 to tLen) {
      val cost = if (s.charAt(i - 1) == t.charAt(j - 1)) 0 else 1
      d(i)(j) = Math.min(Math.min(d(i - 1)(j) + 1, d(i)(j - 1) + 1), d(i - 1)(j - 1) + cost)
    }
    d(sLen)(tLen)
  }

  /**
    * Normalized similarity in [0, 1]: 1 - editDistance / max(length).
    *
    * @param src first string
    * @param tar second string
    * @return 1.0 for identical strings, 0.0 for completely different ones
    */
  def similarity(src: String, tar: String): Double = {
    val maxLen = Math.max(src.length, tar.length)
    // BUG FIX: two empty strings previously produced 0/0 = NaN;
    // treat them as identical instead.
    if (maxLen == 0) 1.0
    else 1 - ld(src, tar) / (maxLen * 1.0)
  }

  /**
    * Picks, out of a group of strings, the one whose summed pairwise
    * similarity to all the others is highest; "" when fewer than two strings.
    */
  def mostSimilartyTitle(strs: Array[String]): String = {
    var scores: Map[String, Double] = Map()
    // Accumulate each string's total similarity against every other string.
    for {
      i <- strs.indices
      j <- i + 1 until strs.length
    } {
      val pairScore = similarity(strs(i), strs(j))
      scores += (strs(i) -> (scores.getOrElse(strs(i), 0.0) + pairScore))
      scores += (strs(j) -> (scores.getOrElse(strs(j), 0.0) + pairScore))
    }
    // Highest accumulated score wins; empty map means no pair was compared.
    if (scores.isEmpty) ""
    else scores.toSeq.sortWith(_._2 > _._2).head._1
  }

}
