from pyspark.sql import SparkSession
from loadData import load_data
from pyspark.ml.feature import Word2VecModel
from pyspark.ml.feature import CountVectorizerModel
from pyspark.ml.feature import IDFModel
from util.sortByTfidf import sort_by_tfidf
from util.appendIndex import append_index
from pyspark.ml.feature import BucketedRandomProjectionLSHModel
from dataStorage import data_storage
import findspark
# Point sys.path at the local Spark installation.
# NOTE(review): findspark.init() normally has to run BEFORE `import pyspark`
# (the imports above) to have any effect; if this script runs as-is, pyspark
# is already importable and this call is redundant — confirm.
findspark.init(spark_home='/usr/local/spark/',python_path='/home/master/LoadData/venv/bin/python')

# Local-mode Spark session used for all DataFrame/SQL work below.
spark = SparkSession.builder.appName('SparkHBaseRDD').master('local[*]').getOrCreate()
sc=spark.sparkContext

print('加载数据')  # "loading data"
# load_data() (loadData.py, not visible here) returns two lists of row-like
# records; their schema is inferred by createDataFrame below — verify there.
s_list, e_list = load_data()

s = spark.createDataFrame(s_list)
e = spark.createDataFrame(e_list)

print('导入模型')  # "loading models"
# Pre-trained Word2Vec model; getVectors() yields a DataFrame(word, vector)
# used later to look up an embedding for every keyword.
w2v_model_e = Word2VecModel.load("models/word2vec_model/python.word2vec")
vectors = w2v_model_e.getVectors()

# CountVectorizer turns both datasets into term-frequency vectors.
cv_model = CountVectorizerModel.load('models/CV.model')
cv_result_s = cv_model.transform(s)
cv_result_e = cv_model.transform(e)

# IDF re-weights the raw counts into TF-IDF vectors.
idf_model = IDFModel.load('models/IDF.model')
tfidf_result_e = idf_model.transform(cv_result_e)
tfidf_result_s = idf_model.transform(cv_result_s)

print('计算权重')  # "computing weights"
# sort_by_tfidf (util/sortByTfidf.py, not visible here) presumably yields
# (doc id, vocabulary index, tf-idf weight) triples — the toDF column names
# suggest so; confirm against that helper.
keywords_by_tfidf_e = tfidf_result_e.rdd.mapPartitions(sort_by_tfidf).toDF(['id', 'index', 'weights'])
keywords_by_tfidf_s = tfidf_result_s.rdd.mapPartitions(sort_by_tfidf).toDF(['id', 'index', 'weights'])

# (keyword, idf) pairs, one per vocabulary entry, aligned by position.
keywords_list_with_idf = list(zip(cv_model.vocabulary, idf_model.idf.toArray()))
# append_index's return value is unused, so it must mutate the list in
# place — presumably appending each entry's vocabulary index (the third
# 'index' column below); verify in util/appendIndex.py.
append_index(keywords_list_with_idf)

rdd = sc.parallelize(keywords_list_with_idf)

idf_keywords = rdd.toDF(['keywords', 'idf', 'index'])

# Map vocabulary indices back to keyword strings for both datasets ...
keywords_result_s = keywords_by_tfidf_s.join(idf_keywords, idf_keywords.index == keywords_by_tfidf_s.index).select(['id', 'keywords', 'weights'])
keywords_result_e = keywords_by_tfidf_e.join(idf_keywords, idf_keywords.index == keywords_by_tfidf_e.index).select(["id", "keywords", "weights"])

# ... then attach each keyword's word2vec embedding. The inner join silently
# drops keywords that are absent from the Word2Vec vocabulary.
keywords_vector_s = keywords_result_s.join(vectors, vectors.word == keywords_result_s.keywords, 'inner')
keywords_vector_e = keywords_result_e.join(vectors, vectors.word == keywords_result_e.keywords, 'inner')

def compute_vector(row):
    """Scale one keyword's embedding by its TF-IDF weight.

    Expects a row with ``id``, ``keywords``, ``weights`` and ``vector``
    attributes; returns an ``(id, keywords, weighted_vector)`` tuple
    suitable for rebuilding a DataFrame.
    """
    weighted = row.weights * row.vector
    return row.id, row.keywords, weighted

# Group the weighted keyword vectors of each document into one array column.
# FIX: DataFrame.registerTempTable has been deprecated since Spark 2.0;
# createOrReplaceTempView is the supported, behavior-equivalent replacement
# and makes the deliberate reuse of the 'temptable' view name explicit.
article_keyword_vectors_e = keywords_vector_e.rdd.map(compute_vector).toDF(["id", "keywords", "weightingVector"])
article_keyword_vectors_e.createOrReplaceTempView('temptable')
# collect_set de-duplicates identical weighted vectors within one document.
article_keyword_vectors_e = spark.sql("select id, collect_set(weightingVector) vectors from temptable group by id")

article_keyword_vectors_s = keywords_vector_s.rdd.map(compute_vector).toDF(["id", "keywords", "weightingVector"])
article_keyword_vectors_s.createOrReplaceTempView('temptable')  # replaces the 'e' view; its SQL above already ran
article_keyword_vectors_s = spark.sql("select id, collect_set(weightingVector) vectors from temptable group by id")

def compute_avg_vectors(row):
    """Average the weighted keyword vectors of one document.

    Expects a row with ``id`` and a non-empty ``vectors`` collection;
    returns ``(id, mean_vector)``. An empty ``vectors`` raises
    ZeroDivisionError, exactly as the original accumulator loop did.
    """
    # sum() starts from 0 and left-adds each element — identical to the
    # original `x = 0; for i in ...: x += i` accumulation.
    total = sum(row.vectors)
    return row.id, total / len(row.vectors)

article_vector_s = article_keyword_vectors_s.rdd.map(compute_avg_vectors).toDF(['id', 'articlevector'])
article_vector_e = article_keyword_vectors_e.rdd.map(compute_avg_vectors).toDF(['id', 'articlevector'])

print('计算相似性')
train_e = article_vector_e.select(['id', 'articlevector'])
train_s = article_vector_s.select(['id', 'articlevector'])
LSHmodel = BucketedRandomProjectionLSHModel.load("models/LSH.model")
similar = LSHmodel.approxSimilarityJoin(train_e, train_s, 2.0, distCol='EuclideanDistance')
similar.show()

data_storage(similar.collect())