import os
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.ml.feature import Word2Vec, Word2VecModel
from pyspark.sql.functions import format_number as fmt
from pyspark.mllib.stat import Statistics
import time
import numpy as np


"""Vector computation: train a Word2Vec model on achievement summaries and
compute pairwise similarities between achievements."""


# Point PySpark workers at the Python 3.7 interpreter installed on the
# cluster nodes (must match the driver's Python major/minor version).
os.environ['PYSPARK_PYTHON']='/usr/python/bin/python3.7'


# Read a MySQL table into a Spark DataFrame over JDBC.
def mysql_data_reading(spark, db, table, user="root", password="root"):
    """Load *table* from the JDBC url *db* and return it as a DataFrame.

    Args:
        spark: active SparkSession.
        db: JDBC connection url, e.g. "jdbc:mysql://host:3306/dbname".
        table: table name (or parenthesized subquery) to read.
        user, password: MySQL credentials. Defaults preserve the previously
            hard-coded values so existing callers are unaffected.

    Returns:
        pyspark.sql.DataFrame with the table contents.
    """
    tableDF = (spark.read.format("jdbc")
               .option("url", db)
               .option("driver", "com.mysql.cj.jdbc.Driver")
               .option("dbtable", table)
               .option("user", user)
               .option("password", password)
               .load())

    return tableDF

# Write a DataFrame into a MySQL table over JDBC.
def dataframe_mysql_write(df, conf, user="root", password="root", mode="overwrite"):
    """Persist *df* into the MySQL table described by *conf*.

    Args:
        df: DataFrame to write.
        conf: dict with keys "url" (JDBC url) and "table" (target table).
        user, password: MySQL credentials. Defaults preserve the previously
            hard-coded values so existing callers are unaffected.
        mode: JDBC save mode; "overwrite" (the default) replaces the table.
    """
    auth_mysql = {"user": user, "password": password}
    df.write.jdbc(conf["url"], conf["table"], mode=mode, properties=auth_mysql)


def words(partitions):
    """Partition mapper: jieba-segment each row's ``summary`` field and yield
    a 1-tuple holding the filtered keyword list for that row.

    Designed for ``rdd.mapPartitions``: jieba and its dictionaries are
    imported/loaded inside the function so the work happens once per
    partition on the executors, not on the driver.

    Keeps: nouns (flags starting with "n"), custom-dictionary terms
    (flag "x") longer than 1 char, and English tokens (flag "eng") longer
    than 2 chars. Drops stop words and 1-char tokens.
    """
    import jieba
    import jieba.posseg as pseg
    import codecs

    # Custom keyword dictionary so domain terms are segmented as whole words.
    user_dict_path = "/root/data/word/key_word.txt"
    jieba.load_userdict(user_dict_path)

    # Stop-word list, one entry per line.
    stopwords_path = "/root/data/word/stop_word.txt"

    def get_stopwords_list():
        """Return the stop-word list with surrounding whitespace stripped."""
        # Context manager ensures the file handle is closed (the original
        # left it open).
        with codecs.open(stopwords_path) as f:
            return [line.strip() for line in f]

    # All stop words, loaded once per partition.
    stopwords_list = get_stopwords_list()

    def cut_sentence(sentence):
        """Segment *sentence* and filter the tokens as described above."""
        # eg: [pair('今天', 't'), pair('有', 'd'), pair('雾', 'n'), pair('霾', 'g')]
        seg_list = pseg.lcut(sentence)
        # BUG FIX: the original compared ``i.flag`` (the POS tag) against the
        # stop-word list, so stop words were never actually removed; filter
        # on the word itself as the surrounding comments intend.
        seg_list = [i for i in seg_list if i.word not in stopwords_list]
        filtered_words_list = []
        for seg in seg_list:
            if len(seg.word) <= 1:
                continue
            elif seg.flag == "eng":
                # English tokens must be longer than 2 characters.
                if len(seg.word) > 2:
                    filtered_words_list.append(seg.word)
            elif seg.flag.startswith("n"):
                # Nouns: n, nr, ns, nz, ...
                filtered_words_list.append(seg.word)
            elif seg.flag == "x":
                # Custom-dictionary term. ("eng" was redundant here — it is
                # already consumed by the earlier branch.)
                filtered_words_list.append(seg.word)
        return filtered_words_list

    for row in partitions:
        yield (cut_sentence(row.summary),)

# Train a Word2Vec model on the tokenized summaries and persist it.
def make_vector_save(df, hdfs_path, file_path):
    """Fit a Word2Vec model on df["words"] and save it to two locations.

    Args:
        df: DataFrame with a "words" column of token lists.
        hdfs_path: "hdfs://..." destination for the fitted model.
        file_path: "file://..." destination for the fitted model.

    Returns:
        The fitted Word2VecModel (new; previously discarded), so callers can
        reuse it without reloading from disk.

    NOTE(review): ``model.save`` raises if the target path already exists;
    delete the old model first or switch to
    ``model.write().overwrite().save(...)`` if reruns should overwrite.
    """
    # Train the vector model (100-dimensional embeddings).
    print("====================正在训练模型========================")
    word2Vec = Word2Vec(vectorSize=100, inputCol="words", outputCol="model")
    model = word2Vec.fit(df)
    # Persist the model to HDFS and to the local filesystem.
    print("====================正在保存模型========================")
    model.save(hdfs_path)
    model.save(file_path)
    return model

# Load a persisted Word2Vec model and expose its word-vector table.
def load_model(model_path):
    """Load the Word2VecModel stored at *model_path* and return its
    (word, vector) DataFrame, printing the vector count as a sanity check.
    """
    loaded = Word2VecModel.load(model_path)

    # Quick health check on the loaded model.
    word_vectors = loaded.getVectors()
    print("====================模型向量数量========================")
    print(word_vectors.count())
    return word_vectors


def vector_operation(df, vectors):
    """Build per-achievement profile vectors and pairwise cosine similarities.

    Pipeline: join keyword weights with trained word vectors, scale each
    vector by its weight, average them per achievement, self-join the result
    to form ID pairs, compute cosine similarity for every pair, and persist
    the similarity table to MySQL.

    Args:
        df: DataFrame with columns achievement_ID, keywords, weights.
        vectors: (word, vector) DataFrame from load_model().

    NOTE(review): reads the module-level global ``spark`` created in the
    __main__ block — this function cannot run standalone.
    """
    # Keep only keywords that actually have a trained vector.
    word_weight = df.join(vectors, df.keywords == vectors.word, "inner")
    print(word_weight.count())
    # Scale each keyword vector by its weight.
    print("=========================根据权重调整向量======================")
    keywords_vector = word_weight.rdd.map(
        lambda r: (r.achievement_ID, r.keywords, r.weights * r.vector)
    ).toDF(["achievement_ID", "keywords", "vector"])
    keywords_vector.show()
    # createOrReplaceTempView is the non-deprecated form of registerTempTable.
    keywords_vector.createOrReplaceTempView("tempTable")

    def _avg_vector(row):
        """Average one achievement's weighted keyword vectors into its profile
        vector. (Renamed from ``map``, which shadowed the builtin.)"""
        total = 0
        for v in row.vectors:
            total += v
        # The mean vector becomes the achievement's profile vector.
        return row.achievement_ID, total / len(row.vectors)

    vector_df = spark.sql(
        "select achievement_ID, collect_set(vector) vectors from tempTable group by achievement_ID"
    ).rdd.map(_avg_vector).toDF(["achievement_ID", "vector"])
    print("==================================画像向量集======================================")
    vector_df.show()
    print("==================================画像向量集数量======================================")
    print(vector_df.count())

    # Renamed copy for the self-join below.
    temp_df = vector_df.withColumnRenamed("achievement_ID", "achievement_ID2").withColumnRenamed("vector", "vector2")
    print("==================================temp_df:======================================")
    temp_df.show()

    # Self-join on ID inequality → every ordered pair of distinct IDs.
    vector_join = vector_df.join(temp_df, vector_df.achievement_ID != temp_df.achievement_ID2, how="outer")

    def _cosine_partitions(partition):
        """Yield (ID, ID2, cosine similarity) for each joined row."""
        for row in partition:
            vector1 = row.vector
            vector2 = row.vector2
            sim = np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))
            yield row.achievement_ID, row.achievement_ID2, float(sim)

    similarity = vector_join.rdd.mapPartitions(_cosine_partitions).toDF(["achievement_ID", "achievement_ID2", "sim"])

    similarity.show(60)
    print("="*20, "向量相似度集数量", similarity.count(), "="*20)
    print("==========================保存到mysql数据库中===================================")
    conf = {"url": "jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false", "table": "achievement_similarity"}
    dataframe_mysql_write(similarity, conf)
    print("=========================列出所有指定ID与所有ID的相似度=================================")
    # Spot-check: time the lookup + sort for a single hard-coded ID.
    start = time.time()
    similarity_ID = similarity.where("achievement_ID=59345393698892")
    similarity_ID.sort("sim", ascending=False).show()
    print("查询一个ID并且对结果排序所需时间：", time.time() - start)


if __name__ == '__main__':
    # Build a SparkSession with Hive support (vector_operation runs SQL via
    # spark.sql, and the session is configured for 2g/2-core executors).
    conf = SparkConf()
    config = (("spark.executor.memory", "2g"),
              ("spark.executor.cores", "2"))
    conf.setAll(config)
    # NOTE: ``spark`` must stay module-level — vector_operation() reads it as
    # a global.
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()


    # Read the achievement summary rows from MySQL.
    # (``conf`` is rebound here from the SparkConf above to a plain dict.)
    print("==================================正在读取mysql数据库================================")
    conf = {"db": "jdbc:mysql://192.168.0.211:3306/kechuang_middata", "table": "keywords_achievement_summary"}
    summary_df = mysql_data_reading(spark, conf["db"], conf["table"])

    # Tokenize every summary with jieba (runs per partition on executors).
    print("==================================正在分词================================")
    doc = summary_df.rdd.mapPartitions(words)
    doc = doc.toDF(["words"])
    print("==================================分词结果================================")
    doc.show(truncate=False)
    doc.printSchema()

    # print("==================================正在写入mysql数据库================================")
    # vector_conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": "achievement_summary"}
    # dataframe_mysql_write(doc, vector_conf)
    # print("==================================写入mysql数据库成功================================")

    # Train the Word2Vec model and persist it to HDFS and local disk.
    hdfs_path = "hdfs:///model/kechuang/achievement.word2vec_model"
    file_path = "file:///root/model/kechuang/achievement.word2vec_model"
    make_vector_save(doc, hdfs_path, file_path)


    # Read the per-keyword weights used to scale the word vectors.
    print("==================================正在读取mysql数据库================================")
    conf = {"db": "jdbc:mysql://192.168.0.211:3306/kechuang_middata", "table": "keywords_achievement_weight"}
    weight_df = mysql_data_reading(spark, conf["db"], conf["table"])

    # Reload the freshly trained model and run the similarity pipeline.
    print("==================================正在读取模型================================")
    # hdfs_path = "hdfs:///model/kechuang/achievement.word2vec_model"
    vectors = load_model(hdfs_path)

    vector_operation(weight_df, vectors)



