import os
import sys
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark
from pyspark.sql.types import *
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.feature import IDF
from functools import partial


os.environ['PYSPARK_PYTHON']='/usr/python/bin/python3.7'
# Load one MySQL table into a Spark DataFrame over JDBC.
def mysql_data_loading(spark, db, table):
    """Read `table` from the JDBC url `db` and return it as a DataFrame.

    Connects with the MySQL Connector/J driver using the hard-coded
    root/root credentials.
    """
    jdbc_options = {
        "url": db,
        "driver": "com.mysql.cj.jdbc.Driver",
        "dbtable": table,
        "user": "root",
        "password": "root",
    }
    reader = spark.read.format("jdbc")
    for key, value in jdbc_options.items():
        reader = reader.option(key, value)
    return reader.load()

# Write a DataFrame back to MySQL.
def mysql_data_writting(df, db, table):
    """Overwrite `table` at the JDBC url `db` with the contents of `df`."""
    credentials = {"user": "root", "password": "root"}
    df.write.jdbc(db, table, mode='overwrite', properties=credentials)

# Write a DataFrame into a Hive table via a temporary view.
def hive_data_writting(df, spark, sql_create_table, sql_insert, sql_select):
    """Persist `df` into Hive.

    Registers `df` as the temp view "tempTable" (which `sql_insert` is
    expected to SELECT from), creates the target table if needed, and
    inserts the data.

    Parameters:
        df: DataFrame to persist.
        spark: active SparkSession with Hive support.
        sql_create_table: CREATE TABLE IF NOT EXISTS statement.
        sql_insert: INSERT ... SELECT ... FROM tempTable statement.
        sql_select: verification SELECT on the target table.
    """
    # Hard-coded target database.
    spark.sql("use kechuang_middata")
    # createOrReplaceTempView supersedes the deprecated registerTempTable.
    df.createOrReplaceTempView("tempTable")
    # Create the destination table (no-op if it already exists).
    spark.sql(sql_create_table)
    # Copy the temp view's rows into the Hive table.
    spark.sql(sql_insert)
    # NOTE(review): the result of this verification query is discarded
    # (no .show()/.collect()); Spark is lazy, so it does no real work.
    spark.sql(sql_select)


def words(partitions):
    """Tokenize each row's `summary` with jieba and yield
    (ID, NAME, filtered_tokens) tuples — one per input row.

    Designed for RDD.mapPartitions: jieba is imported and its
    dictionaries are loaded once per partition, on the executor.
    """
    import codecs

    import jieba
    import jieba.posseg as pseg

    # User dictionary so domain key words are segmented as single tokens.
    user_dict_path = "/root/data/word/key_word.txt"
    jieba.load_userdict(user_dict_path)

    stopwords_path = "/root/data/word/stop_word.txt"

    def get_stopwords_list():
        """Return the stop-word list, one word per line."""
        # BUG FIX: codecs.open with no encoding opens the file in binary
        # mode, yielding bytes that can never equal the str tokens we
        # compare against below; decode explicitly as UTF-8.
        with codecs.open(stopwords_path, encoding="utf-8") as f:
            return [line.strip() for line in f]

    # All stop words for this partition.
    stopwords_list = get_stopwords_list()

    def cut_sentence(sentence):
        """POS-tag `sentence`; drop stop words; keep nouns, English words
        longer than 2 chars, and user-dictionary/other tokens longer than
        1 char."""
        seg_list = pseg.lcut(sentence)
        # BUG FIX: the original filtered on i.flag (the POS tag) against
        # the stop-word list; stop words are words, so compare i.word.
        seg_list = [i for i in seg_list if i.word not in stopwords_list]
        filtered_words_list = []
        for seg in seg_list:
            if len(seg.word) <= 1:
                continue
            elif seg.word == "\r\n":
                continue
            elif seg.flag == "eng":
                # Keep English tokens only when longer than 2 characters.
                if len(seg.word) <= 2:
                    continue
                else:
                    filtered_words_list.append(seg.word)
            elif seg.flag.startswith("n"):
                # Nouns (POS tags n, nr, ns, nt, nz, ...).
                filtered_words_list.append(seg.word)
            elif seg.flag in ["x", "eng"]:
                # Unclassified/user-dictionary tokens ("eng" is already
                # handled above, so only "x" can reach this branch).
                filtered_words_list.append(seg.word)

        return filtered_words_list

    for row in partitions:
        yield row.ID, row.NAME, cut_sentence(row.summary)

# Compute TF-IDF scores per (expert, keyword) pair.
def keyword_tfidf(df):
    """Fit CountVectorizer + IDF on `df`'s "words" column and return a
    DataFrame with columns (expert_ID, keyword, tfidf), one row per
    non-zero term of each document.

    Parameters:
        df: DataFrame with columns expert_ID, expert_NAME, words
            (list of string tokens).
    """
    # Term counts: one sparse count vector per document.
    cv = CountVectorizer(inputCol="words", outputCol="rawFeatures", vocabSize=60000 * 20, minDF=1.0)
    cv_model = cv.fit(df)
    cv_result = cv_model.transform(df)
    # Show the raw term-count result.
    cv_result.show()

    # Inverse document frequencies learned over the count vectors.
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idf_model = idf.fit(cv_result)
    rescaled_data = idf_model.transform(cv_result)
    print("==========rescaledData训练结果展示：==============")
    rescaled_data.select("words", "features").show()
    # Pair each vocabulary term with its IDF weight; list position matches
    # the sparse-vector index used below.
    keywords_list_with_idf = list(zip(cv_model.vocabulary, idf_model.idf.toArray()))

    def _tfidf(partition, kw_list):
        """Yield (expert_ID, word, tfidf) for each non-zero term per row."""
        for row in partition:
            # Total token count of this document (sum of raw counts).
            words_length = row.rawFeatures.values.sum()
            for index in row.rawFeatures.indices:
                word, idf_weight = kw_list[int(index)]
                # Term frequency normalized by document length.
                tf = row.rawFeatures[int(index)] / words_length
                yield row.expert_ID, word, float(tf) * float(idf_weight)

    # Renamed from `tfidf`/`keyword_tfidf` so locals no longer shadow the
    # enclosing function name.
    mapper = partial(_tfidf, kw_list=keywords_list_with_idf)
    result = cv_result.rdd.mapPartitions(mapper).toDF(["expert_ID", "keyword", "tfidf"])
    print("===========keyword_tfidf:=======")
    result.show()
    print("===========keyword_tfidf orderBy:===========")
    result.orderBy("tfidf", ascending=False).show()

    return result

if __name__ == '__main__':
    # Configure and start a Hive-enabled Spark session.
    conf = SparkConf()
    config = (("spark.executor.memory", "2g"),
              ("spark.executor.cores", "2"))
    conf.setAll(config)
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()

    # Load the expert-summary source table from MySQL.
    db_middata = "jdbc:mysql://192.168.0.211:3306/kechuang_middata"
    table_summary = "keywords_expert_summary"
    ret = mysql_data_loading(spark, db_middata,table_summary)

    # Tokenize summaries partition-by-partition (see words()).
    doc = ret.rdd.mapPartitions(words)
    doc = doc.toDF(["expert_ID", "expert_NAME", "words"])
    doc.show(100)

    # Compute term frequency / IDF and combine into TF-IDF scores.
    keywords_tfidf = keyword_tfidf(doc)

    # Persist the computed TF-IDF scores to MySQL.
    table_tfidf = "keywords_expert_tfidf"
    print("=================开始将数据存入mysql=====================")
    mysql_data_writting(keywords_tfidf, "jdbc:mysql://192.168.0.211:3306/kechuang_middata", table_tfidf)
    print("=================存入mysql结束=====================")

    # Persist the same data to Hive.
    # NOTE(review): the Hive column is "keywords" while the DataFrame column
    # is "keyword"; INSERT ... SELECT * matches by position so it still
    # works — confirm the name difference is intentional.
    sql_tfidf = """CREATE TABLE IF NOT EXISTS keywords_expert_tfidf(
    expert_ID STRING,
    keywords STRING,
    tfidf DOUBLE
    )"""
    sql_insert = "INSERT INTO keywords_expert_tfidf SELECT * FROM tempTable"
    sql_select = "select * from keywords_expert_tfidf"
    print("=================开始将数据存入hive=====================")
    hive_data_writting(keywords_tfidf, spark, sql_tfidf, sql_insert, sql_select)
    print("=================存入结束=====================")

    keywords_tfidf.show()
    # Sanity check: distinct expert-ID count in the computed result ...
    print("="*15,"算出tfidf的ID数据总和",keywords_tfidf.groupBy("expert_ID").count().count(),"="*15)

    # ... in the rows written back to MySQL ...
    Result_mysql = spark.read.format("jdbc").option("url", "jdbc:mysql://192.168.0.211:3306/kechuang_middata").option("driver","com.mysql.cj.jdbc.Driver").option("dbtable", "keywords_expert_tfidf").option("user", "root").option("password", "root").load()
    print("="*15,"mysql存入的tfidf的ID数据总和",Result_mysql.groupBy("expert_ID").count().count(),"="*15)

    # ... and in the rows written to Hive. All three counts should match.
    Result_hive = spark.sql("SELECT * from keywords_expert_tfidf")
    print("="*15,"hive存入的tfidf的ID数据总和",Result_hive.groupBy("expert_ID").count().count(),"="*15)
