import os
import sys
from pyspark import SparkConf
from pyspark.sql import SparkSession
from functools import partial
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0, os.path.join(BASE_DIR))
# from key_word_operation import SparkSessionBase
import pyspark.sql.functions as F
import pyspark
import gc
import logging

# Point PySpark (driver and executors) at the Python 3.7 interpreter installed
# on the cluster nodes; must match the interpreter where jieba is installed.
os.environ['PYSPARK_PYTHON']='/usr/python/bin/python3.7'


# Tokenization / keyword extraction (runs partition-wise on Spark executors)
def segmentation(partition):
    """Extract TextRank-weighted keywords from each row's ``summary`` text.

    Designed for ``rdd.mapPartitions``: jieba is imported locally so the
    (heavy) dictionary load happens once per partition on the executor,
    not on the driver.

    Args:
        partition: iterator of Rows with fields ``ID``, ``NAME``, ``summary``.

    Yields:
        ``(ID, NAME, keyword, weight)`` tuples — up to 20 keywords per row.
    """
    import jieba
    import jieba.analyse

    # Custom dictionary and stopword list; these paths must exist on every
    # worker node (deployed out-of-band).
    user_dict_path = "/root/data/word/key_word.txt"
    jieba.load_userdict(user_dict_path)

    stopwords_path = "/root/data/word/stop_word.txt"

    def get_stopwords():
        """Return the stopword set (a set for O(1) membership tests)."""
        # NOTE(review): file assumed to be UTF-8 — confirm encoding of
        # stop_word.txt on the workers.
        with open(stopwords_path, encoding="utf-8") as f:
            return {line.strip() for line in f}

    stopwords = get_stopwords()

    class TextRank(jieba.analyse.TextRank):
        def __init__(self, window=20, word_min_len=2):
            super(TextRank, self).__init__()
            self.span = window                # co-occurrence window size
            self.word_min_len = word_min_len  # minimum word length to keep
            # POS tags to keep (nouns, English, names/places/orgs, ...)
            self.pos_filt = frozenset(
                ('n', 'x', 'eng', 'f', 's', 't', 'nr', 'ns', 'nt', "nw", "nz",
                 "PER", "LOC", "ORG"))

        def pairfilter(self, wp):
            """Return True if the (word, flag) pair should be kept."""
            # English tokens of length <= 2 are always dropped.
            if wp.flag == "eng" and len(wp.word) <= 2:
                return False
            # Keep words with a whitelisted POS tag, long enough, and not a
            # stopword.  (Explicit False instead of the original implicit None.)
            return (wp.flag in self.pos_filt
                    and len(wp.word.strip()) >= self.word_min_len
                    and wp.word.lower() not in stopwords)

    textrank_model = TextRank(window=10, word_min_len=2)
    allow_pos = ('n', "x", 'eng', 'nr', 'ns', 'nt', "nw", "nz", "c")

    for row in partition:
        # Guard: a NULL/empty summary would otherwise crash the whole
        # partition inside jieba.
        if not row.summary:
            continue
        tags = textrank_model.textrank(
            row.summary, topK=20, withWeight=True, allowPOS=allow_pos,
            withFlag=False)
        for word, weight in tags:
            yield row.ID, row.NAME, word, weight


# Write the TextRank results into the Hive warehouse
def hive_data_writting(df, spark, sql_create_table, sql_insert, sql_select):
    """Persist *df* into a Hive table in database ``kechuang_middata``.

    Args:
        df: DataFrame to persist; exposed to SQL as temp view ``tempTable``.
        spark: active SparkSession with Hive support enabled.
        sql_create_table: ``CREATE TABLE IF NOT EXISTS ...`` statement.
        sql_insert: ``INSERT ... SELECT * FROM tempTable`` statement.
        sql_select: sanity-check SELECT; its result DataFrame is discarded.
    """
    spark.sql("use kechuang_middata")
    # createOrReplaceTempView replaces the Spark-2.0-deprecated
    # registerTempTable; same effect, same view name.
    df.createOrReplaceTempView("tempTable")
    # Create the target table (no-op if it already exists).
    spark.sql(sql_create_table)
    # Copy the temp view's rows into the Hive table.
    spark.sql(sql_insert)
    # Verification query; add .show() here if a visual check is wanted.
    spark.sql(sql_select)



if __name__ == '__main__':

    # Spark session with Hive support and modest executor sizing.
    conf = SparkConf()
    conf.setAll((("spark.executor.memory", "2g"),
                 ("spark.executor.cores", "2")))
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()

    # Shared MySQL connection settings, defined once instead of being
    # duplicated in every JDBC call.
    # NOTE(review): credentials are hard-coded — move to config/secret store.
    jdbc_url = "jdbc:mysql://192.168.0.211:3306/kechuang_middata"
    jdbc_driver = "com.mysql.cj.jdbc.Driver"
    jdbc_user = "root"
    jdbc_password = "root"

    def read_mysql_table(table):
        """Load one MySQL table as a DataFrame over JDBC."""
        return (spark.read.format("jdbc")
                .option("url", jdbc_url)
                .option("driver", jdbc_driver)
                .option("dbtable", table)
                .option("user", jdbc_user)
                .option("password", jdbc_password)
                .load())

    print("==================正在读取数据库======================")
    # Expert summaries (source rows: ID, NAME, summary).
    ret = read_mysql_table("keywords_expert_summary")

    print("==================正在分词计算权重===================")
    # mapPartitions takes the generator function directly; the original
    # functools.partial(segmentation) wrapper added nothing.
    keywords_weights = ret.rdd.mapPartitions(segmentation) \
        .toDF(["expert_ID", "expert_NAME", "tag", "weights"])

    print("==================正在写入数据库======================")
    keywords_weights.write.jdbc(
        jdbc_url + "?useSSL=false", 'keywords_expert_textrank',
        mode='overwrite',
        properties={"user": jdbc_user, "password": jdbc_password})

    print("==================正在写入hive数据库中=====================")
    sql_textrank = """CREATE TABLE IF NOT EXISTS keywords_expert_textrank(
    expert_ID STRING,
    expert_NAME STRING,
    keywords STRING,
    textrank DOUBLE
    )"""
    sql_insert = "INSERT INTO keywords_expert_textrank SELECT * FROM tempTable"
    sql_select = "select * from keywords_expert_textrank"

    hive_data_writting(keywords_weights, spark, sql_textrank, sql_insert, sql_select)
    print("==================正在写入hive数据库成功=====================")

    keywords_weights.show()
    # Cross-check distinct-ID counts across the three stores.
    print("=" * 15, "算出textrank的ID数据总和",
          keywords_weights.groupBy("expert_ID").count().count(), "=" * 15)

    Result_mysql = read_mysql_table("keywords_expert_textrank")
    print("=" * 15, "mysql存入的textrank的ID数据总和",
          Result_mysql.groupBy("expert_ID").count().count(), "=" * 15)

    Result_hive = spark.sql("SELECT * from keywords_expert_textrank")
    print("=" * 15, "hive存入的textrank的ID数据总和",
          Result_hive.groupBy("expert_ID").count().count(), "=" * 15)





















