from pyspark import SparkConf
from pyspark.sql import SparkSession
"""对计算出的textrank和tfidf值进行加权平均"""

# Helper to save a DataFrame into MySQL via JDBC (currently unused by main flow).
def mysql_data_writting(df, db, table):
    """Write *df* into MySQL table *table* at JDBC URL *db*, replacing existing rows.

    :param df: Spark DataFrame to persist.
    :param db: full JDBC URL, e.g. ``jdbc:mysql://host:3306/dbname``.
    :param table: destination table name.
    """
    # Specify the JDBC driver explicitly, matching the other JDBC calls in
    # this file; without it the write depends on driver auto-detection.
    # NOTE(review): credentials are hard-coded — consider externalizing them.
    df.write.jdbc(db, table, mode='overwrite',
                  properties={"user": "root",
                              "password": "root",
                              "driver": "com.mysql.cj.jdbc.Driver"})


def weighted_mean_sql(spark):
    """Join the TextRank and TF-IDF keyword tables, average the two scores,
    persist the result to MySQL, and return the resulting DataFrame.

    For every (expert_ID, keywords) pair present in both
    ``keywords_expert_textrank`` and ``keywords_expert_tfidf``, computes
    ``weights = (textrank + tfidf) / 2`` and writes the joined rows to the
    MySQL table ``keywords_expert_weight`` (overwriting it).

    :param spark: active SparkSession with Hive support enabled.
    :return: DataFrame with columns expert_ID, expert_NAME, keywords,
             textrank, tfidf, weights.
    """
    spark.sql("use kechuang_middata")
    # Equal-weight arithmetic mean of the two keyword scores. ANSI INNER JOIN
    # syntax is equivalent to the original comma-join + WHERE formulation.
    sql_weighted_mean = """
    select
    keywords_expert_textrank.expert_ID,
    keywords_expert_textrank.expert_NAME,
    keywords_expert_textrank.keywords,
    keywords_expert_textrank.textrank,
    keywords_expert_tfidf.tfidf,
    (keywords_expert_textrank.textrank+keywords_expert_tfidf.tfidf)/2 weights
    from keywords_expert_textrank
    inner join keywords_expert_tfidf
    on keywords_expert_textrank.expert_ID=keywords_expert_tfidf.expert_ID and
    keywords_expert_textrank.keywords=keywords_expert_tfidf.keywords
    """
    print("===================开始加权平均计算===============================")
    df = spark.sql(sql_weighted_mean)
    df.printSchema()

    # Persist a copy to MySQL.
    # NOTE(review): connection URL and credentials are hard-coded here and in
    # the verification read below — consider moving them to configuration.
    print("=============开始保存到mysql数据库中==============")
    df.write.format('jdbc').options(
        url='jdbc:mysql://192.168.0.211:3306/kechuang_middata',
        driver='com.mysql.cj.jdbc.Driver',
        dbtable='keywords_expert_weight',
        user='root',
        password='root',
        useSSL=False,
    ).mode('overwrite').save()
    print("=============成功保存==============")

    df.show()
    # Count of distinct expert IDs that received a weight.
    print("="*15, "算出weights的ID数据总和", df.groupBy("expert_ID").count().count(), "="*15)

    # Read the table back to verify how many distinct expert IDs were stored.
    Result_mysql = (
        spark.read.format("jdbc")
        .option("url", "jdbc:mysql://192.168.0.211:3306/kechuang_middata")
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .option("dbtable", "keywords_expert_weight")
        .option("user", "root")
        .option("password", "root")
        .load()
    )
    print("="*15, "mysql存入的weights的ID数据总和", Result_mysql.groupBy("expert_ID").count().count(), "="*15)

    return df


if __name__ == '__main__':
    # Executor resource settings for this job.
    conf = SparkConf()
    for key, value in (("spark.executor.memory", "2g"),
                       ("spark.executor.cores", "2")):
        conf.set(key, value)

    # Hive support is required so the job can query the middata tables.
    spark = (SparkSession.builder
             .config(conf=conf)
             .enableHiveSupport()
             .getOrCreate())

    df = weighted_mean_sql(spark)



