from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import concat_ws
from pyspark.sql.types import *

def mysql_data_reading(spark, db, table, user="root", password="root"):
    """Load a MySQL table into a Spark DataFrame over JDBC.

    :param spark: active SparkSession used to issue the read.
    :param db: JDBC connection URL, e.g. ``jdbc:mysql://host:3306/dbname``.
    :param table: name of the table (or subquery alias) to read.
    :param user: database user; keyword-only override of the previously
                 hard-coded value, default unchanged for existing callers.
    :param password: database password; same default as before.
    :return: DataFrame holding the table contents.
    """
    # NOTE(review): credentials used to be baked into the option chain;
    # they are now parameters with identical defaults, so behavior for
    # all current call sites is unchanged.
    tableDF = (spark.read.format("jdbc")
               .option("url", db)
               .option("driver", "com.mysql.cj.jdbc.Driver")
               .option("dbtable", table)
               .option("user", user)
               .option("password", password)
               .load())

    return tableDF

# Write a DataFrame into a MySQL table via JDBC.
def dataframe_mysql_write(df, conf, auth=None):
    """Persist *df* into MySQL, replacing any existing rows (overwrite mode).

    :param df: DataFrame to write.
    :param conf: dict with keys ``"url"`` (JDBC URL) and ``"table"``
                 (target table name).
    :param auth: optional ``{"user": ..., "password": ...}`` mapping;
                 defaults to the previously hard-coded root/root pair so
                 existing callers keep their behavior.
    """
    auth_mysql = {"user": "root", "password": "root"} if auth is None else auth
    df.write.jdbc(conf["url"], conf["table"], mode='overwrite', properties=auth_mysql)


def select_cloumn_Format(achievement_info, actor_df, item):
    """Build, persist, and return the cleaned achievement wide table.

    Steps:
      1. Select the relevant columns from the achievement source table.
      2. Drop known test records by ID.
      3. Cast ID/date columns to strings and TYPE to an integer.
      4. Resolve PRINCIPAL_ORGANIZATION (an actor ID) to its display NAME.
      5. Resolve TYPE through dictionary table entries with CODE
         350135124065752 to its display NAME.
      6. Write the result to MySQL table ``achievement_wide`` (side effect)
         and return it.

    Source column reference (P_ACHIEVEMENT_INFO):
    ID	                            NUMBER	    primary key
    ACHIEVEMENT_NAME	            VARCHAR2	achievement name
    TYPE	                        NUMBER	    achievement type
    SCOPE	                        VARCHAR2	technical field
    SCOPE_NAME	                    VARCHAR2	technical field name
    RESEARCH_BEGIN_DATE	            DATE	    research start date
    RESEARCH_END_DATE	            DATE	    research end date
    LINKMAN	                        VARCHAR2	contact person
    APPLY_AREA	                    VARCHAR2	promotion/application area
    TECHNICAL_PRINCIPLE	            VARCHAR2	technical principle
    PERFORMANCE_INDEX	            VARCHAR2	performance indicators
    KEY_TECHNOLOGIES_INFO	        VARCHAR2	key technologies description
    CREATIVITY_PROGRESSIVENESS	    VARCHAR2	technical advancement
    ACHIEVEMENT_APPLICATION	        VARCHAR2	transfer/application status
    POPULARIZATION_SCOPE_CONDITION	VARCHAR2	promotion scope, conditions, prospects
    REMARK	                        VARCHAR2	remarks
    PRINCIPAL_ORGANIZATION	        NUMBER	    submitter's organization (FK to the actor table)
    CREATIVE_STAFF_LIST	            VARCHAR2	participant name list
    MAJOR_LIST	                    VARCHAR2	participant major list (deduplicated)
    WORK_UNIT_LIST	                VARCHAR2	participant work-unit list (deduplicated)
    """
    a_i_s = achievement_info.select(
        "ID", "ACHIEVEMENT_NAME", "TYPE", "SCOPE_NAME", "RESEARCH_BEGIN_DATE",
        "RESEARCH_END_DATE", "LINKMAN", "APPLY_AREA", "TECHNICAL_PRINCIPLE",
        "PERFORMANCE_INDEX", "KEY_TECHNOLOGIES_INFO", "CREATIVITY_PROGRESSIVENESS",
        "ACHIEVEMENT_APPLICATION", "POPULARIZATION_SCOPE_CONDITION",
        "PRINCIPAL_ORGANIZATION", "CREATIVE_STAFF_LIST", "MAJOR_LIST", "WORK_UNIT_LIST")
    # Remove a known test record.
    a_i_s = a_i_s[(a_i_s.ID != 72739057371124)]
    # Normalize types: IDs and dates become strings for output; TYPE becomes
    # an int so the dictionary join below matches on an integer key.
    schema_ai_df = a_i_s. \
        withColumn("ID", a_i_s.ID.cast(StringType())). \
        withColumn("TYPE", a_i_s.TYPE.cast(IntegerType())). \
        withColumn("RESEARCH_BEGIN_DATE", a_i_s.RESEARCH_BEGIN_DATE.cast(StringType())). \
        withColumn("RESEARCH_END_DATE", a_i_s.RESEARCH_END_DATE.cast(StringType())). \
        withColumn("PRINCIPAL_ORGANIZATION", a_i_s.PRINCIPAL_ORGANIZATION.cast(StringType()))
    # Pick the lookup fields from the actor (organization) table.
    actor = actor_df.select("ID", "NAME")
    # Cast and rename so the join key matches PRINCIPAL_ORGANIZATION.
    schema_a_df = actor.withColumn("ID", actor.ID.cast(StringType())).withColumnRenamed("ID", "PRINCIPAL_ORGANIZATION")

    ai_clean_df = schema_ai_df.join(schema_a_df, "PRINCIPAL_ORGANIZATION", "left_outer")
    # Replace the numeric FK with the resolved organization name.
    ai_clean_df = ai_clean_df.drop("PRINCIPAL_ORGANIZATION").withColumnRenamed("NAME", "PRINCIPAL_ORGANIZATION")

    # Resolve TYPE via the dictionary table S_CODE_ITEM (CODE 350135124065752).
    item_df = item[(item.CODE == "350135124065752")]
    item_df = item_df.select("VALUE", "NAME")
    item_df = item_df.withColumn("VALUE", item_df.VALUE.cast(IntegerType())).withColumnRenamed("VALUE", "TYPE")
    ai_clean_df = ai_clean_df.join(item_df, "TYPE", "left_outer")
    ai_clean_df = ai_clean_df.drop("TYPE").withColumnRenamed("NAME", "TYPE")
    # Drop the remaining known test records in a single filter
    # (previously three chained filters; null/uncastable IDs are dropped
    # by both forms, so the result set is unchanged).
    ai_clean_df = ai_clean_df[~ai_clean_df.ID.isin(60212561038586, 1507974251265, 4127406459865)]

    # Persist the finished wide table to the intermediate database.
    conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": "achievement_wide"}
    dataframe_mysql_write(ai_clean_df, conf)

    return ai_clean_df


# Merge the descriptive text columns into one field as input for keyword extraction.
def merge_column(df):
    """Return ID, NAME, and a comma-joined ``summary`` of the text columns.

    The summary concatenates every descriptive column with "," so a
    downstream keyword-extraction step can work on a single field.
    """
    # Columns folded into the summary, in the original concatenation order.
    text_columns = [
        "ACHIEVEMENT_NAME",
        "TYPE",
        "SCOPE_NAME",
        "APPLY_AREA",
        "TECHNICAL_PRINCIPLE",
        "PERFORMANCE_INDEX",
        "KEY_TECHNOLOGIES_INFO",
        "CREATIVITY_PROGRESSIVENESS",
        "ACHIEVEMENT_APPLICATION",
        "POPULARIZATION_SCOPE_CONDITION",
        "PRINCIPAL_ORGANIZATION",
        "CREATIVE_STAFF_LIST",
        "MAJOR_LIST",
        "WORK_UNIT_LIST",
    ]
    summary = concat_ws(",", *[df[name] for name in text_columns]).alias("summary")
    selected = df.select("ID", "ACHIEVEMENT_NAME", summary)
    return selected.withColumnRenamed("ACHIEVEMENT_NAME", "NAME")

if __name__ == '__main__':
    # Spark session with explicit executor resource limits.
    spark_conf = SparkConf()
    spark_conf.setAll((("spark.executor.memory", "2g"),
                       ("spark.executor.cores", "2")))
    spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()

    # Source database configuration (kechaung_old: P_ACHIEVEMENT_INFO etc.).
    source_db = {"url": 'jdbc:mysql://192.168.0.211:3306/kechaung_old',
                 "table_ai": 'P_ACHIEVEMENT_INFO',
                 "table_actor": "M_ACTOR",
                 "table_item": "S_CODE_ITEM"}

    print("============================开始读取数据库================================")
    achievement_info = mysql_data_reading(spark, source_db['url'], source_db['table_ai'])
    actor_df = mysql_data_reading(spark, source_db['url'], source_db['table_actor'])
    # Dictionary table of achievement types (S_CODE_ITEM, code 350135124065752).
    item = mysql_data_reading(spark, source_db['url'], source_db['table_item'])

    print("============================开始数据清洗和特征提取================================")
    # Clean the data and build the wide table.
    ai_clean_df = select_cloumn_Format(achievement_info, actor_df, item)

    print("============================正在合并字段=====================================")
    merge_df = merge_column(ai_clean_df)

    print("==============================正在将处理结果存入数据库==================================")
    target_db = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false',
                 "table": "keywords_achievement_summary"}
    dataframe_mysql_write(merge_df, target_db)
    print("==============================存入结束==================================")












