from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import concat_ws
from pyspark.sql.types import *


def mysql_data_reading(spark, db, table, user="root", password="root"):
    """Load a MySQL table into a Spark DataFrame over JDBC.

    Args:
        spark: active SparkSession used to build the reader.
        db: JDBC connection URL, e.g. "jdbc:mysql://host:3306/dbname".
        table: table name (or subquery alias) passed as the JDBC ``dbtable``.
        user: MySQL user name; defaults to "root" to stay backward
            compatible with the previously hard-coded credentials.
        password: MySQL password; defaults to "root" for the same reason.

    Returns:
        pyspark.sql.DataFrame with the table contents.
    """
    # NOTE(review): credentials were hard-coded; they are now parameters
    # with the original values as defaults so existing callers still work.
    return (spark.read.format("jdbc")
            .option("url", db)
            .option("driver", "com.mysql.cj.jdbc.Driver")
            .option("dbtable", table)
            .option("user", user)
            .option("password", password)
            .load())

# Write a DataFrame into a MySQL table over JDBC.
def dataframe_mysql_write(df, conf, user="root", password="root"):
    """Overwrite a MySQL table with the contents of ``df``.

    Args:
        df: DataFrame to persist.
        conf: dict with keys "url" (JDBC connection URL) and "table"
            (destination table name).
        user: MySQL user name; defaults to "root" to stay backward
            compatible with the previously hard-coded credentials.
        password: MySQL password; defaults to "root" for the same reason.

    Side effects:
        Replaces the destination table (mode='overwrite').
    """
    auth_mysql = {"user": user, "password": password}
    df.write.jdbc(conf["url"],
                  conf["table"],
                  mode='overwrite',
                  properties=auth_mysql)


def data_clean(df):
    """Reduce the expert wide table to ID, NAME and one merged text field.

    Projects the wide table down to the columns of interest, then
    concatenates the descriptive columns (comma-separated; concat_ws
    skips nulls) into a single "summary" column.

    Args:
        df: source DataFrame read from the expert wide table.

    Returns:
        DataFrame with columns "ID", "NAME" and "summary".
    """
    # Columns kept by the initial projection.
    kept_columns = (
        "ID", "NAME", "ORGANIZATION", "DESCRIPTION", "POSITION",
        "PROFESSIONALTITLE", "EDUCATEBACKGROUND", "EDUCATION_EXPERIENCE",
        "DEGREELEVEL", "GRADUATECOLLEGE", "SPECIALFIELDSTUDY",
        "SPECIALFIELDENGAGED", "WORKUNIT_EXPERIENCE", "WORKEXPERIENCE",
        "WORKUNIT", "WORK_POSITION", "WORKRESULT", "WORK_TYPES",
        "BOOK_NAMEs", "TITLEs", "TECHNIQUE_DOMAIN", "EXPERT_GROUPs",
        "RIGHT_TYPEs", "PATENT_TYPEs", "PROJECT_NAMEs", "TECHNIQUE_DOMAINs",
        "REWARD_PROJECT_NAMEs", "LAST_FIVEYEARS_PROJECT", "REWARD_NAMEs",
        "REWARD_GRADEs", "REWARD_UNITs", "LAST_FIVEYEARS_REWARD",
        "LAST_FIVEYEARS_ACHIEVENMENT", "ACHIEVEMENT_PROJECT_NAMEs",
    )
    # Descriptive columns folded into "summary". Not every kept column is
    # merged (e.g. BOOK_NAMEs, TITLEs, EXPERT_GROUPs are left out) — this
    # matches the original selection exactly.
    summary_columns = (
        "ORGANIZATION", "DESCRIPTION", "POSITION", "PROFESSIONALTITLE",
        "EDUCATEBACKGROUND", "EDUCATION_EXPERIENCE", "DEGREELEVEL",
        "GRADUATECOLLEGE", "SPECIALFIELDSTUDY", "SPECIALFIELDENGAGED",
        "WORKUNIT_EXPERIENCE", "WORKEXPERIENCE", "WORKUNIT",
        "WORK_POSITION", "WORKRESULT", "WORK_TYPES", "PATENT_TYPEs",
        "PROJECT_NAMEs", "TECHNIQUE_DOMAINs", "REWARD_PROJECT_NAMEs",
        "LAST_FIVEYEARS_PROJECT", "REWARD_NAMEs", "REWARD_GRADEs",
        "REWARD_UNITs", "LAST_FIVEYEARS_REWARD",
        "LAST_FIVEYEARS_ACHIEVENMENT", "ACHIEVEMENT_PROJECT_NAMEs",
    )

    projected = df.select(*kept_columns)
    summary_col = concat_ws(
        ",", *[projected[name] for name in summary_columns]
    ).alias("summary")
    return projected.select("ID", "NAME", summary_col)




if __name__ == '__main__':
    # Configure executor resources before the session is created.
    spark_conf = SparkConf()
    spark_conf.setAll((("spark.executor.memory", "2g"),
                       ("spark.executor.cores", "2")))
    spark = SparkSession.builder.config(conf=spark_conf).enableHiveSupport().getOrCreate()

    # Read the expert wide table from MySQL.
    # NOTE(review): db name "kechaung_old" looks like a possible typo of
    # "kechuang" (cf. the sink URL below) — confirm against the live schema.
    source = {"db": "jdbc:mysql://192.168.0.211:3306/kechaung_old", "table": "expert_wide_all"}
    expert_df = mysql_data_reading(spark, source["db"], source["table"])

    # Clean the data: keep ID/NAME and merge descriptive fields into "summary".
    print("================================开始数据清洗===========================================")
    clean_df = data_clean(expert_df)

    # Persist the cleaned result back to MySQL (destination table is overwritten).
    sink = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": 'keywords_expert_summary'}
    dataframe_mysql_write(clean_df, sink)












