from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import concat_ws
from pyspark.sql.types import *

def mysql_data_reading(spark, db, table, user="root", password="root"):
    """Read one MySQL table into a Spark DataFrame over JDBC.

    Parameters
    ----------
    spark : SparkSession
        Active session used to build the JDBC reader.
    db : str
        JDBC URL of the database, e.g. 'jdbc:mysql://host:3306/dbname'.
    table : str
        Name of the table to load.
    user, password : str
        Connection credentials. They default to the previously
        hard-coded 'root'/'root' so all existing callers keep working,
        but can now be overridden instead of being baked in.

    Returns
    -------
    pyspark.sql.DataFrame
        The table contents.
    """
    tableDF = (
        spark.read.format("jdbc")
        .option("url", db)
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .option("dbtable", table)
        .option("user", user)
        .option("password", password)
        .load()
    )
    return tableDF

# Write a DataFrame into the MySQL database.
def dataframe_mysql_write(df, conf, user="root", password="root"):
    """Overwrite a MySQL table with the contents of *df* via JDBC.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
        Data to persist.
    conf : dict
        Must contain "url" (JDBC URL) and "table" (target table name).
    user, password : str
        Connection credentials. Defaults keep the previously
        hard-coded 'root'/'root' behavior for existing callers.

    Note
    ----
    mode='overwrite' drops and replaces the target table's contents.
    """
    auth_mysql = {"user": user, "password": password}
    df.write.jdbc(conf["url"], conf["table"], mode='overwrite', properties=auth_mysql)


def left_join_select_cloumn(p_pubilcinfo, p_baseinfo):
    """Select the wide-table feature columns from the two project tables
    and left-join them on the project id.

    Column reference (translated from the source schema notes):
        ID                NUMBER    identifier
        PROJECT_NUMBER    VARCHAR2  project number
        NAME              VARCHAR2  project name
        PRINCIPAL         NUMBER    project leader id   (joins to M_ACTOR)
        SCOPE             NUMBER    technical field     (S_CODE_ITEM, CODE 1236599281137)
        TYPE              NUMBER    project category    (S_CODE_ITEM, CODE 1227616614500)
        BUILD_DATE        DATE      project approval date
        END_DATE          DATE      project end date
        SUMMARIZE         VARCHAR2  project overview
        CONTENT           VARCHAR2  main research content
        GUIDE_LINE        VARCHAR2  main technical / economic indicators
        RESULT            VARCHAR2  expected results and intellectual property
        BEGIN_DATE        DATE      start time
        FINISH_DATE       DATE      finish time
        PROJECT_MONEY     NUMBER    engineering matching funds (10k CNY)
        OWNER_MONEY       NUMBER    self-raised funds (10k CNY)
        APPLY_MONEY       NUMBER    requested subsidy funds (10k CNY)
        TECHNIC_ANALYSIS  VARCHAR2  analysis of project difficulties
        BENEFIT_ANALYSIS  VARCHAR2  social / economic benefit analysis
        COOPERATE_SUPPORT VARCHAR2  schedule coordination and support conditions
        PRODUCTION_PLAN   VARCHAR2  promotion / application plan

    Returns the left-joined DataFrame keyed on "ID".
    """
    public_cols = ["ID", "PROJECT_NUMBER", "NAME", "PRINCIPAL", "SCOPE",
                   "TYPE", "BUILD_DATE", "END_DATE", "BEGIN_DATE", "FINISH_DATE"]
    base_cols = ["PROJECT_ID", "SUMMARIZE", "CONTENT", "GUIDE_LINE", "RESULT",
                 "PROJECT_MONEY", "OWNER_MONEY", "APPLY_MONEY", "TECHNIC_ANALYSIS",
                 "BENEFIT_ANALYSIS", "COOPERATE_SUPPORT", "PRODUCTION_PLAN"]

    # Feature extraction from each table.
    public_part = p_pubilcinfo.select(*public_cols)
    # Rename the foreign key so both sides share the join column "ID".
    base_part = p_baseinfo.select(*base_cols).withColumnRenamed("PROJECT_ID", "ID")

    # Left join: keep every P_PUBLICINFO row even without base info.
    return public_part.join(base_part, "ID", "left_outer")

def data_clean(spark, df):
    """Normalize column types and replace the SCOPE / TYPE code values
    with their human-readable names from the S_CODE_ITEM dictionary table.

    Also removes one known-bad record (ID 1238486768847).

    Parameters
    ----------
    spark : SparkSession used to read the dictionary table.
    df : joined wide DataFrame produced by left_join_select_cloumn.

    Returns
    -------
    Cleaned DataFrame where SCOPE and TYPE hold dictionary names.
    """
    # Cast id/date-like columns to strings and money/code columns to ints.
    as_string = ["ID", "PRINCIPAL", "BUILD_DATE", "END_DATE",
                 "BEGIN_DATE", "FINISH_DATE"]
    as_int = ["SCOPE", "TYPE", "PROJECT_MONEY", "OWNER_MONEY", "APPLY_MONEY"]

    schema_df = df
    for column in as_string:
        schema_df = schema_df.withColumn(column, schema_df[column].cast(StringType()))
    for column in as_int:
        schema_df = schema_df.withColumn(column, schema_df[column].cast(IntegerType()))

    # Dictionary table S_CODE_ITEM holds both code lists
    # (the user table M_ACTOR lookup is currently not performed).
    code_item = mysql_data_reading(spark, 'jdbc:mysql://192.168.0.211:3306/kechaung_old', "S_CODE_ITEM")

    def _code_lookup(code, value_alias, name_alias):
        # One (VALUE -> NAME) mapping for a given dictionary CODE,
        # with columns renamed so the join key matches the wide table.
        subset = code_item[(code_item.CODE == code)].select("VALUE", "NAME")
        return subset. \
            withColumn("VALUE", subset.VALUE.cast(IntegerType())).withColumnRenamed("VALUE", value_alias). \
            withColumn("NAME", subset.NAME.cast(StringType())).withColumnRenamed("NAME", name_alias)

    # Replace the SCOPE code with the technical-field name.
    scope_dict = _code_lookup("1236599281137", "SCOPE", "SCOPE_NAME")
    cleaned = schema_df.join(scope_dict, "SCOPE", "left_outer")
    cleaned = cleaned.drop("SCOPE").withColumnRenamed("SCOPE_NAME", "SCOPE")

    # Replace the TYPE code with the project-category name.
    type_dict = _code_lookup("1227616614500", "TYPE", "TYPE_NAME")
    cleaned = cleaned.join(type_dict, "TYPE", "left_outer")
    cleaned = cleaned.drop("TYPE").withColumnRenamed("TYPE_NAME", "TYPE")

    # Drop the known abnormal record.
    cleaned = cleaned[(cleaned.ID != 1238486768847)]
    return cleaned


# Merge the descriptive-text columns into a single field as input
# for keyword extraction.
def merge_column(df):
    """Build a keyword-extraction input table.

    Concatenates every descriptive text column (comma-separated, nulls
    skipped by concat_ws) into one "summary" column.

    Returns a DataFrame with columns: ID, NAME, summary.
    """
    text_columns = ["NAME", "SUMMARIZE", "CONTENT", "GUIDE_LINE", "RESULT",
                    "TECHNIC_ANALYSIS", "BENEFIT_ANALYSIS",
                    "COOPERATE_SUPPORT", "PRODUCTION_PLAN", "SCOPE", "TYPE"]
    summary_col = concat_ws(",", *[df[name] for name in text_columns]).alias("summary")
    return df.select("ID", "NAME", summary_col)







if __name__ == '__main__':
    # Build a Spark session with modest executor resources.
    conf = SparkConf()
    config = (("spark.executor.memory", "2g"),
              ("spark.executor.cores", "2"))
    conf.setAll(config)
    spark = SparkSession.builder.config(conf=conf).getOrCreate()

    # Source database config: kechaung_old / P_PUBLICINFO / P_BASEINFO.
    mysql_kechaung_old_conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechaung_old', "table_pi": 'P_PUBLICINFO', "table_bi": 'P_BASEINFO'}
    # Read the two source tables.
    print("============正在读取数据mysql数据库=====================")
    p_pubilcinfo = mysql_data_reading(spark, mysql_kechaung_old_conf['url'], mysql_kechaung_old_conf['table_pi'])
    p_baseinfo = mysql_data_reading(spark, mysql_kechaung_old_conf['url'], mysql_kechaung_old_conf['table_bi'])

    # Feature extraction and left join of the two tables.
    print("============正在连接表=====================")
    left_join_df = left_join_select_cloumn(p_pubilcinfo, p_baseinfo)

    # Data cleaning (type casts + dictionary lookups).
    print("============正在数据清洗=====================")
    clean_df = data_clean(spark, left_join_df)

    # Write the wide table to MySQL.
    # BUG FIX: dataframe_mysql_write reads conf["table"]; the previous
    # key name "table_12" raised KeyError at runtime.
    wide_conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": 'trip_tag12'}
    dataframe_mysql_write(clean_df, wide_conf)

    # Concatenate the descriptive text fields and persist the result.
    print("======================正在拼接字段=====================")
    merge_df = merge_column(clean_df)
    print("====================正在将拼接字段后的表写入数据库========================")
    summary_conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": 'keywords_project_summary'}
    dataframe_mysql_write(merge_df, summary_conf)








