import os
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import StringType, IntegerType, DateType
from pyspark.sql.functions import lit, months_between, concat_ws
from pyspark.sql.functions import regexp_replace
import numpy as np


# Point PySpark worker processes at the Python 3.7 interpreter installed on the
# cluster nodes (must match the driver's Python major.minor version).
os.environ['PYSPARK_PYTHON']='/usr/python/bin/python3.7'

# Connect to MySQL and load one table as a Spark DataFrame.
def mysql_data_load(spark, db, table, user="root", password="root"):
    """Read a single MySQL table over JDBC into a Spark DataFrame.

    Args:
        spark: active SparkSession.
        db: JDBC connection URL, e.g. "jdbc:mysql://host:3306/dbname".
        table: name of the table to read.
        user: MySQL user (default keeps the previously hard-coded value,
            so existing callers are unaffected).
        password: MySQL password (same backward-compatible default).

    Returns:
        pyspark.sql.DataFrame containing the table's rows.
    """
    # NOTE(review): credentials were hard-coded inline; they are now
    # overridable keyword parameters with the old values as defaults.
    tableDF = (spark.read.format("jdbc")
               .option("url", db)
               .option("driver", "com.mysql.jdbc.Driver")
               .option("dbtable", table)
               .option("user", user)
               .option("password", password)
               .load())

    return tableDF
# Write a DataFrame into a MySQL table over JDBC.
def dataframe_mysql_write(df, conf, user="root", password="root", mode="overwrite"):
    """Persist `df` to the MySQL table described by `conf`.

    Args:
        df: Spark DataFrame to write.
        conf: dict with keys "url" (JDBC URL) and "table" (target table name).
        user: MySQL user (backward-compatible default "root").
        password: MySQL password (backward-compatible default "root").
        mode: Spark save mode; default "overwrite" preserves the original
            behavior of replacing the target table on every run.
    """
    auth_mysql = {"user": user, "password": password}
    df.write.jdbc(conf["url"], conf["table"], mode=mode, properties=auth_mysql)


# Left-outer-join two DataFrames on the given column(s).
def table_left_outer_join(table1, table2, column):
    """Return ``table1 LEFT OUTER JOIN table2`` using `column` as the join key."""
    return table1.join(table2, column, "left_outer")

"""
ID	                            decimal	    标识
AGE	                            decimal	    年龄
SEX	                            decimal	    性别
BIRTHDAY	                    datetime	出生日期
WORKUNIT	                    varchar	    工作单位
DESCRIPTION                     text        个人简介
POSITION	                    varchar	    职务
PROFESSIONALTITLE	            decimal	    职称              需要对应上文字
EDUCATEBACKGROUND	            decimal	    最高学历            需要对应上文字
DEGREELEVEL	                    decimal	    最后学位            需要对应上文字
GRADUATECOLLEGE	                varchar	    毕业院校
SPECIALFIELDSTUDY	            decimal	    所学专业            需要对应上文字
SPECIALFIELDENGAGED             decimal     现从事专业          需要对应上文字
WORKEXPERIENCE	                text	    主要工作经历
WORKRESULT	                    text	    主要工作成绩


特征提取，格式化，数据清洗
"""
def select_clean1(spark, df, reference_date="2019-11-06"):
    """First cleaning pass: select fields, drop test/org/role rows, normalize
    types and recompute AGE from BIRTHDAY.

    Args:
        spark: active SparkSession. Kept for interface compatibility; no longer
            strictly needed since the pandas round-trip was removed.
        df: joined M_ACTOR / M_USER DataFrame.
        reference_date: "today" used when deriving AGE from BIRTHDAY. Defaults
            to the snapshot date the original pipeline hard-coded.

    Returns:
        Cleaned, typed DataFrame with AGE = truncated years between
        `reference_date` and BIRTHDAY.
    """
    # Columns carried through the pipeline, in their output order.
    columns = ["ID", "NAME", "TYPE", "AGE", "BIRTHDAY", "SEX", "WORKUNIT",
               "DESCRIPTION", "POSITION", "PROFESSIONALTITLE",
               "EDUCATEBACKGROUND", "DEGREELEVEL", "GRADUATECOLLEGE",
               "SPECIALFIELDSTUDY", "SPECIALFIELDENGAGED",
               "WORKEXPERIENCE", "WORKRESULT"]
    select_df = df.select(*columns)

    # Drop known test data and organization accounts.
    select_df = select_df[(select_df.NAME != "蔡文虎1") & (select_df.ID != "7651163907543")]
    select_df = select_df[(select_df.TYPE != "com.comit.monkey.domain.Organization")]

    # Normalize column types. withColumn on an existing column keeps its
    # position, so the output column order is unchanged.
    string_cols = ("ID", "NAME", "TYPE", "WORKUNIT", "DESCRIPTION", "POSITION",
                   "GRADUATECOLLEGE", "WORKEXPERIENCE", "WORKRESULT")
    int_cols = ("AGE", "SEX", "PROFESSIONALTITLE", "EDUCATEBACKGROUND",
                "DEGREELEVEL", "SPECIALFIELDSTUDY", "SPECIALFIELDENGAGED")
    schema_df = select_df
    for col_name in string_cols:
        schema_df = schema_df.withColumn(col_name, schema_df[col_name].cast(StringType()))
    for col_name in int_cols:
        schema_df = schema_df.withColumn(col_name, schema_df[col_name].cast(IntegerType()))
    schema_df = schema_df.withColumn("BIRTHDAY", schema_df.BIRTHDAY.cast(DateType()))

    # Drop permission-role rows (users whose TYPE is the Role domain class).
    filter_df = schema_df[(schema_df.TYPE != "com.comit.monkey.domain.Role")]

    # Recompute AGE entirely in Spark: months between the reference date and
    # BIRTHDAY, divided by 12 and truncated via the IntegerType cast. This
    # matches the original numpy float division followed by an int cast, and
    # replaces a toPandas()/createDataFrame round-trip that collected the whole
    # table to the driver and then needed a second full re-cast pass.
    age_df = filter_df.withColumn(
        "AGE",
        (months_between(lit(reference_date), filter_df.BIRTHDAY) / 12).cast(IntegerType()),
    )
    return age_df



# Second cleaning pass: replace every coded numeric field with its text label.
def _code_labels(code_df, item_df, description, column):
    """Build a lookup DataFrame (code value -> label) for one dictionary.

    Finds the dictionary id in `code_df` by its DESCRIPTION text, filters the
    matching items from `item_df`, and renames the columns to `column` /
    `column + "_NAME"` so the result can be joined onto the wide table.
    """
    # .first() fetches a single Row; the original collected the whole filtered
    # frame to pandas just to read one id.
    code_id = code_df[(code_df.DESCRIPTION == description)].first()["ID"]
    items = item_df[(item_df.CODE == code_id)].select("VALUE", "NAME")
    return (items
            .withColumn("VALUE", items.VALUE.cast(IntegerType()))
            .withColumnRenamed("VALUE", column)
            .withColumn("NAME", items.NAME.cast(StringType()))
            .withColumnRenamed("NAME", column + "_NAME"))


def _swap_code_for_label(df, labels, column):
    """Left-join `labels` on `column`, then replace the numeric code column
    with its text label while keeping the original column name."""
    joined = table_left_outer_join(df, labels, column)
    return joined.drop(column).withColumnRenamed(column + "_NAME", column)


def clean2(df1, df2, df3, df4):
    """Replace all coded numeric fields on the wide user table with readable text.

    Args:
        df1: S_CODE dictionary header table (DESCRIPTION -> ID).
        df2: S_CODE_ITEM dictionary items (CODE, VALUE, NAME).
        df3: S_TECHNOLOGY specialty table (ID, NAME).
        df4: cleaned wide user table produced by select_clean1.

    Returns:
        DataFrame where PROFESSIONALTITLE, EDUCATEBACKGROUND, DEGREELEVEL,
        SPECIALFIELDSTUDY, SPECIALFIELDENGAGED and SEX hold text labels.
    """
    result = df4

    # Title / education / degree all come from the generic code dictionary,
    # looked up by the dictionary's description text.
    for description, column in (("职称", "PROFESSIONALTITLE"),
                                ("学历", "EDUCATEBACKGROUND"),
                                ("学位", "DEGREELEVEL")):
        labels = _code_labels(df1, df2, description, column)
        result = _swap_code_for_label(result, labels, column)

    # Studied / currently-practiced specialty both map through S_TECHNOLOGY.
    tech = df3.select("ID", "NAME")
    for column in ("SPECIALFIELDSTUDY", "SPECIALFIELDENGAGED"):
        labels = (tech
                  .withColumn("ID", tech.ID.cast(IntegerType()))
                  .withColumnRenamed("ID", column)
                  .withColumn("NAME", tech.NAME.cast(StringType()))
                  .withColumnRenamed("NAME", column + "_NAME"))
        result = _swap_code_for_label(result, labels, column)

    # SEX uses a fixed dictionary id instead of a description lookup.
    # NOTE(review): 1224470250122 is presumably the S_CODE id of the gender
    # dictionary — magic number carried over from the original; confirm
    # against the S_CODE table.
    sex_items = df2[(df2.CODE == 1224470250122)].select("VALUE", "NAME")
    sex_labels = (sex_items
                  .withColumn("VALUE", sex_items.VALUE.cast(IntegerType()))
                  .withColumnRenamed("VALUE", "SEX")
                  .withColumn("NAME", sex_items.NAME.cast(StringType()))
                  .withColumnRenamed("NAME", "SEX_NAME"))
    result = _swap_code_for_label(result, sex_labels, "SEX")

    return result

# Merge all descriptive text fields into one column, in preparation for
# keyword extraction.
def merge_column(df):
    """Return ID, NAME plus a `summary` column that joins every descriptive
    text field with commas."""
    descriptive_fields = [
        df.WORKUNIT,
        df.DESCRIPTION,
        df.POSITION,
        df.PROFESSIONALTITLE,
        df.EDUCATEBACKGROUND,
        df.DEGREELEVEL,
        df.GRADUATECOLLEGE,
        df.SPECIALFIELDSTUDY,
        df.SPECIALFIELDENGAGED,
        df.WORKEXPERIENCE,
        df.WORKRESULT,
    ]
    summary = concat_ws(",", *descriptive_fields).alias("summary")
    return df.select("ID", "NAME", summary)




if __name__ == '__main__':
    # Build a Spark session with modest executor resources.
    spark_conf = SparkConf()
    spark_conf.setAll((("spark.executor.memory", "2g"),
                       ("spark.executor.cores", "2")))
    spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()

    # JDBC endpoints: source schema (raw data) and target schema (cleaned data).
    source_db = "jdbc:mysql://192.168.0.211:3306/kechaung_old"
    target_db = 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false'

    # Load the two user tables from MySQL and left-join them on ID.
    actor_df = mysql_data_load(spark, source_db, "M_ACTOR")
    user_df = mysql_data_load(spark, source_db, "M_USER")
    au_df = table_left_outer_join(actor_df, user_df, "ID")

    # First cleaning pass: field selection, typing, age derivation.
    print("===================================开始第一次清洗==================================")
    age_df = select_clean1(spark, au_df)

    # Second cleaning pass: translate coded fields via the dictionary tables.
    print("===================================开始第二次清洗==================================")
    code_df = mysql_data_load(spark, source_db, "S_CODE")
    code_item_df = mysql_data_load(spark, source_db, "S_CODE_ITEM")
    technology_df = mysql_data_load(spark, source_db, "S_TECHNOLOGY")
    clean_df = clean2(code_df, code_item_df, technology_df, age_df)

    # Persist the cleaned wide table.
    print("===================================开始写入数据库==================================")
    dataframe_mysql_write(clean_df, {"url": target_db, "table": 'expert_wide_clean'})

    # Merge all descriptive fields into one summary column and persist it.
    merge_df = merge_column(clean_df)
    dataframe_mysql_write(merge_df, {"url": target_db, "table": 'keywords_personas_summary'})

