import os
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import StringType, IntegerType, DateType
from pyspark.sql.functions import lit, months_between, concat_ws
from pyspark.sql.functions import regexp_replace
import numpy as np

# Pin the Python interpreter used by PySpark workers (must match the driver's version).
os.environ['PYSPARK_PYTHON']='/usr/python/bin/python3.7'


def mysql_data_reading(spark, db, table):
    """Load one MySQL table into a Spark DataFrame over JDBC.

    Args:
        spark: active SparkSession used to issue the read.
        db: JDBC connection URL of the source database.
        table: name of the table to load.

    Returns:
        A Spark DataFrame holding the table contents.
    """
    jdbc_options = {
        "url": db,
        "driver": "com.mysql.cj.jdbc.Driver",
        "dbtable": table,
        "user": "root",
        "password": "root",
    }
    reader = spark.read.format("jdbc")
    for key, value in jdbc_options.items():
        reader = reader.option(key, value)
    return reader.load()

# Write a DataFrame back into MySQL.
def dataframe_mysql_write(df, conf):
    """Overwrite the configured MySQL table with the rows of `df`.

    Args:
        df: Spark DataFrame to persist.
        conf: dict carrying "url" (JDBC URL) and "table" (target table name).
    """
    credentials = {"user": "root", "password": "root"}
    df.write.jdbc(
        conf["url"],
        conf["table"],
        mode='overwrite',
        properties=credentials,
    )

def age_compute(df, today="2019-11-28"):
    """Derive an integer AGE column for each expert from BIRTHDAY.

    Age is floor-truncated whole months between `today` and BIRTHDAY,
    divided by 12 — identical arithmetic to the original implementation,
    but computed entirely inside Spark. This fixes two defects of the old
    version: it referenced a global `spark` session (NameError when the
    function is imported from another module), and it round-tripped the
    whole table through `toPandas()`/numpy, collecting every row onto the
    driver just to divide by 12.

    Args:
        df: source DataFrame; must contain BIRTHDAY plus the expert
            columns listed below.
        today: ISO-format reference date the age is measured against.
            Defaults to "2019-11-28" to preserve the original behavior.

    Returns:
        DataFrame with the expert columns plus AGE (int), where ID is
        cast to string and BIRTHDAY to date.
    """
    # Columns carried through unchanged from the source table.
    expert_columns = [
        "ID", "NAME", "SEX", "BIRTHDAY", "PROJECT_NAMEs", "DEGREELEVEL",
        "WORKUNIT", "POSITION", "PROFESSIONALTITLE", "ORGANIZATION",
        "LAST_FIVEYEARS_ACHIEVENMENT_TIME", "LAST_FIVEYEARS_ACHIEVENMENT",
        "LAST_FIVEYEARS_PROJECT_TIME", "LAST_FIVEYEARS_PROJECT",
        "LAST_FIVEYEARS_REWARD_TIME", "LAST_FIVEYEARS_REWARD",
        "EDUCATION_TIME", "GRADUATECOLLEGE", "SPECIALFIELDSTUDY",
        "EDUCATION_EXPERIENCE", "WORK_TIME", "WORKUNIT_EXPERIENCE",
        "WORKEXPERIENCE", "WORK_POSITION", "TECHNIQUE_DOMAIN",
        "WORK_TYPES", "WORK_TYPES_TIME",
    ]

    # Attach the reference date, then compute the whole-month difference
    # to each user's birthday (truncated toward zero, as before).
    df_today = df.withColumn("today", lit(today))
    df_months = df_today.select(
        months_between(df_today.today, df_today.BIRTHDAY)
        .cast(IntegerType())
        .alias('months'),
        *expert_columns,
    )

    # months / 12 truncated to an integer age — same result as the old
    # numpy division followed by an IntegerType cast.
    age_df = df_months.withColumn(
        "AGE", (df_months.months / 12).cast(IntegerType())
    ).drop('months')

    # Normalise key column types to match the target schema.
    age_df = age_df. \
        withColumn("ID", age_df.ID.cast(StringType())). \
        withColumn("BIRTHDAY", age_df.BIRTHDAY.cast(DateType()))

    return age_df




if __name__ == '__main__':
    # Executor resources for this job.
    spark_conf = SparkConf()
    spark_conf.setAll((("spark.executor.memory", "2g"),
                       ("spark.executor.cores", "2")))
    spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()

    # Pull the expert table out of MySQL.
    read_conf = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata', "table": 'expert_new'}
    expert_new_df = mysql_data_reading(spark, read_conf["url"], read_conf["table"])

    # Derive the AGE column from BIRTHDAY.
    age_df = age_compute(expert_new_df)

    # Persist the enriched table back to MySQL.
    conf_write = {"url": 'jdbc:mysql://192.168.0.211:3306/kechuang_middata?useSSL=false', "table": 'expert_new'}
    dataframe_mysql_write(age_df, conf_write)















