# -*- coding: UTF-8 -*-
# t_jd_recommend_resume: export delivery-order / operation join data for JD-resume analysis
import os
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
import pyspark.sql.functions as F
# Point the Spark JVM at the cluster's JDK (Cloudera parcel path) before the session starts.
os.environ['JAVA_HOME'] = '/usr/java/jdk1.8.0_181-cloudera'
# Local-mode session with Hive support; reads table metadata from the
# metastore thrift service on localhost:9083.
# NOTE(review): host/port and JAVA_HOME are hard-coded — confirm they match the deploy target.
spark_session = SparkSession.builder.master("local[*]").appName("hive_test_1") \
    .config("hive.metastore.uris", "thrift://127.0.0.1:9083") \
    .enableHiveSupport().getOrCreate()

def get_re_push():
    """Join "re-push" operations (sub_stage == 401 since 2022) with their
    delivery orders and export the result.

    Side effects:
      - prints each SQL statement, row counts, and one-row per-column
        distinct-count summaries for both input frames
      - writes excel/repush.csv (the full join) and excel/resume2jd.csv
        (the distinct jd_id list collected per resume_id)

    Relies on the module-level ``spark_session``; the cutoff date and
    output paths are hard-coded.
    """

    def show_distinct_counts(df):
        # One-row frame: countDistinct for every column — quick uniqueness profile.
        df.select(*[F.countDistinct(c).alias(c) for c in df.columns]).show()

    sql = "select id,jd_id,resume_id,jd_name,company_name,resume_name,current_company,created_at,updated_at,project_id from rcn_prod.t_delivery_order  "
    print(sql)
    t_delivery_order = spark_session.sql(sql)

    sql = "select  *  from rcn_prod.t_delivery_order_operation where sub_stage == 401 and created_at > \"2022-01-01 00:00:00\"   "
    print(sql)
    order_operation = spark_session.sql(sql)
    # Rename the operation primary key (fixes the old 'opeartion_id' typo)
    # so it cannot collide with the order table's `id` join key.
    order_operation = order_operation.withColumnRenamed('id', 'operation_id')

    print({'order_operation': order_operation.count()})
    show_distinct_counts(order_operation)
    print({'t_delivery_order': t_delivery_order.count()})
    show_distinct_counts(t_delivery_order)

    # Prefix order-table columns (other than the join key `id`) that also
    # exist in the operation table; otherwise the join would produce
    # ambiguous column names. Set membership replaces the old O(n^2) scan.
    operation_cols = set(order_operation.columns)
    for name in t_delivery_order.columns:
        if name != 'id' and name in operation_cols:
            t_delivery_order = t_delivery_order.withColumnRenamed(name, 'delivery_order_' + name)

    # Build the join condition AFTER the renames so both Column references
    # resolve against the frames actually being joined.
    relation = order_operation['order_id'] == t_delivery_order['id']
    join_res = order_operation.join(t_delivery_order, relation)
    print({'join_res': join_res.count()})

    join_res.toPandas().to_csv('excel/repush.csv')
    # One row per resume_id with the list of distinct jd_ids it was pushed to.
    g_sum = (join_res.select('resume_id', 'jd_id').distinct()
             .groupby('resume_id')
             .agg(F.collect_list(join_res.jd_id).alias('jds')))
    g_sum.toPandas().to_csv('excel/resume2jd.csv')

get_re_push()
def get_user_jd_resume():
    """Join ALL delivery-order operations since 2022 with their orders,
    profile per-column uniqueness, and export the results.

    Side effects:
      - prints each SQL statement, row counts, and distinct-count summaries
      - writes excel/data_expression.csv (the full join) and
        excel/unique_data.csv (the one-row distinct-count frame of the join)

    Relies on the module-level ``spark_session``; the cutoff date and
    output paths are hard-coded.
    """

    def show_distinct_counts(df):
        # One-row frame: countDistinct per column. Returned so the caller
        # can also export it.
        counts = df.select(*[F.countDistinct(c).alias(c) for c in df.columns])
        counts.show()
        return counts

    sql = "select id,jd_id,resume_id,jd_name,company_name,resume_name,current_company,created_at,updated_at,project_id from rcn_prod.t_delivery_order  "
    print(sql)
    t_delivery_order = spark_session.sql(sql)

    # Reference: upstream MySQL DDL of the operation table (kept for context).
    """
    CREATE TABLE `t_delivery_order_operation` (
      `id` varchar(32) NOT NULL,
      `order_id` varchar(32) NOT NULL COMMENT '订单ID',
      `project_id` varchar(32) DEFAULT NULL COMMENT '项目ID',
      `project_type` int(11) DEFAULT NULL COMMENT '项目类型',
      `tenant_id` varchar(32) NOT NULL COMMENT '操作人租户ID',
      `user_id` varchar(32) NOT NULL COMMENT '操作人用户ID',
      `user_name` varchar(32) NOT NULL COMMENT '操作人名称',
      `operation_type` int(11) NOT NULL COMMENT '操作类型',
      `operation_param` longtext COMMENT '操作参数',
      `before_main_stage` int(11) NOT NULL COMMENT '操作前主环节',
      `before_sub_stage` int(11) NOT NULL COMMENT '操作前子环节',
      `before_main_status` int(11) NOT NULL COMMENT '操作前主状态',
      `before_sub_status` int(11) NOT NULL COMMENT '操作前子状态',
      `main_stage` int(11) NOT NULL COMMENT '主环节',
      `sub_stage` int(11) NOT NULL COMMENT '子环节',
      `main_status` int(11) NOT NULL COMMENT '主状态',
      `sub_status` int(11) NOT NULL COMMENT '子状态',
      `remark` longtext COMMENT '备注',
      `created_by` varchar(32) NOT NULL COMMENT '创建人',
      `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
      `jd_id` varchar(32) DEFAULT NULL COMMENT '职位ID',
      `jd_code` varchar(32) DEFAULT NULL COMMENT '职位编码',
      `interview_round` int(11) DEFAULT NULL,
      PRIMARY KEY (`id`),
      KEY `idx_order_id` (`order_id`) USING BTREE,
      KEY `idx_updated_at` (`created_at`) USING BTREE,
      KEY `idx_main_status` (`main_status`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='订单操作表';
    """

    sql = "select  *  from rcn_prod.t_delivery_order_operation where created_at > \"2022-01-01 00:00:00\"   "
    print(sql)
    order_operation = spark_session.sql(sql)
    # Rename the operation primary key (fixes the old 'opeartion_id' typo)
    # so it cannot collide with the order table's `id` join key.
    order_operation = order_operation.withColumnRenamed('id', 'operation_id')

    print({'order_operation': order_operation.count()})
    show_distinct_counts(order_operation)
    print({'t_delivery_order': t_delivery_order.count()})
    show_distinct_counts(t_delivery_order)

    # Prefix order-table columns (other than the join key `id`) that also
    # exist in the operation table; otherwise the join would produce
    # ambiguous column names. Set membership replaces the old O(n^2) scan.
    operation_cols = set(order_operation.columns)
    for name in t_delivery_order.columns:
        if name != 'id' and name in operation_cols:
            t_delivery_order = t_delivery_order.withColumnRenamed(name, 'delivery_order_' + name)

    # Build the join condition AFTER the renames so both Column references
    # resolve against the frames actually being joined.
    relation = order_operation['order_id'] == t_delivery_order['id']
    join_res = order_operation.join(t_delivery_order, relation)
    print({'join_res': join_res.count()})

    countdf = show_distinct_counts(join_res)

    # Side-by-side display of each column that survived in both prefixed
    # and unprefixed form.
    for n in countdf.columns:
        if 'delivery_order_' + n in countdf.columns:
            countdf.select(n).show()
            countdf.select('delivery_order_' + n).show()

    join_res.toPandas().to_csv('excel/data_expression.csv')
    countdf.toPandas().to_csv('excel/unique_data.csv')

def get_offer_data():
    """Export delivery orders that reached the offer stage to excel/offer.csv.

    Selects orders whose id appears in an operation row with
    sub_stage == 404 created after 2021-10-01, using a SQL sub-select so
    the filtering happens inside Spark instead of collecting order ids to
    the driver. Relies on the module-level ``spark_session``.
    """
    # (An earlier driver-side isin() variant was removed: it filtered on a
    # list of Column objects rather than collected values and never worked.)
    sql2 = "select id,jd_id,resume_id,jd_name,company_name,resume_name,current_company,created_at,updated_at,project_id from rcn_prod.t_delivery_order "+ \
           "where id in (select  order_id  from rcn_prod.t_delivery_order_operation where sub_stage==404 and created_at > \"2021-10-01 00:00:00\"  ) "
    print(sql2)
    delivery = spark_session.sql(sql2)
    delivery.toPandas().to_csv('excel/offer.csv')

#get_offer_data()
