# -*- coding: UTF-8 -*-
import os
import time
import findspark
findspark.init()
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
import pyspark.sql.functions as F
# Point Spark at the local JDK; must be set before the JVM is launched.
os.environ['JAVA_HOME'] = '/usr/java/jdk1.8.0_181-cloudera'

# Local-mode session with Hive support so queries/saves go through the
# metastore at the configured thrift endpoint.
spark_session = SparkSession.builder.master("local[*]").appName("hive_test_1") \
    .config("hive.metastore.uris", "thrift://127.0.0.1:9083") \
    .enableHiveSupport().getOrCreate()

def create_hive(db,table,dataframe):
    """Persist *dataframe* as a Hive table, replacing any existing table.

    :param db: target Hive database name
    :param table: target table name
    :param dataframe: Spark DataFrame to save
    """
    qualified_name = '{}.{}'.format(db, table)
    writer = dataframe.write.format("hive").mode("overwrite")
    writer.saveAsTable(qualified_name)

def get_detail_table_sql(limit_n=10000):
    """Build the SQL pulling delivery-order operations with sub_stage 401.

    The raw statement uses bare table names; the replacement pairs below
    qualify the stand-alone (space-delimited) occurrences with the
    ``rcn_prod`` database. Column qualifiers such as
    ``t_delivery_order.id`` are left untouched and resolve as aliases.

    :param limit_n: row cap appended as the LIMIT clause
    :return: the final SQL string
    """
    sql = 'select  t_delivery_order_operation.created_at,t_delivery_order.id, t_delivery_order_operation.user_id , t_delivery_order.jd_id,  resume_id ,resume_name ,t_delivery_order.sub_stage,before_sub_stage From  t_delivery_order_operation  left join t_delivery_order on t_delivery_order.id  == t_delivery_order_operation.order_id  where t_delivery_order.sub_stage ==  401 and t_delivery_order_operation.sub_stage == 401 limit {}'.format(limit_n)
    # Longer name first so the second pattern cannot match inside the
    # already-qualified operation-table name.
    replacements = (
        (' t_delivery_order_operation ', ' rcn_prod.t_delivery_order_operation '),
        (' t_delivery_order ', ' rcn_prod.t_delivery_order '),
    )
    for bare, qualified in replacements:
        sql = sql.replace(bare, qualified)
    return sql

def get_detail_hive(n):
    """Run the detail query against Hive and return the result.

    :param n: row limit forwarded to :func:`get_detail_table_sql`
    :return: Spark DataFrame holding the query result
    """
    return spark_session.sql(get_detail_table_sql(n))


def get_self_join_sql(table=None):
    """Build the self-join SQL pairing each operation with earlier ones.

    Each row of *table* (aliased ``after``) is joined to every row of
    ``my_test.history_order_operation`` sharing the same ``resume_id``
    whose ``created_at`` is strictly earlier.

    The original code kept two near-identical copies of the statement;
    the no-table branch is exactly the template formatted with the
    default table, so a single template with a default suffices.

    :param table: source table for the ``after`` side; any falsy value
        falls back to ``my_test.history_order_operation`` (pure self join)
    :return: the SQL string
    """
    if not table:
        table = 'my_test.history_order_operation'
    sql = 'select after.id  as next_op_id , after.resume_id , after.jd_id as jd_next,after.user_id as next_user, after.created_at  as next_time ,after.sub_stage as next_stage,  my_test.history_order_operation.id  as prev_op_id ,   my_test.history_order_operation.jd_id as jd_prev, my_test.history_order_operation.created_at as prev_time , my_test.history_order_operation.user_id as prev_user  , my_test.history_order_operation.sub_stage as prev_stage  from  {} as  after full join   my_test.history_order_operation  on after.resume_id =my_test.history_order_operation.resume_id where my_test.history_order_operation.created_at <  after.created_at   '
    return sql.format(table)


def get_self_join_df(table,output_tmp_table='self_join'):
    """Self-join the detail table and expose the result as a temp view.

    :param table: ``db.table`` name or a registered temporary view name
    :param output_tmp_table: name under which the result is registered
    :return: the joined Spark DataFrame
    """
    df2 = spark_session.sql(get_self_join_sql(table))
    # registerTempTable is deprecated since Spark 2.0;
    # createOrReplaceTempView has identical semantics for this use.
    df2.createOrReplaceTempView(output_tmp_table)
    return df2


def get_last_prev_operation_from_self_join(df2):
    """Collapse the join output to one ``prev`` record per ``next`` operation.

    Groups on the ``next``-side columns and keeps ``F.last`` of every
    ``prev``-side column.

    NOTE(review): ``F.last`` without an explicit ordering is not
    guaranteed deterministic in Spark — which row survives depends on
    partitioning; confirm this is acceptable.

    :param df2: DataFrame produced by the self join
    :return: de-duplicated DataFrame
    """
    group_cols = ('next_op_id', 'resume_id', 'jd_next',
                  'next_user', 'next_time', 'next_stage')
    prev_cols = ('prev_op_id', 'jd_prev', 'prev_user', 'prev_time')
    aggregations = [F.last(col).alias(col) for col in prev_cols]
    return df2.groupby(*group_cols).agg(*aggregations)


def get_operation_by_date(last, date):
    """Keep only rows whose ``next_time`` contains *date*.

    :param last: DataFrame with a ``next_time`` column
    :param date: date string (e.g. ``2022-04-01``) matched as a SQL LIKE
        substring
    :return: the filtered DataFrame
    """
    pattern = '%{}%'.format(date)
    return last.filter(last['next_time'].like(pattern))

class timer:
    """Tiny stopwatch used to print per-stage durations.

    Uses ``time.monotonic()`` rather than ``time.time()``: a monotonic
    clock cannot jump on NTP/wall-clock adjustments, so elapsed-time
    measurements stay correct.
    """

    def __init__(self):
        # Creation time kept for reference; `current` is the lap marker.
        self.start = time.monotonic()
        self.current = time.monotonic()

    def get_time_from_pass(self, reset):
        """Return whole seconds elapsed since the last lap marker.

        :param reset: when truthy, restart the lap from now
        :return: elapsed seconds, truncated to ``int``
        """
        elapsed = int(time.monotonic() - self.current)
        if reset:
            self.current = time.monotonic()
        return elapsed
# ---- driver: pull details, self-join, dedupe, dump one day's rows to CSV ----
myt = timer()

detail_df = get_detail_hive(100000)
print({'detail_hive': detail_df.count()})
print('spend', myt.get_time_from_pass(True))
# registerTempTable is deprecated since Spark 2.0; same semantics.
detail_df.createOrReplaceTempView('temp_detail')

self_join_df = get_self_join_df('temp_detail')
print({'self_join_df': self_join_df.count()})
print('spend', myt.get_time_from_pass(True))

prev = get_last_prev_operation_from_self_join(self_join_df)
print({'prev': prev.count()})
print('spend', myt.get_time_from_pass(True))

date = '2022-04-01'
last_today = get_operation_by_date(prev, date)
print({'last_today': last_today.count()})
print('spend', myt.get_time_from_pass(True))

# to_csv fails with FileNotFoundError if the directory is missing.
os.makedirs('excel', exist_ok=True)
last_today.toPandas().to_csv(f'excel/{date}.csv')


# 2s 1w rows
#df.toPandas().to_csv('excel/history_order_operation.csv')

# hive_database = 'my_test'
# hive_table1 = 'history_order_operation'
# hive_table2 = 'history_order_operationtoday'
# df.show(10)
# create_hive(hive_database,hive_table1,df)

#/home/ai/anaconda3/envs/bigdata/bin/python3.7 get_pipeline.py
