#coding:utf-8
import sys
#import datetime
import os
import time
from pyspark.sql import HiveContext
from pyspark.sql import SparkSession
import time
import subprocess
from hdfs import InsecureClient
import math
import json
from utils.hdfs_utils import *
import pyspark.sql.functions as fn
from datetime import datetime, date, timedelta

# Module-level configuration. All of these are populated from sys.argv by
# initArgs() before start_transform() runs.
target_table_name = None  # target Hive table name (template formatted with database)
hdfsOperator = None  # HDFS user name used for file operations
nnList = []  # list of HDFS namenode addresses
flumeDataPath=None  # source path where flume lands data (template formatted with database)
hiveTempPath=None  # staging path for files before the Spark load (template formatted with database)
partitionParam=None  # numeric string controlling the repartition count
currProtocol=None  # protocol name; used for file selection and the temp-view name
filename=None  # HQL file name under hql/<database>/
limitNum=None  # max number of files to move per run (int)
database=None  # database name substituted into the templates above
handle_error_date=None  # "true"/"false": whether to route out-of-range part_time rows
def main():
    """Entry point: load configuration from the command line, then run the
    flume-to-Hive transform job."""
    initArgs()
    start_transform()

# Initialize the parameters required by the job (parsed from sys.argv).
def initArgs():
    """Parse command-line arguments into the module-level configuration globals.

    Expected argv layout (script name + 11 positional arguments):
        1  comma-separated namenode list
        2  target table name template ("{0}" is the database name)
        3  HDFS operator user name
        4  flume data path template ("{0}" is the database name)
        5  hive temp path template ("{0}" is the database name)
        6  partition parameter (numeric string)
        7  current protocol name
        8  HQL file name under hql/<database>/
        9  file-move limit (int)
        10 database name
        11 "true"/"false": whether to handle out-of-range dates

    Exits the process with a non-zero status when too few arguments are given.
    """
    global nnList
    global target_table_name
    global hdfsOperator
    global flumeDataPath
    global hiveTempPath
    global partitionParam
    global currProtocol
    global filename
    global limitNum
    global database
    global handle_error_date
    # BUG FIX: the original guard was `len(sys.argv) < 11`, but sys.argv[11]
    # is read below, so 12 entries (script name + 11 args) are required;
    # with exactly 11 entries the old check passed and then raised IndexError.
    if len(sys.argv) < 12:
        print("Parameters are empty or invalid.")
        # Exit non-zero so schedulers/wrapper scripts can detect the failure
        # (the original exit(0) reported success on bad input).
        sys.exit(1)
    nnList = sys.argv[1].split(",")
    target_table_name_temp = sys.argv[2]
    hdfsOperator = sys.argv[3]
    flumeDataPath_temp = sys.argv[4]
    hiveTempPath_temp = sys.argv[5]
    partitionParam = sys.argv[6]
    currProtocol = sys.argv[7]
    filename = sys.argv[8]
    limitNum = int(sys.argv[9])
    database = sys.argv[10]
    handle_error_date = sys.argv[11]
    # Substitute the database name into the table/path templates.
    target_table_name = target_table_name_temp.format(database)
    flumeDataPath = flumeDataPath_temp.format(database)
    hiveTempPath = hiveTempPath_temp.format(database)

def start_transform():
    """Run the load: stage flume files into a temp area, transform them with
    Spark SQL, and insert the result into the target Hive table.

    Relies on the module globals populated by initArgs(). The HDFS helpers
    (getHDFSClient, moveFiles, getRepartitionByFileNum, readFile, backupData)
    come from utils.hdfs_utils.
    """
    sc = SparkSession.builder.appName(target_table_name).enableHiveSupport().getOrCreate()
    os.environ["HADOOP_USER_NAME"] = hdfsOperator
    client = getHDFSClient(nnList, hdfsOperator)
    # Stage up to limitNum flume files into the hive temp path.
    moveFiles(client, hiveTempPath, flumeDataPath, currProtocol, limitNum)
    repartition = getRepartitionByFileNum(client, hiveTempPath, int(float(partitionParam)))
    print("partition num is " + str(repartition))
    time_now = time.strftime("%Y-%m-%d %H:%M:%S")
    # The HQL template takes the current timestamp and the database name.
    sql = readFile('hql/' + database + '/' + filename).format(time_now, database)
    print(sql)
    # Valid part_time window: 5 days ago through tomorrow (yyyymmdd ints).
    # Rows outside this window are treated as having an erroneous date.
    yesterday = date.today() + timedelta(days=-5)
    start_day = int(yesterday.strftime("%Y%m%d"))
    next_day = date.today() + timedelta(days=1)
    end_day = int(next_day.strftime("%Y%m%d"))
    now_day = int(time.strftime("%Y%m%d"))
    print("condition start_day={0},end_day={1}".format(start_day, end_day))
    df = sc.sql(sql)
    temp_table_name = "spark_temp_{0}".format(currProtocol)
    df.createOrReplaceTempView(temp_table_name)
    # Compute the flag once; the original re-evaluated a redundant
    # `True if ... else False` ternary at each use.
    handle_errors = handle_error_date.lower() == 'true'
    correctSql = "select * from {0}".format(temp_table_name)
    if handle_errors:
        correctSql = "select * from {0} where part_time >= {1} and part_time <= {2}".format(temp_table_name, start_day, end_day)
    correctDF = sc.sql(correctSql)
    print("correctSql:" + correctSql)
    print("database name is:" + target_table_name)
    correctDF.repartition(repartition).write.insertInto(target_table_name)

    if handle_errors:
        print("handle error date data start ...")
        # Out-of-window rows go to the "<table>_errordate" side table,
        # with part_time overwritten by today's date.
        errorSql = "select * from {0} where part_time < {1} or part_time > {2}".format(temp_table_name, start_day, end_day)
        errorDF = sc.sql(errorSql)
        print(now_day)
        errorDF = errorDF.withColumn("part_time", fn.lit(now_day))
        errorDF.repartition(repartition).write.insertInto(target_table_name + "_errordate")
        print("handle error date data end ...")

    # Log-message typo fixed ("successed" -> "succeeded").
    print("insert succeeded, clean tempfolder data.")
    backupDataPath = "/user/data/{0}/backup".format(database)
    backupData(client, hiveTempPath, backupDataPath, currProtocol)
    #deleteFiles(client, hiveTempPath)
    #deleteFiles(client, hiveTempPath)

# Script entry point: run the job only when executed directly.
if __name__ == "__main__":
    main()
