# Imports for the scheduled notification + Spark-load job.
import requests
import os
import datetime
import time
import numpy as np
import pandas as pd
from config import config_scene
import warnings
# Silence noisy library warnings in the scheduled-job logs.
warnings.filterwarnings("ignore")
# Project config object; supplies `start_day` used in the status message below.
config01 = config_scene()

###########################################################################


if __name__ == '__main__':

    url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=1002b68d-0e1f-45cc-97e5-b985f08af747"
    headers = {"Content-Type":"application/json"}
    proxies = {'https': 'http://10.210.14.9:3128'}

    try:
        start_t = time.time()

        date_i01 = int(datetime.datetime.today().strftime('%Y%m%d'))
        custflow_result01 = pd.read_csv('custflow_result_fina.csv')
        date_i02 = custflow_result01.sdt.unique()[0]

        if date_i02 == date_i01:

            custflow_result01 = custflow_result01.astype(str)
            print('custflow_result_fina:', custflow_result01.shape,custflow_result01.columns)

            from pyspark import SQLContext, HiveContext
            from pyspark import SparkContext, SparkConf
            from pyspark.sql import SparkSession

            app_name = "Rick_test"
            conf = SparkConf().setAppName(app_name).setMaster("yarn")
            conf.set("spark.yarn.queue", "root.yhtech.testrun")
            conf.set("spark.driver.memory", '4G')  # 一般这个 2G 就够了 不需要调整
            conf.set("spark.executor.memory", '8G')  # 2个G的去调
            conf.set("deploy-mode", 'cluster')
            conf.set("spark.dynamicAllocation.enabled", True)  # 打开动态   也可以直接设置False 直接关闭 动态分配
            conf.set("spark.dynamicAllocation.minExecutors", 10)  # 最小
            conf.set("spark.dynamicAllocation.maxExecutors", 15)  # 最大
            conf.set("spark.executor.cores", 2)
            conf.set("spark.sql.crossJoin.enabled", True)
            conf.set("spark.rpc.message.maxSize", 1024)
            conf.set("spark.driver.maxResultSize", "4g")
            conf.set("spark.port.maxRetries", 128)

            sc = SparkContext(conf=conf)
            hc = HiveContext(sc)
            spark = SparkSession(sc)
            print('spark ok!')

            meta_lst = [tuple(custflow_result01.iloc[x].tolist()) for x in range(len(custflow_result01))]
            spark_df = spark.createDataFrame(meta_lst, ["shop_id", "date_id", "start_time", "custflow", "sdt"])
            spark_df.write.mode("append").insertInto("data_mining.data_mining_order_forecast_temp")

        msg =  str(config01.start_day) + ' data_tospark temp task succeed and runtime:' + str(round((time.time()-start_t)/60,2))
        print(msg)
        r=requests.post(url, headers= headers,json={"msgtype":"text","text":{"content":msg}},proxies=proxies)

    except Exception as e:
        # 处理其他异常
        msg = f"data_tospark temp failed: {e}"
        print(msg)
        r=requests.post(url, headers= headers,json={"msgtype":"text","text":{"content":msg}},proxies=proxies)









