#### Revised detection criteria
# %%writefile  ./abnormal_detect_new_rule_ec2.py
### New-rule validation with multi-process / per-partition handling
from typing import Iterable

import numpy as np
import pandas as pd
import time
import datetime
import multiprocessing

from botocore.exceptions import ClientError
from pyspark import Row

from athena_opt import submit, sql_getdata, sql_pandas_data
from boto3.session import Session
import os
import argparse
import sys
import gc
import copy
from pyspark.sql import SparkSession
from spark_utils import spark_rows_to_df


def spark_for_each_par(rows: Iterable[Row]):
    """Per-partition Spark callback: materialize the rows as a pandas
    DataFrame, run the rule-based detector for the fixed date, and print
    the resulting anomaly records."""
    frame = spark_rows_to_df(rows)
    print(Rule_error_detection_pro(frame, '2023-01-18'))



def query_athena_material(p_date):
    """Return the distinct materials whose devices report at least 100
    records on *p_date* (serial numbers containing 'XC' are excluded).

    Returns whatever ``sql_pandas_data`` yields for the query —
    presumably a pandas DataFrame with a single ``material`` column.

    NOTE(review): *p_date* is interpolated into the SQL with an f-string;
    this is only safe while callers pass trusted, well-formed dates.
    """
    sql = f'''
    select
        distinct material
    from(
        select
            material, 
            count(1) over(partition by sn,p_date) as day_count
        from
            ods.iot_fridge
        where 
            p_date = '{p_date}'  and sn not like '%XC%'
    )
    where 
        day_count >= 100 
    '''
    tmp = sql_pandas_data(sql)
    return tmp


def query_athena_data(p_date, material):
    """Fetch one day of fridge telemetry for one *material* from Athena.

    Only devices (sn) with at least 100 records on *p_date* are included;
    serial numbers containing 'XC' are excluded. Returns the result of
    ``sql_pandas_data`` — presumably a pandas DataFrame.

    NOTE(review): *p_date* and *material* are interpolated into the SQL
    with an f-string; safe only for trusted inputs — confirm callers.
    """
    sql = f'''
    select 
        sn, timestamp, coldroomsensortempreture, freezeroomsensortempreture , changingsensortempreture, 
        coldroomtargettempreture, freezeroomtargettempreture, changingroomtargettempreture,
        freezeheat, colddoor, changingdoor, freezedoorb
    from
    (
        select
            sn, timestamp, coldroomsensortempreture, freezeroomsensortempreture , changingsensortempreture, 
            coldroomtargettempreture, freezeroomtargettempreture, changingroomtargettempreture,
            freezeheat, colddoor, changingdoor,  freezedoorb,
            count(1) over(partition by sn,p_date) as day_count
        from
            query.iot_fridge_query
        where 
            p_date = '{p_date}' and material = '{material}' and sn not like '%XC%'
    )
    where day_count>=100
    '''
    tmp = sql_pandas_data(sql)
    return tmp


def Data_type_delete(data):
    """Normalize the boolean status columns to 0/1 and drop door-open rows.

    ``freezeheat``/``colddoor``/``changingdoor``/``freezedoorb`` become
    integer 1 where the value equals True and 0 otherwise, then every row
    where any door flag is 1 (door open) is removed — temperatures taken
    with an open door are not representative.

    :param data: pandas DataFrame containing the four status columns.
    :return: filtered DataFrame (all doors closed), flags as 0/1 ints.
    """
    bool_cols = ['freezeheat', 'colddoor', 'changingdoor', 'freezedoorb']
    for col in bool_cols:
        # Vectorized replacement for the per-row apply(); `== True` keeps
        # the original semantics (None/NaN and False both map to 0).
        data[col] = (data[col] == True).astype(int)
    door_cols = ['colddoor', 'changingdoor', 'freezedoorb']
    # Single combined mask instead of three successive row filters.
    closed = (data[door_cols] == 0).all(axis=1)
    return data[closed]


def Data_type_transform(data):
    """Cast the six sensor/target temperature columns to float so that
    numeric comparisons against thresholds are well-defined."""
    temperature_cols = ('coldroomsensortempreture', 'coldroomtargettempreture',
                        'changingsensortempreture', 'changingroomtargettempreture',
                        'freezeroomsensortempreture', 'freezeroomtargettempreture')
    for col in temperature_cols:
        data[col] = data[col].astype('float')
    return data


def Data_dayfreeze_delete(data):
    """Remove rows in and around defrost-heater activity.

    Every row where ``freezeheat == 1`` plus its neighbours is flagged and
    then dropped, since temperatures during a defrost cycle are not
    representative. Assumes ``data`` has a contiguous RangeIndex — the
    caller resets the index before calling.

    :param data: per-SN DataFrame containing a 0/1 ``freezeheat`` column.
    :return: DataFrame with defrost windows removed (index NOT reset,
        so the surviving index has gaps).
    """
    # Only do work if the heater was on at least once in this day's data.
    if sum(data["freezeheat"].values) > 0:
        fh_list = data[data["freezeheat"] == 1].index.to_list()
        t_index = len(data) - 1
        for i in fh_list:
            s_index = i - 1  # one row before the heater event
            d_index = i + 1  # one row after the heater event
            if s_index < 0:
                s_index = 0
            if d_index > t_index:
                d_index = t_index
            # NOTE(review): .loc slicing is end-INCLUSIVE, so `d_index + 1`
            # flags two rows after the event, not one — confirm this extra
            # widening is intentional rather than an off-by-one.
            data.loc[s_index:d_index + 1, 'freezeheat'] = 1
        data = data[data["freezeheat"] == 0]
    return data


def Data_type_transform_string(data):
    """Cast the identifier columns ('sn', 'timestamp') to pandas'
    dedicated string dtype for stable comparison and sorting."""
    for col in ('sn', 'timestamp'):
        data[col] = data[col].astype('string')
    return data


def Data_fill_pro(data):
    """Backward-fill missing values in every column of *data*.

    Each NaN takes the next valid observation below it; NaNs at the very
    end of a column (nothing below to copy) remain NaN.

    Uses ``Series.bfill()`` — ``fillna(method='bfill')`` is deprecated
    and removed in pandas 3.0. Columns are filled one at a time to keep
    the original in-place mutation of the passed frame.

    :param data: pandas DataFrame.
    :return: the same DataFrame with gaps backfilled.
    """
    for col in data.columns:
        data[col] = data[col].bfill()
    return data


def error_cnt_pro(data, error_dur=12):
    """Find runs of at least *error_dur* consecutive truthy values.

    :param data: sequence of booleans (one flag per sample).
    :param error_dur: minimum run length to report (default 12, the
        original hard-coded threshold — now parameterized).
    :return: dict mapping the run's START index to its length.

    Bug fix: the original keyed mid-sequence runs at ``i - num + 1``
    (start + 1) but end-of-sequence runs at the true start index; both
    cases now consistently use the start index, so callers'
    ``loc[k]`` / ``loc[k + v - 1]`` bracket the actual run.
    """
    res = {}
    num = 0
    for i, flag in enumerate(data):
        if flag:
            num += 1
        else:
            if num >= error_dur:
                # Run occupied indices [i - num, i - 1].
                res[i - num] = num
            num = 0
    if num >= error_dur:
        # Run reaches the end of the sequence: [len(data) - num, len(data) - 1].
        res[len(data) - num] = num
    return res


def _collect_room_errors(data1, mask, sn, p_date, room):
    """Turn a per-sample abnormality vector into result records.

    Each qualifying run reported by ``error_cnt_pro`` becomes one
    ``[sn, p_date, start_ts, end_ts, room]`` entry. Positional indexing
    (``iloc``) is used deliberately: after ``Data_dayfreeze_delete`` the
    frame's index has gaps, so label-based ``loc`` with positional run
    indices (as the original code did) could raise or pick wrong rows.
    """
    found = []
    timestamps = data1['timestamp']
    for start, length in error_cnt_pro(mask.tolist()).items():
        s1 = timestamps.iloc[start]
        s2 = timestamps.iloc[start + length - 1]
        found.append([sn, p_date, s1, s2, room])
    return found


def new_find_errror_pro(data, p_date):
    """Detect sustained temperature anomalies per device (sn) for the
    cold, freeze and changing compartments.

    Pipeline per sn: drop door-open rows, dedupe and sort by timestamp,
    require >= 100 samples, drop defrost windows, backfill gaps, cast to
    float, then compare sensor vs target temperature for each compartment
    whose mean set-point lies in a plausible range.

    :param data: raw telemetry DataFrame for one day (possibly many SNs).
    :param p_date: partition date string, copied into each record.
    :return: list of ``[sn, p_date, start_time, end_time, room]`` records.
    """
    data_set = []
    if len(data) > 0:
        data = Data_type_delete(data)            # remove door-open samples
        data = Data_type_transform_string(data)  # sn/timestamp as strings
        for sn in set(data['sn'].values):
            data1 = data[data['sn'] == sn]
            data1 = data1.drop_duplicates(subset=["timestamp"], inplace=False, keep='first')
            data1 = data1.sort_values(by=["timestamp"])
            data1 = data1.reset_index(drop=True)
            if len(data1) < 100:
                continue  # too few samples for a reliable judgement
            col_names = ['timestamp', 'coldroomsensortempreture', 'coldroomtargettempreture',
                         'changingsensortempreture',
                         'changingroomtargettempreture', 'freezeroomsensortempreture',
                         'freezeroomtargettempreture']
            data1 = Data_dayfreeze_delete(data1)  # drop defrost windows (leaves index gaps)
            data1 = data1[col_names]
            data1 = Data_fill_pro(data1)
            data1 = Data_type_transform(data1)

            # Cold room: mean set-point must be in (-1, 20) and the mean
            # sensor reading plausible; abnormal when |sensor - target| > 5
            # or the sensor reads below 0.
            lccur = data1['coldroomsensortempreture'].mean()
            lcset = data1['coldroomtargettempreture'].mean()
            if -1 < lcset < 20 and abs(lccur) < 50:
                mask = (abs(data1['coldroomsensortempreture'].values
                            - data1['coldroomtargettempreture'].values) > 5) | \
                       (data1['coldroomsensortempreture'].values < 0)
                data_set.extend(_collect_room_errors(data1, mask, sn, p_date, 'cold'))

            # Freeze room: set-point in (-50, 0); only "too warm"
            # (sensor - target > 5, signed) counts as abnormal.
            ldcur = data1['freezeroomsensortempreture'].mean()
            ldset = data1['freezeroomtargettempreture'].mean()
            if -50 < ldset < 0 and abs(ldcur) < 50:
                mask = (data1['freezeroomsensortempreture'].values
                        - data1['freezeroomtargettempreture'].values) > 5
                data_set.extend(_collect_room_errors(data1, mask, sn, p_date, 'freeze'))

            # Changing (variable-temperature) room: set-point in (-50, 20);
            # abnormal when |sensor - target| > 5.
            bwset = data1['changingroomtargettempreture'].mean()
            bwcur = data1['changingsensortempreture'].mean()
            if -50 < bwset < 20 and abs(bwcur) < 50:
                mask = abs(data1['changingsensortempreture'].values
                           - data1['changingroomtargettempreture'].values) > 5
                data_set.extend(_collect_room_errors(data1, mask, sn, p_date, 'changing'))
    # The original `for x in locals().keys(): del locals()[x]` loop was
    # removed: mutating the dict returned by locals() frees nothing inside
    # a CPython function and is fragile. gc.collect() is kept as-is.
    gc.collect()
    return data_set


def Rule_error_detection_pro(data, p_date):
    """Run the rule-based anomaly detector and drop any empty entries
    from its result list."""
    detected = new_find_errror_pro(data, p_date)
    return [record for record in detected if record]


def upload_file_to_s3(bucket, file_name, p_date):
    """Upload *file_name* to ``s3://<bucket>/rule_error_ec2/http_error/``.

    The file is uploaded only when *p_date* appears in its name (sanity
    check that the result file matches the processed date); the object
    key is the file name with its extension stripped.

    :return: True on successful upload, False when the name check fails
        or the upload raises a ClientError.

    SECURITY: the AWS access/secret key pair was previously hard-coded
    here (a leaked credential — it must be rotated). Credentials are now
    read from the environment; when unset, boto3's default credential
    chain (instance profile, config files, ...) applies.
    """
    region_name = "cn-northwest-1"
    session = Session(
        os.environ.get('AWS_ACCESS_KEY_ID'),
        os.environ.get('AWS_SECRET_ACCESS_KEY'),
        region_name=region_name,
    )
    s3_client = session.client('s3')
    try:
        if p_date not in file_name:
            # Original code leaked the client on this path (no close()).
            return False
        file_1 = file_name.split(".")[0]
        object_name = "rule_error_ec2/http_error/" + file_1
        try:
            s3_client.upload_file(file_name, bucket, object_name)
            print('文件推送至s3成功')
        except ClientError as e:
            print('aws_s3文件上传出错{}'.format(e))
            return False
    finally:
        s3_client.close()
    return True


def mk_date_dir(p_date):
    """Ensure a local directory named ``p_date=<date>`` exists.

    :param p_date: date string embedded in the directory name.
    :return: the directory name (relative to the current working dir).
    """
    file_dir = "p_date=" + p_date
    if os.path.exists(file_dir):
        print("文件夹{0}存在".format(file_dir))
    else:
        # exist_ok guards against the race between the exists() check
        # above and creation (os.mkdir would raise FileExistsError).
        os.makedirs(file_dir, exist_ok=True)
    return file_dir


def parse_arguments(argv):
    """Parse the known CLI flags from *argv*; unrecognized arguments are
    silently ignored (parse_known_args).

    :param argv: argument list, e.g. ``sys.argv[1:]``.
    :return: Namespace with a ``p_date`` string (default '').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--p_date',
                        default='',
                        type=str,
                        help='date of read data.')
    parsed, _unused = parser.parse_known_args(argv)
    return parsed


def main():
    """Entry point: read one day of fridge telemetry from a local parquet
    file into Spark and run the rule-based detector on each partition.

    NOTE(review): the hard-coded ``p_date`` and the absolute local parquet
    path look like leftovers from local debugging — the argparse/Athena
    flow below is commented out. ``parse_arguments`` is defined but unused
    here. Confirm before deploying.
    """
    p_date = '2023-01-18'
    if len(p_date) != 10:
        print("p_date错误，请重新设置")
        return
    # Columns required by the detection pipeline (sensor/target temps + flags).
    col_name = ['sn',
                'timestamp',
                'coldroomsensortempreture',
                'freezeroomsensortempreture',
                'changingsensortempreture',
                'coldroomtargettempreture',
                'freezeroomtargettempreture',
                'changingroomtargettempreture',
                'freezeheat',
                'colddoor',
                'changingdoor',
                'freezedoorb']
    # Repartition by sn so each partition holds whole devices; sort within
    # the partition by timestamp before per-partition processing.
    spark = SparkSession.builder.master("local[*]").appName("testApp").getOrCreate()
    spark_df = spark.read. \
        parquet(
        "/Users/ljc/Downloads/part-00026-d3e51df3-c95f-401e-a378-7fc3733727ae.c000.snappy.parquet"). \
        select(col_name). \
        repartition("sn"). \
        sortWithinPartitions("timestamp")

    spark_df.foreachPartition(spark_for_each_par)

    # print(spark_pd)


    # df = pd.DataFrame([])
    # material = query_athena_material(p_date)
    # print("物料总数：", material.shape[0])
    # step = 4
    # pro_cnt = 0
    # for i in range(material.shape[0]):
    #     try:
    #         data = spark_pd
    #         print("当前物料: {0}, 进度{1}".format(material.iloc[i, 0], i + 1))
    #         data = data.fillna(np.nan)
    #         data = data.replace({np.nan: None})
    #         result = Rule_error_detection_pro(data, p_date)
    #         print(result)
    #         # if len(result) < 1:
    #         #     print("无故障数据")
    #         #     continue
    #         # e1 = [t for st in result for t in st]
    #         # e2 = np.array(e1).reshape(-1, 5)
    #         # res = pd.DataFrame(data=e2, columns=['sn', 'p_date', 'start_time', 'end_time', 'room'])
    #         # df = df.append(res)
    #     except Exception as e:
    #         print(e)
    #         continue
    # if len(df) > 0:
    #     file_dir = mk_date_dir(p_date)
    #     local_error = file_dir + '/rule_result_' + p_date + "-" + datetime.datetime.now().strftime('%H-%M-%S') + '.csv'
    #     df.to_csv(local_error, encoding='utf_8_sig', index=False)
    #     fn = len(set(df['sn'].values))
    #     print("检测出异常设备：", fn)
    #     bucket = "fuxiong"
    #     upload_file_to_s3(bucket, local_error, p_date)
    #     df.drop(df.index, inplace=True)
    # for x in locals().keys():
    #     del locals()[x]
    # gc.collect()

# Script entry point. The timing / multiprocessing scaffolding below is
# disabled; main() currently takes no arguments (see note in main()).
if __name__ == "__main__":
    main()
    # multiprocessing.set_start_method('spawn')
    # start_time = time.time()  # program start time
    # main(parse_arguments(sys.argv[1:]))
    # time.sleep(10)
    # end_time = time.time()  # program end time
    # run_time = end_time - start_time  # total runtime in seconds
    # print("处理时间:%*.3fs" % (3, run_time))
