#### Revised fault-judgment rule
# %%writefile  ./abnormal_detect_new_rule_ec2.py
### New-rule validation, processed with multiprocessing
from typing import Iterable

import numpy as np
import pandas as pd
import time
import datetime
import multiprocessing

from botocore.exceptions import ClientError
from pyspark import Row
from pyspark.sql import SparkSession

from athena_opt import submit, sql_getdata, sql_pandas_data
from boto3.session import Session
import os
import argparse
import sys
import gc
import copy
from spark_utils import spark_rows_to_df


def spark_for_each_par(rows: Iterable[Row]):
    """Process one Spark partition: convert its rows to pandas and run rule detection."""
    frame = spark_rows_to_df(rows)
    # Normalize missing values (NaN first, then NaN -> None) before detection
    frame = frame.fillna(np.nan).replace({np.nan: None})
    # Re-sort by serial number and rebuild a clean 0..n-1 index
    frame = frame.sort_values(by=["sn"]).reset_index(drop=True)
    detected = new_find_errror_pro(frame, '2023-01-18')
    if len(detected) < 1:
        print("无故障数据")
    print(detected)



def query_athena_material(p_date):
    """Query Athena for the distinct materials that have an SN reporting
    at least 100 rows on ``p_date`` (SNs containing 'XC' are excluded)."""
    sql = f'''
    select
        distinct material
    from(
        select
            material, 
            count(1) over(partition by sn,p_date) as day_count
        from
            ods.iot_fridge
        where 
            p_date = '{p_date}'  and sn not like '%XC%'
    )
    where 
        day_count >= 100 
    '''
    return sql_pandas_data(sql)


def query_athena_data(p_date, material):
    """Query Athena for one material's per-SN telemetry on ``p_date``,
    keeping only SNs with at least 100 rows for the day."""
    sql = f'''
    select 
        sn, timestamp, coldroomsensortempreture, freezeroomsensortempreture , changingsensortempreture, 
        coldroomtargettempreture, freezeroomtargettempreture, changingroomtargettempreture,
        freezeheat, colddoor, changingdoor, freezedoorb
    from
    (
        select
            sn, timestamp, coldroomsensortempreture, freezeroomsensortempreture , changingsensortempreture, 
            coldroomtargettempreture, freezeroomtargettempreture, changingroomtargettempreture,
            freezeheat, colddoor, changingdoor,  freezedoorb,
            count(1) over(partition by sn,p_date) as day_count
        from
            query.iot_fridge_query
        where 
            p_date = '{p_date}' and material = '{material}' and sn not like '%XC%'
    )
    where day_count>=100
    '''
    return sql_pandas_data(sql)


def Data_type_delete(data):
    """Binarize the boolean status columns (True -> 1, anything else -> 0)
    and drop every row where any of the three doors is open."""
    for flag in ('freezeheat', 'colddoor', 'changingdoor', 'freezedoorb'):
        data[flag] = data.loc[:, flag].apply(lambda v: 1 if v == True else 0)
    for door in ('colddoor', 'changingdoor', 'freezedoorb'):
        data = data[data[door] == 0]
    return data


def Data_type_transform(data):
    """Cast the six temperature columns to float; values that cannot be
    converted are left as-is (errors='ignore')."""
    temp_cols = ['coldroomsensortempreture', 'coldroomtargettempreture', 'changingsensortempreture',
                 'changingroomtargettempreture', 'freezeroomsensortempreture', 'freezeroomtargettempreture']
    data[temp_cols] = data[temp_cols].astype('float', errors='ignore')
    return data


def Data_dayfreeze_delete(data):
    """Remove samples taken around defrost-heater activity.

    Each row with freezeheat == 1 is expanded to its neighbours via a
    label-based (end-inclusive) ``.loc`` slice and the whole window is
    then dropped.  Assumes a 0..len-1 integer index (callers reset the
    index first).

    NOTE(review): the slice end ``hi + 1`` reaches one row further than
    the clamped neighbour ``hi`` — kept as-is to preserve behaviour.
    """
    if sum(data["freezeheat"].values) > 0:
        last = len(data) - 1
        for idx in data[data["freezeheat"] == 1].index.to_list():
            lo = max(idx - 1, 0)
            hi = min(idx + 1, last)
            data.loc[lo:hi + 1, 'freezeheat'] = 1
        data = data[data["freezeheat"] == 0]
    return data


def Data_type_transform_string(data):
    """Cast the identifier columns ('sn', 'timestamp') to pandas string dtype."""
    for col in ('sn', 'timestamp'):
        data[col] = data[col].astype('string', errors='ignore')
    return data


def Data_fill_pro(data):
    """Backward-fill missing values in every column.

    Replaces the deprecated per-column ``fillna(method='bfill')`` loop
    (removed in recent pandas) with a single frame-level ``bfill()`` —
    identical result in one pass.  Trailing NaNs (with nothing after
    them to copy) remain NaN, exactly as before.
    """
    return data.bfill()


def error_cnt_pro(data):
    """Find runs of at least 12 consecutive True flags.

    Returns a dict mapping run start index -> run length for every run of
    consecutive True values in ``data`` whose length is >= 12.

    Bug fix: a run terminated by a False at position i spans indices
    [i - num, i - 1], so its start is ``i - num``.  The original recorded
    ``i - num + 1`` (one past the true start) in this branch, while the
    end-of-list branch recorded the correct start — the two now agree.
    """
    res = {}
    num = 0
    error_dur = 12  # minimum consecutive flagged samples to count as a fault window
    for i, flag in enumerate(data):
        if flag == True:
            num += 1
        else:
            if num >= error_dur:  # run just ended at index i - 1
                res[i - num] = num
            num = 0
    # flush a run that reaches the end of the list
    if num >= error_dur:
        res[len(data) - num] = num
    return res


def new_find_errror_pro(data, p_date):
    """Detect sustained temperature faults per serial number.

    For every SN in ``data`` with >= 100 samples for the day, flags runs of
    >= 12 consecutive samples (see error_cnt_pro) where a room's sensor
    temperature deviates from its target, and returns a list of
    [sn, p_date, start_timestamp, end_timestamp, room] records with room
    one of 'cold' / 'freeze' / 'changing'.  Empty list when nothing is found.

    NOTE(review): despite the prefixes, ``ld*`` reads the freezer-room
    columns and ``bw*`` reads the changing-room columns — the original
    inline comments had the two swapped; corrected below.
    """
    data_set = []
    if len(data) > 0:
        data = Data_type_delete(data)  ## drop samples recorded while a door was open
        data = Data_type_transform_string(data)
        sns = set(data['sn'].values)
        for sn in sns:
            # time.sleep(1)
            data1 = data[data['sn'] == sn]  # process one SN at a time
            data1 = data1.drop_duplicates(subset=["timestamp"], inplace=False, keep='first')  # drop duplicate timestamps
            data1 = data1.sort_values(by=["timestamp"])  # chronological order
            data1 = data1.reset_index(drop=True)  # rebuild 0..n-1 index (required by the .loc lookups below)
            total_len = len(data1['timestamp'].values)
            try:
                if total_len >= 100:  # skip SNs with too few samples for the day
                    col_names = ['timestamp', 'coldroomsensortempreture', 'coldroomtargettempreture',
                                 'changingsensortempreture',
                                 'changingroomtargettempreture', 'freezeroomsensortempreture',
                                 'freezeroomtargettempreture']  # fridge sensor/target, changing-room sensor/target, freezer sensor/target temperatures
                    data1 = Data_dayfreeze_delete(data1)
                    data1 = data1[col_names]
                    data1 = Data_fill_pro(data1)
                    data1 = Data_type_transform(data1)
                    data1 = data1.reset_index(drop=True)  # re-index after defrost-row deletion
                    lccur = data1['coldroomsensortempreture'].mean()  # mean fridge-room sensor temperature
                    lcset = data1['coldroomtargettempreture'].mean()  # mean fridge-room target temperature
                    # plausibility gate: fridge setpoint within (-1, 20) and sensor mean within +/-50
                    if ((lcset > -1) and (lcset < 20)) and abs(lccur) < 50:
                        # fault sample: |sensor - target| > 5, or sensor below 0
                        tmp = (abs(
                            data1['coldroomsensortempreture'].values - data1['coldroomtargettempreture'].values) > 5) | (
                                      data1['coldroomsensortempreture'].values < 0)
                        tmp_list = tmp.tolist()
                        lc_error = error_cnt_pro(tmp_list)
                        for k, v in lc_error.items():
                            s1 = data1.loc[k, 'timestamp']
                            s2 = data1.loc[k + v - 1, 'timestamp']
                            data_set.append([sn, p_date, s1, s2, 'cold'])
                    ldcur = data1['freezeroomsensortempreture'].mean()  # mean freezer-room sensor temperature
                    ldset = data1['freezeroomtargettempreture'].mean()  # mean freezer-room target temperature
                    if ((ldset > -50) and (ldset < 0)) and (abs(ldcur) < 50):
                        # freezer fault is one-sided: sensor more than 5 degrees ABOVE target
                        tmp = data1['freezeroomsensortempreture'].values - data1['freezeroomtargettempreture'].values > 5
                        tmp_list = tmp.tolist()
                        ld_error = error_cnt_pro(tmp_list)
                        for k, v in ld_error.items():
                            s1 = data1.loc[k, 'timestamp']
                            s2 = data1.loc[k + v - 1, 'timestamp']
                            data_set.append([sn, p_date, s1, s2, 'freeze'])
                    bwset = data1['changingroomtargettempreture'].mean()  # mean changing-room target temperature
                    bwcur = data1['changingsensortempreture'].mean()  # mean changing-room sensor temperature
                    if ((bwset > -50) and (bwset < 20)) and (abs(bwcur) < 50):
                        tmp = abs(
                            data1['changingsensortempreture'].values - data1['changingroomtargettempreture'].values) > 5
                        tmp_list = tmp.tolist()
                        bw_error = error_cnt_pro(tmp_list)
                        for k, v in bw_error.items():
                            s1 = data1.loc[k, 'timestamp']
                            s2 = data1.loc[k + v - 1, 'timestamp']  # last timestamp of the consecutive fault window
                            data_set.append([sn, p_date, s1, s2, 'changing'])
            except Exception as e:
                # On any failure, dump the offending SN's full frame for post-mortem debugging
                print(e)
                pd.set_option('display.max_columns', None)  # show all columns
                pd.set_option('display.max_rows', None)  # show all rows
                pd.set_option('display.width', None)  # auto-adjust display width
                pd.set_option('display.max_colwidth', None)  # show full cell contents
                print(data1)

            else:
                # no exception raised: nothing extra to do (continue is redundant)
                continue
    # for x in locals().keys():
    #     del locals()[x]
    # gc.collect()
    return data_set


def Rule_error_detection_pro(data, p_date):
    """Run new_find_errror_pro over ``data`` in parallel row chunks.

    Splits the frame into up to ``kernel_num`` chunks, hands each to a
    worker process, and returns the non-empty per-chunk result lists.
    ``data`` is emptied in place afterwards to release memory eagerly.

    Bug fix: with fewer rows than workers, ``len(data) // kernel_num``
    was 0 and ``range(0, len(data), 0)`` raised ValueError; the chunk
    size is now clamped to at least 1, and an empty frame returns []
    immediately (range(0, 0, 0) also raised).
    """
    results = []
    if len(data) == 0:
        return results
    kernel_num = 4
    pool = multiprocessing.Pool(processes=kernel_num)  # worker pool

    def get_result(result):
        # callback executes in the parent process, so appending is safe
        results.append(result)

    chunk = max(1, len(data) // kernel_num)  # never 0, even for tiny frames
    tmp_data = {}
    for start in range(0, len(data), chunk):
        stop = min(start + chunk, len(data))
        # deep-copy so each worker gets an independent slice to pickle
        tmp_data[start] = copy.deepcopy(data[start:stop])
    for k, v in tmp_data.items():
        pool.apply_async(new_find_errror_pro, args=(v, p_date,), callback=get_result)
    pool.close()
    pool.join()
    results = list(filter(None, results))  # drop chunks that found nothing
    # release the (potentially large) input frame and chunk copies
    data.drop(data.index, inplace=True)
    tmp_data.clear()
    del tmp_data
    del data
    gc.collect()
    return results


def upload_file_to_s3(bucket, file_name, p_date):
    """Upload ``file_name`` to s3://<bucket>/rule_error_ec2/http_error/.

    Uploads only when ``p_date`` appears in the file name (guards against
    pushing a stale day's output).  Returns True on success, False on a
    name mismatch or a client error.

    Security fix: the AWS access key / secret key were hard-coded in the
    source; they are now read from the standard AWS environment variables
    so credentials never live in version control.  The client is also
    closed on every path (the original leaked it on the mismatch branch).
    """
    if p_date not in file_name:
        return False
    region_name = "cn-northwest-1"
    session = Session(
        os.environ.get("AWS_ACCESS_KEY_ID"),
        os.environ.get("AWS_SECRET_ACCESS_KEY"),
        region_name=region_name,
    )
    s3_client = session.client('s3')
    # strip the extension; the object key keeps only the base name
    file_1 = file_name.split(".")[0]
    object_name = "rule_error_ec2/http_error/" + file_1
    try:
        s3_client.upload_file(file_name, bucket, object_name)
        print('文件推送至s3成功')
    except ClientError as e:
        print('aws_s3文件上传出错{}'.format(e))
        return False
    finally:
        s3_client.close()
    return True


def mk_date_dir(p_date):
    """Ensure the per-day output directory ``p_date=<date>`` exists; return its name.

    Uses ``os.makedirs(..., exist_ok=True)`` so creation is race-free —
    the original exists()/mkdir() pair could raise FileExistsError if
    another process created the directory between the two calls.
    """
    file_dir = "p_date=" + p_date
    if os.path.exists(file_dir):
        print("文件夹{0}存在".format(file_dir))
    else:
        os.makedirs(file_dir, exist_ok=True)
    return file_dir


def parse_arguments(argv):
    """Parse the command line; only --p_date (the data date string) is recognised.

    Unknown arguments are tolerated (parse_known_args) so the script can be
    launched by wrappers that pass extra flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--p_date',
                        default='',
                        type=str,
                        help='date of read data.')
    known, _unknown = parser.parse_known_args(argv)
    return known


def main():
    """Entry point: load one day's parquet dump into Spark and run the
    rule-based fault detection over each partition.

    Fix: ``parse_arguments`` was defined but never used — the date is now
    taken from ``--p_date`` when supplied, falling back to the original
    hard-coded default so existing invocations behave identically.
    """
    args = parse_arguments(sys.argv[1:])
    p_date = args.p_date or '2023-01-18'
    if len(p_date) != 10:  # expect an ISO date: YYYY-MM-DD
        print("p_date错误，请重新设置")
        return
    col_name = ['sn',
                'timestamp',
                'coldroomsensortempreture',
                'freezeroomsensortempreture',
                'changingsensortempreture',
                'coldroomtargettempreture',
                'freezeroomtargettempreture',
                'changingroomtargettempreture',
                'freezeheat',
                'colddoor',
                'changingdoor',
                'freezedoorb']
    spark = SparkSession.builder.master("local[1]").appName("testApp").getOrCreate()
    # NOTE(review): the input path is a hard-coded local sample file —
    # parameterize it before running anywhere but the author's machine.
    spark_df = spark.read.parquet(
        "/Users/ljc/Downloads/part-00026-d3e51df3-c95f-401e-a378-7fc3733727ae.c000.snappy.parquet"). \
        select(col_name). \
        repartition(1). \
        cache()

    spark_df.createOrReplaceTempView("t1")

    print("count:", spark_df.count())
    spark.sql("select count(distinct sn) from t1").show()
    # print("distinct:",spark_df.groupby("sn").max().collect())

    spark_df.foreachPartition(spark_for_each_par)


if __name__ == "__main__":
    main()
