from flask import *
import pandas as pd
import requests
import hashlib
import re
import numpy as np
from datetime import datetime, timedelta
import arrow
import pymysql
from jobs.sql_mehods import *
import traceback
from sklearn.linear_model import LinearRegression
import random


class Source(object):
    """Client for the "north" HTTP API: login, fetch device configuration,
    pull 5-minute offline data, and logout.

    NOTE(review): server URL and credentials are hard-coded — consider
    moving them to configuration.
    """

    def __init__(self):
        # Test environment (kept for reference):
        # self.server = 'http://192.168.253.198:9005/'
        # self.username = "test"
        # self.password = "123456"
        self.server = 'http://10.20.7.227:8001/'
        self.username = "ysrd2"
        self.password = "ysrd2"

    def login(self):
        """Authenticate and return the session token.

        The password is MD5-hashed before being sent. On success the API
        embeds the token inside the human-readable ``error_msg`` field after
        the marker ``token：`` (full-width colon), so it is extracted with a
        regex.

        Returns:
            str: session token for subsequent calls.

        Raises:
            Exception: if the API answers with a non-zero ``error_code``.
            IndexError: if no ``token...`` substring is present in error_msg.
        """
        md5 = hashlib.md5()
        md5.update(self.password.encode(encoding='utf-8'))
        password = md5.hexdigest()

        payload = {
            "version": None,
            "data": {
                "username": self.username,
                "password": password
            }
        }
        res = requests.post(url=self.server + 'north/login', json=payload).json()
        if res['error_code'] == 0:
            data = res['error_msg']
            token = re.findall('token.*', data)
            token = token[0].replace('token：', '')
        else:
            raise Exception('login error_code != 0')
        return token

    def logout(self, token):
        """Invalidate the session token.

        NOTE(review): this call sends form-encoded ``data=`` while every
        other endpoint sends ``json=`` — confirm the server accepts it.
        """
        payload = {
            "version": None,
            "data": None
        }
        headers = {
            'token': token
        }
        res = requests.post(url=self.server + 'north/logout', data=payload, headers=headers)
        print('登出成功')

    def get_config(self, token):
        """Fetch the device configuration tree; return the raw response dict."""
        payload = {
            "version": None,
            "data": {
                'version': None
            }
        }
        headers = {
            'token': token
        }
        res = requests.post(url=self.server + 'north/config_get', headers=headers, json=payload).json()
        return res

    def get_data(self, token, dt, nodes):
        """Fetch offline 5-minute data for the given devices at time ``dt``.

        Args:
            token: session token from :meth:`login`.
            dt (datetime): record time, converted to a unix timestamp.
            nodes (list[str]): device guids from :meth:`parse_nodes`.

        Returns:
            The response ``data`` payload, or None on a non-zero error_code.
        """
        time_stamp = dt.timestamp()
        payloads = {
            "version": None,
            'data': {
                'rec_time': int(time_stamp),
                'device_guids': nodes
            }
        }
        headers = {
            'token': token
        }
        res = requests.post(url=self.server + 'north/offline_data_get_device', json=payloads, headers=headers,
                            timeout=120).json()
        if res.get('error_code') == 0:
            return res.get('data')
        else:
            return None

    # Main driver: iterate every 5-minute slot and pull its data.
    def do_get(self, start, end):
        """Pull data for every 5-minute timestamp in ``[start, end]``.

        Returns:
            list[dict]: one record per timestamp with the outcome.
            (Fix: the original built this list but never returned it.)
        """
        date_list = pd.date_range(
            start=start,
            end=end,
            freq='5min'
        ).to_pydatetime()

        message = []
        for dt in date_list:
            try:
                token = self.login()
                node_data = self.get_config(token)
                nodes = self.parse_nodes(node_data)
                data = self.get_data(token, dt, nodes)
                # TODO: write data to db
                self.logout(token)
                message.append({"datetime": dt, 'result': True})
            except Exception:
                message.append({"datetime": dt, 'result': False, 'error': traceback.format_exc()})
        return message

    def parse_nodes(self, data):
        """Flatten the two-level config tree into a flat list of device guids."""
        nodes = []
        data = data['data']['nodes']
        for i in data:
            for j in i['nodes']:
                nodes.append(j['guid'])
        return nodes


elec5min = Blueprint('elec5min', __name__)


# Process devices that reported negative values: extract their guids from the
# abnormal messages and return the checklist of enterprises/devices to inspect.
@elec5min.route("/get_abnoaml_enterprise", methods=['GET', 'POST'])
def get_abnoaml_enterprise():
    """Return enterprises/devices whose guid appears more than once in
    abnormal_device_table messages.

    Fixes vs. original:
      * guard against an empty guid set — the original interpolated it into
        ``IN ()``, which is invalid SQL and crashed the request;
      * removed a dead per-enterprise count computation whose result was
        never used.
    """
    from collections import Counter

    db = MySQLHelper(host='mmservice-05.mysql.hotgrid.cn', port=3306,
                     user='electricity_api_service', password='GJlfh7&#jg',
                     db='electricity_data', charset='utf8')
    sql = f"""select abnormal_msg from abnormal_device_table"""
    data = db.execute_charts(sql)

    # Pull every guid out of the free-text abnormal messages.
    pattern = r"guid: (\w+-\w+-\w+-\w+-\w+)"
    list_guid = []
    for msg in data['abnormal_msg']:
        list_guid.extend(re.findall(pattern, msg))

    # A guid flagged more than once counts as abnormal.
    duplicates = {item for item, count in Counter(list_guid).items() if count > 1}
    if not duplicates:
        # Nothing abnormal — avoid building an invalid "IN ()" clause.
        return jsonify({"data": [], "length": 0})

    # Guids matched \w and '-' only, so this interpolation cannot break out of
    # the quotes; parameter binding would still be preferable.
    duplicates_str = ', '.join("'{}'".format(item) for item in duplicates)

    sql_abnornal_entprise = f"""
SELECT elec_enterprise_info.id, elec_enterprise_info.ent_name ,
elec_dev_info.id as  dev_id,elec_dev_info.dev_name,elec_dev_info.dev_code,'异常' as  status
FROM elec_enterprise_info 
RIGHT JOIN elec_dev_info ON elec_enterprise_info.id = elec_dev_info.ent_id 
RIGHT JOIN elecdata_basic_info ON elec_dev_info.id = elecdata_basic_info.devid
WHERE elecdata_basic_info.guid in ({duplicates_str});"""
    result = db.execute_select(sql_abnornal_entprise)
    return jsonify({"data": result, "length": len(result)})


# Abnormal-data monitoring
@elec5min.route("/unnomaldev", methods=['GET', 'POST'])
def unnomaldev():
    """Dump every row of abnormal_device_table as JSON."""
    helper = MySQLHelper(host='mmservice-05.mysql.hotgrid.cn', port=3306,
                         user='electricity_api_service', password='GJlfh7&#jg',
                         db='electricity_data', charset='utf8')
    rows = helper.execute_charts("""select * from abnormal_device_table""")
    return jsonify({"data": rows})


# Local five-minute data-flow monitoring
@elec5min.route("/5minsql", methods=['GET', 'POST'])
def get5min():
    """Return 5-minute and aggregated 15-minute electricity readings for the
    requested devices, bucketed into lists by device type.

    Request args:
        devlist: comma-separated device codes.
        start_time / end_time: 'YYYY-mm-dd HH:MM:SS' bounds; both must fall
            inside the monthly partition table derived from start_time.
        data_anonyed: "1" to apply industry desensitization scaling.

    NOTE(review): request args are interpolated directly into SQL strings —
    injection risk; parameter binding should be used. TODO confirm upstream
    sanitizing.
    """
    # Connect to the database and fetch the data
    db = MySQLHelper(host='mmservice-05.mysql.hotgrid.cn', port=3306,
                     user='electricity_api_service', password='GJlfh7&#jg',
                     db='electricity_data', charset='utf8')
    dev_list = request.args.get("devlist")
    start_time = request.args.get("start_time")
    end_time = request.args.get("end_time")
    data_anonyed = request.args.get("data_anonyed")
    dev_list = dev_list.split(',')
    # Look up the industry the queried enterprise belongs to (via the first device)
    industry_type_id_sql = f"""select industry_type_id from elec_enterprise_info where id = 
    (select ent_id from elec_dev_info where dev_code in ({"'"+dev_list[0]+"'"}))"""
    industry_type_id = db.execute_select(industry_type_id_sql)[0]['industry_type_id']
    dev_list = '"' + '","'.join(dev_list) + '"'
    # From the list of devices that should exist, later pick out the ones not reporting
    all_work_guid_sql = f"""select guid from elecdata_basic_info
        where data_type_id = 8
and dev_code in ({dev_list})"""
    all_work_guid = db.execute_charts(all_work_guid_sql)
    # Only the partition table matching start_time's month can be queried
    from datetime import datetime
    data_object = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
    data_year = data_object.year
    data_month = "{:02d}".format(data_object.month)
    # Self-join consecutive 5-minute rows to turn cumulative meter readings
    # into per-interval consumption deltas.
    sql = f"""SELECT 
    t1.guid, 
    t1.data_time, 
    t2.data_time AS next_data_time, 
    round((t2.value - t1.value),2) AS value 
FROM 
    elecdata_data_5m_{data_year}{data_month}  AS t1
JOIN 
    elecdata_data_5m_{data_year}{data_month}  AS t2 
ON 
    t1.guid = t2.guid 
    AND DATE_ADD(t1.data_time, INTERVAL 5 MINUTE) = t2.data_time
WHERE 
    t1.guid in (
        select guid from elecdata_basic_info
        where data_type_id = 8
and dev_code in ({dev_list}))
 and t1.data_time >='{start_time}'
and t1.data_time <='{end_time}' 
"""
    print(sql,"aasd")
    # and t1.project_id = 225
    # Execute the SELECT query
    data = db.execute_select(sql)
    try:
        df_data = pd.DataFrame(data)
        df_data['data_time'] = pd.to_datetime(df_data['data_time'])
    except Exception as e:
        # Fallback fake data
        # NOTE(review): this is a plain dict, not a DataFrame — the .groupby
        # below would raise on it; the outer try/except then swallows that.
        df_data = {'guid': ['aaa', 'bbb'],
                   'data_time': ['2021-01-01 00:00:00', '2024-01-02 00:00:00'],
                   'next_data_time': ['2021-01-03', '2021-01-04'],
                   'value': [1, 2]}
    time_range = pd.date_range(start=start_time, end=end_time, freq="5T")
    time_list_15min = pd.date_range(start=start_time, end=end_time, freq="15T")
    # Original labels (translated). NOTE(review): the labels on indirect_list
    # and all_in_one_list look swapped relative to the dev_type dispatch
    # below — TODO confirm.
    # indirect_list: integrated production & treatment, dev_type 5
    # production_list: pollution-producing, dev_type 2
    # pollution_control_list: pollution treatment, dev_type 3
    # summation_list: enterprise total electricity, dev_type 1
    # all_in_one_list: indirect electricity use, dev_type 4
    out_result = {
        "time_list_5min": time_range.strftime('%Y-%m-%d %H:%M:%S').tolist(),
        "summation_list": [],
        "warn_info": [],
        "production_list": [],
        "indirect_list": [],
        "pollution_control_list": [],
        "all_in_one_list": [],
        "time_list": time_list_15min.strftime('%Y-%m-%d %H:%M:%S').tolist()
    }

    try:
        # Apply data desensitization (anonymization):
        if data_anonyed == "1":
            industry_desensitization_coeffcientsql = """select * from industry_desensitization_coeffcient"""
            industry_desensitization_coeff_value = float(
                db.execute_select(industry_desensitization_coeffcientsql)[0]['industry_desensitization_coeff_value'])
            industry_getmax_sql = f"""select * from industry_desensitization where id={industry_type_id}"""
            industry_maxstandard_value = db.execute_select(industry_getmax_sql)[0]['industry_des']
            industry_maxstandard_value = float(industry_maxstandard_value)
            # NOTE(review): `x != np.NaN` is always True (NaN never equals
            # itself), so the scaling is applied unconditionally.
            df_data['value'] = df_data['value'].apply(
                lambda x: round(
                    x * industry_desensitization_coeff_value / industry_maxstandard_value if x != np.NaN else x,
                    1))
            df_data['value'].fillna('-99', inplace=True)
        # end data anonyed

        guid_list = df_data.groupby('guid').groups.keys()
        guid_list = list(guid_list)
        # Collect the guids that actually returned data, then work out which
        # expected guids are missing using set subtraction.
        all_work_guid_set = set(all_work_guid['guid'])
        guid_list_set = set(guid_list)
        # Elements present in all_work_guid_set but absent from guid_list_set
        remaining_guid_set = all_work_guid_set - guid_list_set
        # Back to a list
        remaining_guid_list = list(remaining_guid_set)
        # First timestamp of the fetched data
        first_data_time = df_data['data_time'].iloc[0]
        # Placeholder rows (value 0) for the silent devices
        new_df = pd.DataFrame({
            'guid': remaining_guid_list,
            'data_time': [first_data_time] * len(remaining_guid_list),
            'next_data_time': [first_data_time] * len(remaining_guid_list),
            'value': [0] * len(remaining_guid_list),
        })
        # Merge the placeholders with the real data
        df_data = pd.concat([df_data, new_df], ignore_index=True)

        guid_list = list(df_data.groupby('guid').groups.keys())
        # Fetch device name/type metadata for every guid
        get_dev_msg_list_sql = f"""select edi.id as dev_id,edi.rated_power as rated_power, '总正向有功电度' as data_type, edi.dev_code as dev_code,  ebi.guid as guid,edi.dev_name as dev_name,edi.dev_type as dev_type,edi.dev_type_name as dev_type_name  from elec_dev_info edi left join 
        elecdata_basic_info ebi on edi.id = ebi.devid 
        where ebi.data_type_id = '8' and ebi.guid in ({"'" + "','".join(guid_list) + "'"})"""
        get_dev_msg_list = db.execute_charts(get_dev_msg_list_sql)

        grouped = df_data.groupby('guid')
        # NOTE(review): `a` indexes get_dev_msg_list positionally; this assumes
        # its row order matches the groupby's guid order — TODO confirm.
        a = 0
        for guid, group in grouped:
            # NOTE(review): the result of to_numeric is discarded — no effect.
            pd.to_numeric(group['value'], errors='coerce')
            group.set_index('data_time', inplace=True)
            group = group.reindex(time_range)
            # New column aligned to the full 5-minute range
            group['data_time_new'] = pd.Series(time_range)
            # Alias: 'groups' refers to the very same frame as 'group'.
            groups = group
            # Scale up by 10, mark gaps with the "-99" sentinel, interpolate,
            # then scale back down.
            group['value'] = group['value'] * 10
            group['value'].fillna("-99", inplace=True)  # fill missing 'value' entries with the sentinel
            group['valuepre'] = chazhi(group['value'])
            # Interpolate the gaps
            group['value'].replace("-99", np.nan, inplace=True)
            group['value'] = group['value'] / 10
            group['value'].fillna("-99", inplace=True)  # fill missing 'value' entries with the sentinel
            group['valuepre'] = group['valuepre']/10

            # The original observations must win: wherever a real value exists,
            # overwrite the interpolated one.
            group.loc[(group['value'].notna()) & (group['value'] != 0) & (group['value'] != '-99') & (
                    group['value'] != group['valuepre']), 'valuepre'] = group['value']


            # 15-minute model
            groups['data_time'] = pd.to_datetime(groups['data_time_new'])
            df = groups.reindex(time_range)
            # Make sure data_time_new is datetime-typed and sorted
            df.sort_values('data_time_new', inplace=True)
            # # set data_time as the index
            # Resample to 5-minute intervals, filling gaps with NaN
            df_resampled = df.resample('5T').asfreq()
            # Resample again to 15-minute intervals, summing each bucket's values
            df_resampled_15min = df_resampled.resample('15T').sum()


            # Keep one decimal place on 'valuepre'
            df_resampled_15min['valuepre'] = df_resampled_15min['valuepre'].round(1)
            # Mask trailing real-time zeros at the end of the series
            # If the data is all zeros, leave it untouched
            if not (df_resampled_15min['valuepre'] == 0).all():
                # Same-sized mask, all False initially
                mask = np.full(df_resampled_15min.shape[0], False)

                # Walk back from the tail until a non-zero value is hit or 3
                # entries have been flagged (original comment said 5 — code uses 3)
                count_changes = 0
                for i in range(df_resampled_15min.shape[0] - 1, -1, -1):
                    if df_resampled_15min['valuepre'].iloc[i] == 0 and count_changes < 3:
                        mask[i] = True
                        count_changes += 1
                    else:
                        break

                print(mask, "目前的mask的状态")

                # Set the masked trailing-zero positions to -99
                df_resampled_15min.loc[mask, 'valuepre'] = -99

            if get_dev_msg_list['dev_type'][a] == 1:
                out_result["summation_list"].append({
                    "guid": guid,
                    "data_type_id": 8,
                    "data_type": "总正向有功电度",
                    "dev_id": get_dev_msg_list['dev_id'][a],
                    # NOTE: key 'deta_type' (sic) is part of the response contract
                    "deta_type": get_dev_msg_list['data_type'][a],
                    "dev_code": get_dev_msg_list['dev_code'][a],
                    "rated_power": get_dev_msg_list['rated_power'][a],
                    "dev_name": get_dev_msg_list['dev_name'][a],
                    "dev_type": get_dev_msg_list['dev_type'][a],
                    "dev_type_name": get_dev_msg_list['dev_type_name'][a],
                    "values_5min": group['value'].tolist(),
                    "values_5min_pre": group['valuepre'].tolist(),
                    "values": df_resampled_15min['valuepre'].tolist(),
                    # NOTE(review): 'valuepre15m' is never created above, so
                    # this raises KeyError and the bare except below silently
                    # drops every dev_type==1 device — TODO confirm intent.
                    "originvalues": df_resampled_15min['valuepre15m'].tolist(),
                })
            elif get_dev_msg_list['dev_type'][a] == 2:
                out_result["production_list"].append({
                    "guid": guid,
                    "data_type_id": 8,
                    "data_type": "总正向有功电度",
                    "dev_id": get_dev_msg_list['dev_id'][a],
                    "deta_type": get_dev_msg_list['data_type'][a],
                    "dev_code": get_dev_msg_list['dev_code'][a],
                    "rated_power": get_dev_msg_list['rated_power'][a],
                    "dev_name": get_dev_msg_list['dev_name'][a],
                    "dev_type": get_dev_msg_list['dev_type'][a],
                    "dev_type_name": get_dev_msg_list['dev_type_name'][a],
                    "values_5min": group['value'].tolist(),
                    "values_5min_pre": group['valuepre'].tolist(),
                    "values": df_resampled_15min['valuepre'].tolist(),
                })
            elif get_dev_msg_list['dev_type'][a] == 3:
                out_result["pollution_control_list"].append({
                    "guid": guid,
                    "data_type_id": 8,
                    "data_type": "总正向有功电度",
                    "dev_id": get_dev_msg_list['dev_id'][a],
                    "deta_type": get_dev_msg_list['data_type'][a],
                    "dev_code": get_dev_msg_list['dev_code'][a],
                    "rated_power": get_dev_msg_list['rated_power'][a],
                    "dev_name": get_dev_msg_list['dev_name'][a],
                    "dev_type": get_dev_msg_list['dev_type'][a],
                    "dev_type_name": get_dev_msg_list['dev_type_name'][a],
                    "values_5min": group['value'].tolist(),
                    "values_5min_pre": group['valuepre'].tolist(),
                    "values": df_resampled_15min['valuepre'].tolist(),
                })
            elif get_dev_msg_list['dev_type'][a] == 4:
                out_result["all_in_one_list"].append({
                    "guid": guid,
                    "data_type_id": 8,
                    "data_type": "总正向有功电度",
                    "dev_id": get_dev_msg_list['dev_id'][a],
                    "deta_type": get_dev_msg_list['data_type'][a],
                    "dev_code": get_dev_msg_list['dev_code'][a],
                    "rated_power": get_dev_msg_list['rated_power'][a],
                    "dev_name": get_dev_msg_list['dev_name'][a],
                    "dev_type": get_dev_msg_list['dev_type'][a],
                    "dev_type_name": get_dev_msg_list['dev_type_name'][a],
                    "values_5min": group['value'].tolist(),
                    "values_5min_pre": group['valuepre'].tolist(),
                    "values": df_resampled_15min['valuepre'].tolist(),
                })
            elif get_dev_msg_list['dev_type'][a] == 5:
                out_result["indirect_list"].append({
                    "guid": guid,
                    "data_type_id": 8,
                    "data_type": "总正向有功电度",
                    "dev_id": get_dev_msg_list['dev_id'][a],
                    "deta_type": get_dev_msg_list['data_type'][a],
                    "dev_code": get_dev_msg_list['dev_code'][a],
                    "rated_power": get_dev_msg_list['rated_power'][a],
                    "dev_name": get_dev_msg_list['dev_name'][a],
                    "dev_type": get_dev_msg_list['dev_type'][a],
                    "dev_type_name": get_dev_msg_list['dev_type_name'][a],
                    "values_5min": group['value'].tolist(),
                    "values_5min_pre": group['valuepre'].tolist(),
                    "values": df_resampled_15min['valuepre'].tolist(),
                })
            a += 1
    # NOTE(review): bare except hides every failure in the block above
    # (including the KeyError noted at dev_type 1); at minimum log here.
    except:
        pass
    return jsonify({"result": out_result})


# Interpolation helper for filling gaps marked with the "-99" sentinel
def chazhi(data):
    """Linearly interpolate "-99" sentinel gaps in a numeric sequence.

    "-99" entries are treated as missing; the remaining values are linearly
    interpolated over their positions and the result is post-processed by
    ``process_and_replace_data``.

    Args:
        data: iterable of numbers and/or the string sentinel "-99".

    Returns:
        list: interpolated and post-processed values, or ``[]`` when every
        entry is missing (callers rely on this quirk).

    Fixes vs. original: removed leftover debug ``print`` calls and the
    misspelled accumulator name; behavior otherwise unchanged.
    """
    values = [np.nan if value == "-99" else float(value) for value in data]
    # All-missing (or empty) input: nothing to interpolate.
    if np.all(np.isnan(values)):
        return []

    indices = np.arange(len(values))
    arr = np.asarray(values)
    valid_mask = ~np.isnan(arr)
    # Linear interpolation across the gaps, anchored on the valid points.
    interpolated = np.interp(indices, indices[valid_mask], arr[valid_mask])
    return process_and_replace_data(interpolated.tolist())


def process_and_replace_data(data, threshold=0.1, sequence_length=20, non_zero_percentage=0.4):
    """Post-process an interpolated series.

    First zeroes out points lying on a local straight line (via
    ``process_data``), then scans the result in fixed-size windows: when a
    window is more than ``non_zero_percentage`` non-zero, its remaining zeros
    are replaced with random picks from the series' non-zero values.
    """
    cleaned = process_data(data, threshold)

    # Pool of replacement candidates, taken once from the cleaned series.
    pool = [v for v in cleaned if v != 0]

    start = 0
    while start < len(cleaned) - sequence_length:
        window = cleaned[start:start + sequence_length]

        # Fraction of non-zero entries in this window.
        nonzero = sum(1 for v in window if v != 0)

        if nonzero / sequence_length > non_zero_percentage:
            # Mostly-active window: backfill its zeros with random non-zeros.
            cleaned[start:start + sequence_length] = [
                random.choice(pool) if v == 0 else v for v in window
            ]

        start += sequence_length

    return cleaned


def process_data(data, threshold=0.1):
    """Zero out values that sit on a local straight line.

    For each point (from index 2 on), a linear regression is fitted over the
    point and its two predecessors; when the fit's residual at the point is
    below ``threshold`` the point is considered linearly predictable and
    replaced by 0. The first two points are always kept as-is.
    """
    out = []
    model = LinearRegression()

    for idx, value in enumerate(data):
        if idx >= 2:
            # Fit a line through this point and the two before it.
            xs = np.arange(3).reshape(-1, 1)
            ys = data[idx - 2:idx + 1]
            model.fit(xs, ys)
            fitted = model.predict(xs)

            # Residual below threshold => point lies on the local line.
            if abs(fitted[-1] - value) < threshold:
                out.append(0)
                continue

        out.append(value)

    return out


@elec5min.route('/5minloacl', methods=['GET', 'POST'])
def execute_script():
    """Pull 5-minute offline data from the north API for every timestamp
    between ``starttime`` and ``endtime`` (request args) and return the
    per-timestamp results as JSON."""
    src = Source()
    src.username = "ysrd2"
    src.password = "ysrd2"
    start = request.args.get('starttime')
    end = request.args.get('endtime')
    try:
        token = src.login()
        nodes = src.parse_nodes(src.get_config(token))
        results = []
        for moment in pd.date_range(start=start, end=end, freq='5min').to_pydatetime():
            try:
                payload = src.get_data(token, moment, nodes)
                # TODO: write data to db
                results.append({"datetime": str(moment), 'result': True, 'data': payload})
            except Exception:
                results.append({"datetime": str(moment), 'result': False, 'error': traceback.format_exc()})

        return jsonify(results), 200
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
