#
import argparse
from typing import Dict, Tuple, List
import random
import torch
from torch.utils.data import Dataset, DataLoader
#
from core.lrp_config import LrpConfig as LC
from apps.dhlp.data_vis import DataVis

class DhlpDs(Dataset):
    '''
    Lightning (DHLP) dataset.

    The torch Dataset interface is still a stub (length 0, items are None);
    the actual dataset-generation pipeline lives in the static methods of
    this class and is driven by `startup`.
    '''
    def __init__(self):
        # modern zero-argument super() instead of super(DhlpDs, self)
        super().__init__()

    def __len__(self):
        # placeholder: no samples materialized yet
        return 0

    def __getitem__(self, idx):
        # placeholder: no sample lookup implemented yet
        return None
    
    @staticmethod
    def startup(params: Dict = None) -> None:
        '''
        Dataset-generation entry point: load raw data, keep the device with
        the most records, synthesize normal records, split train/test, cut
        sliding frames, then grid-search the (threshold, active-point limit)
        combination that best predicts lightning.

        Args:
            params: optional config dict, forwarded to load_raw_datas
                    (currently unused there); defaults to an empty dict.
        '''
        # mutable-default fix: the original signature used `params: Dict = {}`
        if params is None:
            params = {}
        print(f'数据集生成程序')
        rec_db, dev_recs, raw_datas = DhlpDs.load_raw_datas(params=params)
        # 1. pick the device with the most records
        did = DhlpDs.get_max_recs_dev(dev_recs=dev_recs)
        # 2. gather all of that device's records
        tls_recs, tls_labels = DhlpDs.get_recs(did=did, dev_recs=dev_recs, raw_datas=raw_datas)
        norm_recs, norm_labels = DhlpDs.generate_norm_recs(tls_recs)
        recs = tls_recs + norm_recs
        labels = tls_labels + norm_labels
        # reproducible split: ~10% test, ~90% train
        train_datas, train_labels, test_datas, test_labels = DhlpDs.split_train_test(recs=recs, labels=labels)
        # cut sliding frames from the training records
        frames, frame_labels = DhlpDs.generate_frames(train_datas=train_datas, train_labels=train_labels)
        # grid-search thresholds and active-point limits
        tlf_mu, tlf_std = DhlpDs.get_mu_std(frames=frames, frame_labels=frame_labels)
        # torch.linspace requires an integer point count; the original passed
        # 0.1*tlf_std (a float tensor), which raises a TypeError.  101 points
        # over [mu-5*std, mu+5*std] gives the intended 0.1*std step.
        thresholds = torch.linspace(tlf_mu - 5.0 * tlf_std, tlf_mu + 5.0 * tlf_std, 101)  # a sample is "active" above this value
        limits = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10])  # warn when active samples exceed this count
        scores = torch.zeros((thresholds.shape[0], limits.shape[0]), dtype=torch.float32)  # score of each combination
        cnt = len(train_datas)
        for i1 in range(thresholds.shape[0]):
            for i2 in range(limits.shape[0]):
                for idx in range(cnt):
                    # fresh names: the original reassigned the outer
                    # frames/frame_labels variables here
                    rec_frames, rec_frame_labels = DhlpDs.get_rec_frames(train_datas[idx], train_labels[idx])
                    rec_score = 0
                    for idx2 in range(len(rec_frames)):
                        rst = DhlpDs.process_frame(rec_frames[idx2], thresholds[i1], limits[i2])
                        # start offset of frame idx2 inside the record; the
                        # original used the record index `idx` here, which is
                        # not a sample offset
                        head = idx2 * LC.C_stride
                        # labels are per-frame: index with idx2, not the
                        # record index idx as the original did
                        if rec_frame_labels[idx2] == 1 and rst == 1:
                            if head + LC.C_frame_size >= 80 * 60 and head + LC.C_frame_size < 90 * 60:
                                rec_score += 1  # warned inside the 10-min pre-lightning window
                            else:
                                rec_score -= 1
                        if rec_frame_labels[idx2] == 0 and rst == 1:
                            rec_score -= 1  # false alarm on a normal frame
                    if rec_score > 0:
                        scores[i1][i2] += 1
                    else:
                        scores[i1][i2] -= 1
        # TODO: pick the argmax of `scores` and emit the dataset
        # (left unfinished in the original as well)

    @staticmethod
    def process_frame(frame:torch.Tensor, threshold:float, limit:int) -> int:
        '''
        以5分钟为一帧，步长为1分钟，对每个数点判断：
        如果大于threshold，则有效点数加1
        如果总有效点数大于limit，则返回1，否则返回0
        '''
        seg_size = 5*60
        stride = 60
        cnt = frame.shape[0]
        rst = 0
        pts = 0
        for idx in range(0, cnt-seg_size+1, stride):
            seg = frame[idx : idx+seg_size]
            pts = 0
            for val in seg:
                if val > threshold:
                    pts += 1
            if pts > limit:
                rst = 1
        return rst
        

    @staticmethod
    def get_mu_std(frames:List, frame_labels:List) -> Tuple[float, float]:
        '''
        将所有雷电前的数据帧内容合成一个大的数组，从而求出其均值和标准差
        '''
        tl_frames = None
        cnt = len(frames)
        print(f'### cnt: {cnt};')
        for idx in range(cnt):
            if frame_labels[idx] == 1:
                if tl_frames is None:
                    tl_frames = frames[idx]
                else:
                    tl_frames = torch.hstack((tl_frames, frames[idx]))
        print(f'### tl_frames: {tl_frames.shape};')
        max_val = tl_frames.max()
        min_val = tl_frames.min()
        print(f'############ max: {max_val}; min: {min_val};')
        return tl_frames.mean(), tl_frames.std()
    
    @staticmethod
    def get_rec_frames(rec:torch.Tensor, rec_type:int) -> Tuple[List, List]:
        '''
        Split one record into sliding frames (size LC.C_frame_size, stride
        LC.C_stride) and return the frames together with per-frame labels.
        '''
        frames: List = []
        frame_labels: List = []
        last_start = rec.shape[0] - LC.C_frame_size
        for start in range(0, last_start + 1, LC.C_stride):
            frame, label = DhlpDs.extract_frame(rec, rec_type, start, LC.C_frame_size)
            frames.append(frame)
            frame_labels.append(label)
        return frames, frame_labels


    @staticmethod
    def generate_frames(train_datas:List, train_labels:List) -> Tuple[List, List]:
        '''
        Slide a window of LC.C_frame_size (stride LC.C_stride) over every
        training record and collect the windows with per-frame labels.

        Returns:
            frames: list of torch.Tensor windows.
            frame_labels: 0 - normal; 1 - pre-lightning (usable for
                prediction; frames not usable for prediction count as
                normal); 2 - during lightning.
        '''
        frames, frame_labels = [], []
        cnt = len(train_datas)
        for idx in range(cnt):
            amount = train_datas[idx].shape[0]
            for head in range(0, amount - LC.C_frame_size + 1, LC.C_stride):
                # labeling lives in extract_frame; the original carried a
                # 13-line commented-out copy of that logic here (removed)
                frame, frame_type = DhlpDs.extract_frame(train_datas[idx], train_labels[idx], head, LC.C_frame_size)
                frames.append(frame)
                frame_labels.append(frame_type)
            print(f'处理第{idx+1}条记录......')
        return frames, frame_labels
    
    @staticmethod
    def extract_frame(rec:torch.Tensor, rec_type:int, idx:int, frame_size:int) -> Tuple[torch.Tensor, int]:
        frame = rec[idx : idx + frame_size]
        if rec_type ==1:
            if idx+LC.C_frame_size < 80*60:
                frame_type = 0
            elif idx+LC.C_frame_size >= 80*60 and idx+LC.C_frame_size<90*60:
                frame_type = 1
            elif idx+LC.C_frame_size>=90*60 and idx+LC.C_frame_size<100*60:
                frame_type = 2
            else:
                frame_type = 0
        else:
            frame_type = 0
        return frame, frame_type

    @staticmethod
    def split_train_test(recs:List, labels:List) -> Tuple[List, List, List, List]:
        '''
        将数据集划分为训练集和测试集
        '''
        seed = 102
        rvo = random.Random(seed)
        train_datas, test_datas = [], []
        train_labels, test_labels = [], []
        cnt = len(recs)
        for idx in range(cnt):
            rv = rvo.random()
            if rv > 0.9:
                test_datas.append(recs[idx])
                test_labels.append(labels[idx])
            else:
                train_datas.append(recs[idx])
                train_labels.append(labels[idx])
        return train_datas, train_labels, test_datas, test_labels

    @staticmethod
    def generate_norm_recs(recs:List) -> Tuple[List, List]:
        norm_recs, norm_labels = [], []
        cnt = len(recs)
        for rec in recs:
            left = rec[0:60*60]
            print(f'### {type(rec)}; left: {type(left)};')
            rv = random.randint(0, cnt-1)
            right = recs[rv][-60*60:]
            new_rec = torch.hstack((left, right))
            norm_recs.append(new_rec)
            norm_labels.append(0)
        return norm_recs, norm_labels


    @staticmethod
    def get_recs(did:str, dev_recs:Dict, raw_datas:Dict) -> Tuple[List, List]:
        '''
        所有原始记录的列表：
        recs: [ndarray, ndarray, ....]
        labels: [0, 1, ......] 0-代表正常; 1-代表雷电;
        '''
        recs, labels = [], []
        tls_ids = dev_recs[did]['tls']
        for rid in tls_ids:
            data = raw_datas[rid]
            recs.append(data)
            labels.append(1)
        norm_ids = dev_recs[did]['norms']
        for rid in norm_ids:
            data = raw_datas[rid]
            recs.append(data)
            labels.append(0)
        return recs, labels

    @staticmethod
    def load_raw_datas(params: Dict = None) -> Tuple[Dict, Dict, Dict]:
        '''
        Load all raw inputs for dataset generation via DataVis.

        Args:
            params: optional config dict; currently unused, kept for
                interface stability.  Defaults to an empty dict
                (mutable-default fix: the original used `params: Dict = {}`).

        Returns:
            rec_db: {rec_id: record metadata (start time, location, ...)}.
            dev_recs: {dev_id: {'tls': [lightning rec ids], 'norms': [normal rec ids]}}.
            raw_datas: {rec_id: ndarray of raw signal}.
        '''
        if params is None:
            params = {}
        # 1.1 record metadata
        csv_fn = 'work/datas/v0/time_periods.csv'
        rec_db = DataVis.get_rec_db(csv_fn=csv_fn)
        # 1.2 record ids grouped per device
        dev_recs = DataVis.get_devs_recs(csv_fn=csv_fn)
        # 2.1 raw signal data
        electric_fn = 'work/datas/v0/electric.txt'
        raw_datas = DataVis.get_raw_datas(electric_fn=electric_fn)
        return rec_db, dev_recs, raw_datas

    @staticmethod
    def get_max_recs_dev(dev_recs:Dict) -> str:
        # 按照数据记录条数从多到少排充 did=F56AC57A1181CD
        dids = sorted(dev_recs.keys(), key=lambda dev_id: len(dev_recs[dev_id]["tls"]) + len(dev_recs[dev_id]["norms"]), reverse=True)
        return dids[0]
