# 数据可视化类
from typing import Dict, List, Tuple
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import torch
import torch.nn.functional as F
from core.lrp_config import LrpConfig as LC
#
import apps.dhlp.umap_engine as UE

class DataVis(object):
    """Visualization and data-wrangling helpers for the lightning-prediction
    electric-field dataset.

    Responsibilities:
      * load record metadata and raw per-second series from text files,
      * slice records into fixed-size labeled frames,
      * project frames into 2-D with t-SNE for visual inspection.
    """

    def __init__(self):
        # Fully-qualified class path, kept as an identifier string.
        self.name = 'apps.dhlp.data_vis.DataVis'

    @staticmethod
    def startup(params: Dict = None) -> None:
        """Print the startup banner.

        Args:
            params: optional startup options; currently unused.
                (The original signature used a mutable default ``{}``;
                ``None`` avoids the shared-mutable-default pitfall and is
                call-compatible.)
        """
        print('雷电预测程序V1 v0.0.1')

    @staticmethod
    def demo():
        """Load metadata, per-device record ids and raw series, then print
        basic statistics for the device with the most records."""
        csv_fn = 'work/datas/v0/time_periods.csv'
        # {rec_id: {record description: start time, device, lightning info, ...}}
        # Loaded here to exercise/validate the metadata parser as part of the demo.
        rec_db = DataVis.get_rec_db(csv_fn=csv_fn)
        # {'devid': {'tls': [lightning record ids], 'norms': [normal record ids]}}
        dev_recs = DataVis.get_devs_recs(csv_fn=csv_fn)
        # Device ids sorted by total record count, most records first.
        dids = sorted(
            dev_recs.keys(),
            key=lambda dev_id: len(dev_recs[dev_id]['tls']) + len(dev_recs[dev_id]['norms']),
            reverse=True,
        )
        # Raw series: {rec_id: 1-D tensor of per-second samples}.
        electric_fn = 'work/datas/v0/electric.txt'
        raw_datas = DataVis.get_raw_datas(electric_fn=electric_fn)
        print(f'dids: {len(dids)}; {type(dids[0])}; \n{dids[0]};')
        # Lightning record ids of the busiest device.
        tl_ids = dev_recs[dids[0]]['tls']
        for tl_id in tl_ids:
            print(f'### {tl_id}; {raw_datas[tl_id].shape};')

    @staticmethod
    def umap_analysis() -> None:
        """Embed 5-minute frames from a handful of records into 2-D with
        t-SNE and plot each point as its class label.

        Labels come from process_rec_raw_data: 0 = normal, 1 = pre-lightning.
        Blocks on plt.show().
        """
        electric_fn = 'work/datas/v0/electric.txt'
        raw_datas = DataVis.get_raw_datas(electric_fn=electric_fn)
        Xs, ys = None, None
        for rec_id in ['4060', '4061', '4062', '4063', '4064']:
            X, y = DataVis.process_rec_raw_data(raw_datas=raw_datas, rec_id=rec_id)
            Xs = X if Xs is None else torch.vstack((Xs, X))
            ys = y if ys is None else torch.vstack((ys, y))
        print(f'### X: {Xs.shape}; y: {ys.reshape((-1,)).shape};')
        # perplexity must stay well below the number of frames; 3 suits the
        # small per-record frame counts produced above.
        tsne = TSNE(n_components=2, learning_rate='auto',
                    init='random', perplexity=3)
        X_tsne = tsne.fit_transform(Xs)
        # Min-max normalize embedding coordinates into [0, 1] for plotting.
        x_min, x_max = X_tsne.min(0), X_tsne.max(0)
        X_norm = (X_tsne - x_min) / (x_max - x_min)
        plt.figure(figsize=(8, 8))
        labels = ys.reshape((-1,))
        for i in range(X_norm.shape[0]):
            # Hoisted: the label was converted via .detach().cpu().item()
            # twice per point in the original.
            lab = int(labels[i].item())
            plt.text(X_norm[i, 0], X_norm[i, 1], str(lab),
                     color=plt.cm.Set1(lab),
                     fontdict={'weight': 'bold', 'size': 9})
        plt.xticks([])
        plt.yticks([])
        plt.show()

    @staticmethod
    def process_rec_raw_data(raw_datas: Dict, rec_id: str) -> Tuple[torch.Tensor, torch.Tensor]:
        """Slice one raw record into fixed-size frames and label them.

        A frame ending within ``ahead_time`` seconds before the assumed
        lightning onset (``tl_start_idx``) and starting no later than onset
        is labeled 1 (pre-lightning); a frame ending well before that window
        is labeled 0 (normal); frames past onset are skipped.

        Args:
            raw_datas: {rec_id: 1-D torch.Tensor of per-second samples}.
            rec_id: key into ``raw_datas``.

        Returns:
            (X, y): X of shape (n_frames, frame_size), y of shape
            (n_frames, 1); (None, None) when no frame qualifies.
        """
        data = raw_datas[rec_id]
        print(f'data: {type(data)}; {data.shape};')
        margin_idx = 90 * 60          # 90-minute margin at the start of a record
        tl_start_idx = margin_idx     # assumed lightning onset index (seconds)
        frame_size = 5 * 60           # 5-minute frames
        stride = frame_size           # non-overlapping frames
        ahead_time = 5 * 60           # pre-lightning window before onset
        cnt = data.shape[0]
        frames, labels = [], []
        for idx in range(0, cnt - frame_size + 1, stride):
            end = idx + frame_size
            if end >= tl_start_idx - ahead_time and idx <= tl_start_idx:
                label = 1.0   # pre-lightning
            elif end < tl_start_idx - ahead_time:
                label = 0.0   # normal data
            else:
                continue      # frame lies past onset: skip
            Xi = data[idx:end]
            # BUGFIX: the original padded only frames appended after the
            # first one. Pad uniformly. (Unreachable while stride ==
            # frame_size and range stops at cnt - frame_size, but kept in
            # case the constants change.)
            if Xi.shape[0] < frame_size:
                Xi = F.pad(Xi, (0, frame_size - Xi.shape[0]))
            frames.append(Xi)
            labels.append(torch.tensor([label]))
        if not frames:
            return None, None
        return torch.vstack(frames), torch.vstack(labels)

    @staticmethod
    def get_raw_datas(electric_fn: str) -> Dict:
        '''
        Parse electric.txt into {rec_id: 1-D torch.Tensor}.

        Each line has the form "<rec_id> <v1,v2,...>": a record id, one
        space, then a comma-separated list of floats.

        Args:
            electric_fn: path to the electric.txt file.

        Returns:
            Dict mapping rec_id (str) to a 1-D float tensor.
        '''
        raw_datas = {}
        num = 0
        with open(electric_fn, 'r', encoding='utf-8') as rfd:
            for row in rfd:
                parts = row.strip().split(' ')
                if len(parts) < 2:
                    continue  # BUGFIX: blank/malformed lines used to raise IndexError
                rec_id = parts[0]
                raw_datas[rec_id] = torch.tensor([float(v) for v in parts[1].split(',')])
                num += 1
                if num % 1000 == 0:
                    print(f'处理完成{num}条数据......')
        return raw_datas

    @staticmethod
    def get_rec_db(csv_fn: str) -> Dict:
        '''
        Load per-record metadata from the CSV file (header row skipped).

        Args:
            csv_fn: path to time_periods.csv.

        Returns:
            {rec_id: {field_name: raw string value}}.
        '''
        rec_db = {}
        with open(csv_fn, 'r', encoding='utf-8') as rfd:
            next(rfd, None)  # skip the CSV header row
            for row in rfd:
                cols = row.strip().split(',')
                if len(cols) <= 1:
                    continue  # skip blank/degenerate lines
                rec_id = cols[LC.IDV1_id]
                rec_db[rec_id] = {
                    'id': cols[LC.IDV1_id],
                    'start_time': cols[LC.IDV1_start_time],
                    'did': cols[LC.IDV1_did],
                    'dname': cols[LC.IDV1_dname],
                    'device_lng': cols[LC.IDV1_device_lng],
                    'device_lat': cols[LC.IDV1_device_lat],
                    'max_elevel': cols[LC.IDV1_max_elevel],
                    'warning_start_time': cols[LC.IDV1_warning_start_time],
                    'warning_end_time': cols[LC.IDV1_warning_end_time],
                    # NOTE(review): copy-paste suspect — reads the
                    # warning_end_time column; should probably be
                    # LC.IDV1_first_lightning_time. Confirm LC defines it
                    # before changing.
                    'first_lightning_time': cols[LC.IDV1_warning_end_time],
                    'first_lightning_lng': cols[LC.IDV1_first_lightning_lng],
                    'first_lightning_lat': cols[LC.IDV1_first_lightning_lat],
                    'first_lightning_distance': cols[LC.IDV1_first_lightning_distance],
                    # NOTE(review): copy-paste suspect — reads the
                    # first_lightning_distance column; should probably be
                    # LC.IDV1_first_lightning_strength. Confirm LC defines
                    # it before changing.
                    'first_lightning_strength': cols[LC.IDV1_first_lightning_distance],
                    'first_lw_level': cols[LC.IDV1_first_lw_level],
                    'last_lightning_time': cols[LC.IDV1_last_lightning_time],
                    'last_lightning_lng': cols[LC.IDV1_last_lightning_lng],
                    'last_lightning_lat': cols[LC.IDV1_last_lightning_lat],
                    'last_lightning_distance': cols[LC.IDV1_last_lightning_distance],
                    'last_lightning_strength': cols[LC.IDV1_last_lightning_strength],
                    'last_lw_level': cols[LC.IDV1_last_lw_level]
                }
        return rec_db

    @staticmethod
    def split_train_test(recs: List, ratio: float = 0.9) -> Tuple[List, List]:
        '''
        Randomly split records into (train, test) by the given ratio.

        Args:
            recs: list of records; NOT mutated (the original shuffled the
                caller's list in place — fixed by shuffling a copy).
            ratio: fraction of records assigned to the training split.

        Returns:
            (train_datas, test_datas) as two lists.
        '''
        shuffled = list(recs)
        random.shuffle(shuffled)
        split_idx = int(len(shuffled) * ratio)
        return shuffled[:split_idx], shuffled[split_idx:]

    @staticmethod
    def get_max_did(dev_recs: Dict) -> Tuple[str, List, List]:
        '''
        Find the device with the most records in a get_devs_recs result.

        Args:
            dev_recs: {'devid': {'tls': [...], 'norms': [...]}}.

        Returns:
            (device_id, tls, norms) for the device with the highest total
            record count (ties keep the first device encountered, matching
            the original strict-greater comparison).

        Raises:
            ValueError: if dev_recs is empty (the original crashed with
                ``KeyError: None`` in that case).
        '''
        if not dev_recs:
            raise ValueError('dev_recs is empty')
        max_did = max(
            dev_recs,
            key=lambda did: len(dev_recs[did]['tls']) + len(dev_recs[did]['norms']),
        )
        return max_did, dev_recs[max_did]['tls'], dev_recs[max_did]['norms']

    @staticmethod
    def get_devs_recs(csv_fn: str) -> Dict:
        '''
        Group record ids per device into lightning vs normal lists.

        A record whose start_time column is empty counts as a normal
        record; otherwise it counts as a lightning record.

        Args:
            csv_fn: path to time_periods.csv.

        Returns:
            {
                'devid': {
                    'tls': [lightning record ids],
                    'norms': [normal record ids]
                }
            }
        '''
        devs = {}
        with open(csv_fn, 'r', encoding='utf-8') as rfd:
            # BUGFIX: skip the CSV header row — get_rec_db already does;
            # previously the header was recorded as a bogus device entry.
            next(rfd, None)
            for row in rfd:
                cols = row.strip().split(',')
                if len(cols) <= 1:
                    continue  # skip blank/degenerate lines
                rec_id = cols[LC.IDV1_id]
                did = cols[LC.IDV1_did]
                bucket = devs.setdefault(did, {'tls': [], 'norms': []})
                key = 'norms' if cols[LC.IDV1_start_time] == '' else 'tls'
                bucket[key].append(rec_id)
        return devs