import os
import torch
import torch.nn as nn
from torch import Tensor
from pandas import DataFrame
from accelerate import Accelerator
from torch.utils.data import Dataset


# TEC dataset: slices per-station time series into (info, x, y) samples.
class TECDataset(Dataset):
    """Sliding-window dataset over per-station TEC time series.

    Each sample is a tuple ``(info, x, y)`` where
    ``x`` is a ``(seq_len, 6)`` feature window,
    ``y`` is the following ``(pred_len,)`` slice of the vTEC column, and
    ``info`` holds the station ID, the window's start date and the mean
    ionospheric pierce point position over the input window.
    """

    def __init__(self,
                 data: DataFrame,       # raw data, one row per (station, timestamp)
                 sample_stride: int,    # step between consecutive sample windows
                 seq_len: int,          # input sequence length
                 pred_len: int):        # prediction sequence length
        super().__init__()

        # Processed samples: list of (info, x, y) tuples.
        self.tec_data = []

        # Column layouts are loop-invariant; build them once.
        feature_cols = ['vTEC', 'DST_nT', 'AP', 'F10.7', 'SSN', 'X-ray']
        info_cols = ['Avg_Lat', 'Avg_Lon']
        window = seq_len + pred_len

        # Per station: extract features, dates and positions, then slice
        # the series into samples. (`station_id` — avoid shadowing builtin `id`.)
        for station_id, station_data in data.groupby('station'):
            features = station_data[feature_cols].values
            positions = station_data[info_cols].values
            # Reset the index so dates are addressable by window offset.
            dates = station_data['date'].reset_index(drop=True)
            # Number of full (input + target) windows that fit in this series.
            samples_num = (len(station_data) - window) // sample_stride + 1
            for index in range(samples_num):
                # Input window [x_begin, x_end), target window [x_end, y_end).
                x_begin = index * sample_stride
                x_end = x_begin + seq_len
                y_end = x_end + pred_len
                # Model input: all features over the input window.
                x = features[x_begin:x_end]
                # Target: vTEC (column 0) over the prediction window.
                y = features[x_end:y_end][..., 0]
                # Mean pierce-point position over the input window.
                mean_ipp = positions[x_begin:x_end].mean(axis=0)
                info = {'ID': station_id, 'date': dates[x_begin], 'position': mean_ipp}
                self.tec_data.append((info, x, y))

    def __getitem__(self, index):
        return self.tec_data[index]

    def __len__(self):
        return len(self.tec_data)


# Reversible instance normalization (RevIN-style).
# Each feature channel is normalized independently.
class Normalize(nn.Module):
    """Normalize along the time dimension and invert the transform later.

    Call with ``mode='norm'`` to record per-batch statistics and normalize,
    then with ``mode='denorm'`` to map model output back to the original scale.
    Input is ``(B, T, N)`` or ``(B, T)``; statistics are taken over dim 1 (time).
    """

    def __init__(self,
                 num_features: int,             # number of feature channels
                 eps: float = 1e-5,             # added to the variance for numerical stability
                 affine: bool = True,           # learn per-channel scale/shift after normalizing
                 subtract_last: bool = False):  # True: center on the last time step; False: center on the mean
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.subtract_last = subtract_last
        # Learnable affine parameters are only created when requested.
        if self.affine:
            self._init_params()

    def forward(self, x: Tensor, mode: str):
        """Dispatch on mode: 'norm' records statistics and normalizes, 'denorm' inverts."""
        if mode == 'norm':
            self._get_statistics(x)
            return self._normalize(x)
        if mode == 'denorm':
            return self._denormalize(x)
        raise NotImplementedError

    def _init_params(self):
        # RevIN affine parameters, one scale and one shift per channel: (C,).
        self.affine_weight = nn.Parameter(torch.ones(self.num_features))
        self.affine_bias = nn.Parameter(torch.zeros(self.num_features))

    def _get_statistics(self, x: Tensor):
        # Statistics are reduced over the time dimension (dim 1) and detached
        # so gradients do not flow through them.
        if self.subtract_last:
            # Center on the final time step instead of the mean.
            self.last = x[:, -1].unsqueeze(1)
        else:
            self.mean = x.mean(dim=1, keepdim=True).detach()
        self.stdev = (x.var(dim=1, keepdim=True, unbiased=False) + self.eps).sqrt().detach()

    def _normalize(self, x: Tensor):
        # Center, scale to unit variance, then apply the optional affine map.
        center = self.last if self.subtract_last else self.mean
        x = (x - center) / self.stdev
        if self.affine:
            x = x * self.affine_weight + self.affine_bias
        return x

    def _denormalize(self, x: Tensor):
        # Undo the affine map first (eps**2 guards against a zero weight),
        # then restore the original scale and center.
        if self.affine:
            x = (x - self.affine_bias) / (self.affine_weight + self.eps * self.eps)
        x = x * self.stdev
        x = x + (self.last if self.subtract_last else self.mean)
        return x


# 早停法
class EarlyStopping:
    def __init__(self, patience=7, verbose=False, delta=0):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.val_loss_min = torch.inf
        self.delta = delta

    def __call__(self, val_loss, model, path, file_name, save_type: str = "classic", accelerator: Accelerator = None):
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path, file_name, save_type, accelerator)
        elif score < self.best_score + self.delta:
            self.counter += 1
            if self.counter >= self.patience:
                print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
                return True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path, file_name, save_type, accelerator)
            self.counter = 0
        return False

    def save_checkpoint(self, val_loss, model, path, file_name, save_type: str = "classic", accelerator: Accelerator = None):
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        if save_type == "classic":
            torch.save(model, os.path.join(path, file_name) + '.pth')
        elif save_type == "shared":
            assert accelerator is not None
            accelerator.wait_for_everyone()
            accelerator.save_model(model, os.path.join(path, file_name), max_shard_size="1GB", safe_serialization=True)
        self.val_loss_min = val_loss