import json
import os
import random
import shutil

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
from sklearn.impute import KNNImputer
from sklearn.preprocessing import StandardScaler
from torch.optim.lr_scheduler import LambdaLR

# CSV validation helper; call this before reading a user-supplied CSV.
def check_csv(csv_path: str, is_test: bool) -> bool:
    """
    Validate a user-supplied CSV file.

    :params: csv_path path of the CSV file to check
    :params: is_test False for a train/validation set, True for a test set
    :returns: True if the file is valid, False otherwise
    :raises FileNotFoundError: if csv_path does not exist
    """
    if not os.path.exists(csv_path):
        # include the offending path so the error is actionable
        raise FileNotFoundError(csv_path)
    data = pd.read_csv(csv_path, index_col=0, header=0)
    cols = list(data.columns)
    if is_test:
        # test set: non-empty, exactly 107 feature columns, no label column
        return len(data) > 0 and len(cols) == 107 and "label" not in cols
    # train/validation set: 107 feature columns plus a "label" column
    return len(data) > 0 and len(cols) == 108 and "label" in cols


def set_seed(seed: int) -> None:
    """
    Seed every random-number generator the project uses
    (Python's random, NumPy, torch CPU, and all CUDA devices).

    :params: seed the seed value to apply everywhere
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def fstr2float(num: str) -> float:
    """
    Convert a fraction string of the form "a/b" into a float.

    :params: num fraction string such as "3/4"
    """
    numerator, denominator = num.split("/")
    return float(numerator) / float(denominator)


def make_dir(path: str) -> None:
    """
    Create an empty directory at *path*, replacing any existing one.

    :params: path path of the directory to (re)create
    """
    # EAFP: remove the old tree if there is one, then create afresh.
    try:
        shutil.rmtree(path)
    except FileNotFoundError:
        pass
    os.makedirs(path)


def data_preprocess(x_train: np.ndarray, x_valid: np.ndarray, y_train: np.ndarray, y_valid: np.ndarray, seed=1024) -> tuple:
    """
    Impute missing values and standardize features; returns the processed 4-tuple.

    Both the KNN imputer and the scaler are fit on the training set only and
    then applied to the validation set, so no validation statistics leak into
    the preprocessing and train/valid features stay on the same scale.

    :params: x_train training-set features
    :params: x_valid validation-set features (may be empty)
    :params: y_train training-set labels (returned unchanged)
    :params: y_valid validation-set labels (returned unchanged)
    :params: seed random seed applied before preprocessing
    :returns: tuple: x_train, x_valid, y_train, y_valid
    """
    set_seed(seed)
    imputer = KNNImputer(n_neighbors=20)
    scaler = StandardScaler()

    # Missing-value imputation: fit on train, apply to valid.
    x_train = imputer.fit_transform(x_train)
    if len(x_valid) != 0:
        x_valid = imputer.transform(x_valid)

    # Standardization: fit on train, apply to valid.
    # BUG FIX: previously the validation set was fit_transform'd with a brand
    # new scaler, so train and valid were scaled with different statistics.
    x_train = scaler.fit_transform(x_train)
    if len(x_valid) != 0:
        x_valid = scaler.transform(x_valid)

    return x_train, x_valid, y_train, y_valid


def probs2preds(probs):
    """
    Map a matrix of class probabilities to predicted class indices.

    :params probs: (N, C) probability tensor produced by the model
    :returns: length-N tensor of argmax class indices
    """
    return torch.argmax(probs, dim=1)


def default_dump(obj):
    """
    ``json.dumps`` fallback serializer for NumPy types.

    :params: obj an object the json encoder could not serialize natively
    :returns: a JSON-serializable equivalent (Python scalar or list)
    :raises TypeError: for types with no NumPy conversion. Returning the
        object unchanged (as before) made the encoder invoke this hook on
        the same object again, recursing forever.
    """
    if isinstance(obj, (np.integer, np.floating, np.bool_)):
        return obj.item()
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")


def ndarray2json(arr: np.ndarray):
    """
    Serialize a NumPy array to a JSON string keyed by element index.

    :params: arr NumPy array
    :returns: JSON-formatted string of {index: value}
    """
    indexed = {i: value for i, value in enumerate(arr)}
    return json.dumps(indexed, ensure_ascii=False, default=default_dump)


def get_all_model_names(path: str) -> list:
    """
    List the names of all saved models (".pth" files) under *path*.

    :params: path directory holding the model files
    :returns: list of model names with the trailing ".pth" stripped
    :raises FileNotFoundError: if path does not exist
    """
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    # Strip only the trailing ".pth" — the previous str.replace also removed
    # any ".pth" occurring in the middle of a file name (e.g. "c.pth.pth" -> "c").
    suffix = ".pth"
    return [name[: -len(suffix)] for name in os.listdir(path) if name.endswith(suffix)]


def delete_model(path: str, name: str) -> None:
    """
    Remove a saved model file.

    :params: path directory holding the model files
    :params: name file name of the model to delete
    """
    model_file = os.path.join(path, name)
    if not os.path.exists(model_file):
        raise FileNotFoundError
    os.remove(model_file)


def rm_all(path: str) -> None:
    """
    Empty a directory by deleting it and recreating it.

    :params: path path of the directory to empty
    """
    if os.path.exists(path):
        shutil.rmtree(path)
        os.makedirs(path)
    else:
        raise FileNotFoundError


class FocalLoss(nn.Module):
    """
    Focal loss for multi-class classification.

    Scales each sample's cross-entropy by alpha * (1 - p_t) ** gamma, where
    p_t is the model's probability for the true class, so easy samples
    contribute less to the total loss.
    """

    def __init__(self, alpha=1, gamma=2, reduction='mean'):
        super().__init__()
        self.alpha = alpha          # global scaling factor
        self.gamma = gamma          # focusing exponent
        self.reduction = reduction  # 'mean', 'sum', or anything else for per-sample

    def forward(self, input, target):
        # per-sample cross-entropy, no reduction yet
        per_sample_ce = nn.functional.cross_entropy(input, target, reduction='none')
        true_class_prob = torch.exp(-per_sample_ce)
        loss = self.alpha * (1.0 - true_class_prob) ** self.gamma * per_sample_ce
        if self.reduction == 'sum':
            return loss.sum()
        if self.reduction == 'mean':
            return loss.mean()
        return loss


class CosineAnnealingWarmUpRestarts(LambdaLR):
    """
    Cosine-annealing schedule with linear warm-up and warm restarts,
    implemented on top of ``LambdaLR``.

    NOTE(review): ``self.total_iterations`` is never incremented — the line
    that did so sat *after* an exhaustive if/elif/else return and was
    unreachable (removed here as dead code). Consequently the
    ``total_iterations == 0`` guard in ``lr_lambda`` always fires and the
    multiplier is always 1.0, so this scheduler is currently a no-op.
    Overriding ``get_lr`` to echo the current group lrs also bypasses
    ``LambdaLR``'s multiplier machinery. Both look like latent bugs —
    confirm the intended schedule before changing runtime behavior.
    """

    def __init__(self, optimizer, T_0, T_mult=1, eta_max=0.1, T_warmup=100, last_epoch=-1):
        """
        :params: optimizer wrapped optimizer
        :params: T_0 length (in steps) of the first cosine cycle
        :params: T_mult cycle-length growth factor applied after each restart
        :params: eta_max peak multiplier used by the cosine phase
        :params: T_warmup number of linear warm-up steps
        :params: last_epoch index of the last epoch (-1 starts fresh)
        """
        self.T_0 = T_0
        self.T_mult = T_mult
        self.eta_max = eta_max
        self.T_warmup = T_warmup
        self.cycle_count = 0        # number of completed restarts
        self.cycle_iterations = 0   # steps taken within the current cycle
        self.total_iterations = 0   # see class NOTE: never advanced
        super().__init__(optimizer, self.lr_lambda, last_epoch)

    def get_lr(self):
        """Return the current learning rate of every param group."""
        return [group['lr'] for group in self.optimizer.param_groups]

    def lr_lambda(self, step):
        """Multiplier applied to the base lr at *step* (see class NOTE)."""
        if self.total_iterations == 0 or step == 0:
            # total_iterations is never advanced, so this branch always fires
            return 1.0
        elif step <= self.T_warmup:
            # linear warm-up from 1/T_warmup up to 1.0
            return step / self.T_warmup
        else:
            step = step - self.T_warmup
            cycle_length = self.T_0 * (self.T_mult ** self.cycle_count)
            if step >= cycle_length:
                # restart: begin a new (possibly T_mult-times longer) cycle
                self.cycle_count += 1
                self.cycle_iterations = 0
                self.T_0 *= self.T_mult
                return self.eta_max
            else:
                # cosine decay from eta_max toward 0 within the cycle
                self.cycle_iterations += 1
                return 0.5 * (np.cos(np.pi * self.cycle_iterations / cycle_length) + 1) * self.eta_max
