import torch
import model
from model import Transformer1d, RNN, TransformerClassifier
from dataset import FeatureDataset
from tqdm import trange
from torch import nn
import wandb
from torch.utils.data import random_split
from sklearn.metrics import roc_auc_score, accuracy_score, multilabel_confusion_matrix
import numpy as np
import random
import os
from warmup_scheduler import GradualWarmupScheduler
from sklearn.preprocessing import MinMaxScaler
from resnet1d import ResNet1D
from CnnTransformer import CNN_Transformr
import argparse


def seed_everything(seed):
    """Seed every RNG used by this project so runs are reproducible.

    Covers Python's `random`, NumPy, and torch (CPU and all CUDA devices),
    and configures cuDNN for deterministic kernel selection.

    Args:
        seed: integer seed applied to all generators.
    """
    random.seed(seed)
    # PYTHONHASHSEED only affects hash randomization in *newly spawned*
    # interpreters (e.g. DataLoader worker processes), not this process.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)      # no-op on CPU-only installs
    torch.cuda.manual_seed_all(seed)  # seed every visible GPU
    torch.backends.cudnn.deterministic = True
    # Fix: benchmark mode auto-tunes conv algorithms non-deterministically,
    # which defeats `deterministic = True`; disable it for reproducibility
    # (see PyTorch reproducibility notes).
    torch.backends.cudnn.benchmark = False


def CrossEntropyLoss_label_smooth(outputs, targets,
                                  num_classes=2, epsilon=0.1):
    """Cross-entropy loss with label smoothing.

    Each target becomes a distribution with `1 - epsilon` mass on the true
    class and `epsilon / (num_classes - 1)` mass on every other class; the
    loss is the batch-mean of the smoothed negative log-likelihood.

    Args:
        outputs: raw logits of shape (N, num_classes).
        targets: integer class indices of shape (N,).
        num_classes: number of classes (must be >= 2).
        epsilon: total probability mass moved off the true class.

    Returns:
        Scalar loss tensor on the same device as `outputs` (gradients flow
        back through `outputs`).
    """
    N = targets.size(0)
    # Fix: build the smoothed targets directly on the logits' device/dtype.
    # The original allocated them on CPU and moved both targets and
    # log-probs to the host every step (`.data.cpu()`, `.cpu()`), forcing a
    # GPU->CPU sync in the training hot loop and returning a CPU loss.
    smoothed_labels = torch.full(
        size=(N, num_classes),
        fill_value=epsilon / (num_classes - 1),
        device=outputs.device, dtype=outputs.dtype)
    index = targets.long().unsqueeze(1).to(outputs.device)
    smoothed_labels.scatter_(dim=1, index=index, value=1 - epsilon)
    log_prob = nn.functional.log_softmax(outputs, dim=1)
    loss = -torch.sum(log_prob * smoothed_labels) / N
    return loss


if __name__ == '__main__':
    # Entry point: trains a CNN-Transformer binary classifier on features
    # loaded from features.csv, with a RandomForest baseline for comparison.
    seed_everything(2023)
    # Command-line configuration
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs', type=int, default=500)
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--wandb', action="store_true", default=False)

    config = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if config.wandb:
        wandb.init(project="transformer", entity="dogewandb")

    # Model initialization
    # NOTE(review): this local `model` shadows the `model` module imported at
    # the top of the file.
    model = CNN_Transformr(d_model=512, nhead=8, d_ff=2048, num_layers=8, dropout_rate=0.1, classes=2).to(device)
    # Alternative models kept for experimentation:
    # model = TransformerClassifier().to(device)
    # model = Transformer1d(
    #     n_classes=2,
    #     n_length=2589,  # sequence length
    #     d_model=1,  # embedding (word-vector) dimension
    #     nhead=1,
    #     dim_feedforward=256,  # feed-forward feature dim inside the transformer
    #     dropout=0.5,
    #     activation='relu',
    #     verbose=False).to(device)

    # model = ResNet1D(
    #     in_channels=2589,
    #     base_filters=128,
    #     kernel_size=16,
    #     stride=2,
    #     n_block=48,
    #     groups=32,
    #     n_classes=2,
    #     downsample_gap=6,
    #     increasefilter_gap=12,
    #     verbose=False).to(device)

    # model = RNN().to(device)
    # Load the full dataset: column 0 is the label, columns 1: are features
    # (see the x/y slicing below).
    data = np.loadtxt(open('features.csv'), delimiter=",")
    scaler = MinMaxScaler()
    # Min-max scale the feature columns in place (label column untouched)
    data[:, 1:] = scaler.fit_transform(data[:, 1:])
    print(data, data.shape, '数据集构建完成')
    dataset_all = FeatureDataset(data)
    # 80/20 train/test split with a fixed generator for reproducibility
    train_dataset, test_dataset = random_split(
        dataset=dataset_all,
        lengths=[int(len(dataset_all) * 0.8), len(dataset_all) - int(len(dataset_all) * 0.8)],
        generator=torch.Generator().manual_seed(2023)
    )

    from sklearn.ensemble import RandomForestClassifier

    # RandomForest baseline.
    # NOTE(review): this uses a fixed index-230 split of the *unshuffled*
    # rows, not the random_split above, so its accuracy is not directly
    # comparable to the neural model's test accuracy.
    x_train = data[:230, 1:]
    y_train = np.array(data[:230, 0], dtype=np.int32)
    x_test = data[230:, 1:]
    y_test = np.array(data[230:, 0], dtype=np.int32)
    reg = RandomForestClassifier().fit(x_train, y_train)
    yhat = reg.predict(x_test)
    acc = accuracy_score(yhat, y_test)
    print("随机森林准确率", acc, yhat)

    # NOTE(review): num_workers=12 may be heavy for small datasets — confirm
    # against the machine's core count.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True,
                                               num_workers=12, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False,
                                              num_workers=12, pin_memory=True)

    # optimizer
    # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # `criterion` is only used for the eval-time loss below; training uses
    # the label-smoothed loss instead.
    criterion = nn.CrossEntropyLoss().to(device)

    # Base lr is 0.001/100; the warmup multiplier of 100 ramps it back up to
    # 0.001 over the first 20% of epochs, then cosine-anneals for the rest.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001 / 100, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.num_epochs - int(config.num_epochs * 0.2),
                                                           eta_min=0, last_epoch=-1)
    scheduler = GradualWarmupScheduler(optimizer, multiplier=100, total_epoch=int(config.num_epochs * 0.2),
                                       after_scheduler=scheduler)
    # Training loop
    for epoch in trange(config.num_epochs):
        model.train()
        correct = 0
        total = 0
        # NOTE(review): the *_val and *_test counters below are never used.
        correct_val = 0
        total_val = 0
        correct_test = 0
        total_test = 0
        for i, data, in (enumerate(train_loader, 0)):
            inputs, label = data
            # get the input
            inputs = inputs.to(device)
            labels = label.to(device)
            # zeros the paramster gradients

            # print(inputs.shape, labels.shape)
            optimizer.zero_grad()
            # forward + backward + optimizer
            outputs = model(inputs)

            loss = CrossEntropyLoss_label_smooth(outputs, labels, num_classes=config.num_classes, epsilon=0.1)
            # loss = criterion(outputs, labels).to(device)
            loss.backward()
            optimizer.step()

            # print statistics
            prediction = torch.argmax(outputs, 1)
            correct += (prediction == labels).sum().float()
            total += len(labels)
        scheduler.step()
        train_acc = (correct / total).cpu().detach().data.numpy()

        # train log
        # NOTE(review): `loss` here is only the last batch's loss, not an
        # epoch average.
        print(f"LearningRate: {optimizer.param_groups[0]['lr']}, tranLoss: {loss}, trainAccuracy: {train_acc}")
        if config.wandb:
            wandb.log({
                f"LearningRate": optimizer.param_groups[0]['lr'],
                f"trainLoss": loss,
                f"trainAccuracy": train_acc}, step=epoch)

        # test
        if (epoch + 1) % 1 == 0:
            model.eval()
            with torch.no_grad():
                prob_all = []       # P(class 1) per sample, for AUC
                label_all = []      # ground-truth labels
                test_list = []
                prob_all_wandb = [] # full softmax rows (collected, unused here)
                for index, data in (enumerate(test_loader)):
                    # get pred

                    test_inputs, test_label = data
                    test_inputs = test_inputs.to(device)
                    test_labels = test_label.to(device)
                    test_outputs = model(test_inputs)

                    test_loss = criterion(test_outputs, test_labels)
                    test_outputs = nn.functional.softmax(test_outputs, dim=1)

                    prob_all.extend(test_outputs[:, 1].cpu().numpy())
                    prob_all_wandb.extend(test_outputs.cpu().numpy())
                    label_all.extend(test_label.cpu().numpy())

                # np.around thresholds P(class 1) at 0.5 for accuracy
                test_acc = accuracy_score(label_all, np.around(prob_all))
                test_auc = roc_auc_score(label_all, prob_all)
                # print(outputs[:6, 1].cpu().numpy())
                # print(prob_all[:6])
                # print(label_all)
                # test log
                # NOTE(review): `test_loss` is only the last batch's loss.
                print(f"testLoss: {test_loss}, testAUC: {test_auc}, testAccuracy: {test_acc}")
                if config.wandb:
                    wandb.log({
                        f"testLoss": test_loss,
                        f"testAUC": test_auc,
                        f"testAccuracy": test_acc,
                    }, step=epoch)