from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score, root_mean_squared_error
import numpy as np
import torch
from model import *
from model.debias import TwoLinear

EARLY_STOP_PATIENCE = 5


# MCD
def autodebias_model_train(
    model,  # the model being trained
    train_loader,  # dataloader over the (biased) training data
    uniform_loader,  # dataloader over the uniform (unbiased) data
    valid_loader=None,
    test_loader=None,  # dataloader over the test data
    epochs=5,
    lr=0.01,
    w_lr=1e-3,
    name=None,
    device="cpu",
    early_stop=False,
    start_epoch=20,
):
    """Train ``model`` with an AutoDebias-style meta-learning loop.

    Per training batch:
      1. a one-step-ahead copy of ``model`` is updated with the
         w-corrected loss (gradients taken with ``create_graph=True`` so
         they stay differentiable w.r.t. ``w_function``);
      2. that copy is evaluated on the uniform data and, once
         ``epoch >= start_epoch``, the loss updates ``w_function``;
      3. ``model`` itself is updated with the w-corrected loss.

    Args:
        model: must expose ``user_num``, ``item_num`` and either
            ``latent_dim`` or ``knowledge_dim``, plus ``params()`` /
            ``update_params()`` for the meta step.
        early_stop: if True, ``valid_loader`` is required; training stops
            after ``EARLY_STOP_PATIENCE`` epochs without AUC improvement
            and the best weights are restored.

    Returns:
        (auc_list, acc_list, rmse_list, w_function)
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.MSELoss(reduction="none")
    sum_criterion = torch.nn.MSELoss(reduction="sum")
    loaderLen = len(train_loader)
    auc_list = []
    acc_list = []
    rmse_list = []
    description = "Epoch %d"
    if name is not None:
        description = f"[{name}] Epoch %d"

    if early_stop:
        assert valid_loader is not None
        best_auc = 0
        best_model = None
        patience = EARLY_STOP_PATIENCE
        print(f"[{name}] Early stopping enabled")

    user_num = model.user_num
    item_num = model.item_num
    try:
        latent_dim = model.latent_dim
    except AttributeError:
        # Some models (e.g. cognitive-diagnosis ones) name the dimension
        # differently; fall back to that attribute.
        latent_dim = model.knowledge_dim

    # φ: the learned weight/imputation network over (user, item) pairs
    w_function = TwoLinear(user_num, item_num)
    w_function_optim = torch.optim.Adam(w_function.parameters(), lr=w_lr)

    w_function.to(device)
    w_function.train()

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        for batch_data in tqdm(train_loader, description % epoch):
            batch_data = [data.to(device) for data in batch_data]
            input_data = batch_data[:-1]
            score = batch_data[-1]

            # w_function only consumes the (user, item) pair.
            input_data_two = input_data[0:2]
            w_output = w_function(*input_data_two)

            # Build a fresh one-step model of the same type and copy the
            # current weights into it for the virtual update.
            model_type = type(model)
            one_step_model = model_type(user_num, item_num, latent_dim)
            one_step_model.load_state_dict(model.state_dict())
            one_step_model.to(device)

            # Train the one-step model on the full batch.
            output = one_step_model(*input_data)
            one_step_loss = criterion(output, score)

            # Loss corrected by the w network.
            loss = w_output + one_step_loss
            loss = torch.sum(loss)

            # Virtual (differentiable) update of the one-step model.
            one_step_model.zero_grad()
            grads = torch.autograd.grad(
                loss,
                (one_step_model.params()),
                create_graph=True,
            )
            one_step_model.update_params(lr, source_params=grads)

            # NOTE(review): if uniform_loader yields more than one batch,
            # the second backward() through the shared meta-graph may raise
            # unless retain_graph is used — confirm uniform_loader is a
            # single batch, as in the reference AutoDebias setup.
            for uniform_batch in uniform_loader:
                uniform_batch = [data.to(device) for data in uniform_batch]
                uniform_x = uniform_batch[:-1]
                uniform_y = uniform_batch[-1]
                # Evaluate the virtually-updated model on uniform data.
                output = one_step_model(*uniform_x)
                uniform_loss = sum_criterion(output, uniform_y)

                # Update w_function with the uniform loss (warm-up gated).
                if epoch >= start_epoch:
                    w_function.zero_grad()
                    uniform_loss.backward()
                    w_function_optim.step()

            # Real update of the actual model with the corrected loss.
            w_output = w_function(*input_data_two)
            y_pred = model(*input_data)
            loss = criterion(y_pred, score) + w_output
            loss = torch.sum(loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
        log = f"[{name}] Epoch: {epoch}, Loss: {total_loss / loaderLen}"
        if valid_loader is not None:
            auc, acc, rmse = evaluate(model, valid_loader, name=name, device=device)
            auc_list.append(auc)
            acc_list.append(acc)
            rmse_list.append(rmse)
            log += f", AUC: {auc}, ACC: {acc}, RMSE: {rmse}"
        print(log)
        if early_stop:
            if auc > best_auc:
                best_auc = auc
                best_model = model.state_dict()
                patience = EARLY_STOP_PATIENCE
            else:
                patience -= 1
                if patience == 0:
                    model.load_state_dict(best_model)
                    print(f"[{name}] Early stopping at epoch {epoch}")
                    break
        # load
        if test_loader is not None:
            auc, acc, rmse = evaluate(model, test_loader, name=name, device=device)
            print(f"[{name}] Test AUC: {auc}, ACC: {acc}, RMSE: {rmse}")

    return auc_list, acc_list, rmse_list, w_function


def train(
    model,
    loader,
    valid_loader=None,
    epochs=5,
    lr=0.01,
    name=None,
    device="cpu",
    early_stop=False,
):
    """Standard BCE training loop.

    Each batch from ``loader`` is unpacked as ``(*inputs, score)``; the
    model is called as ``model(*inputs)`` and optimized with Adam + BCELoss.

    Args:
        valid_loader: if given, AUC/ACC/RMSE are computed each epoch.
        early_stop: if True, ``valid_loader`` is required; training stops
            after ``EARLY_STOP_PATIENCE`` epochs without AUC improvement
            and the best weights are restored.

    Returns:
        (auc_list, acc_list, rmse_list) — empty lists when no valid_loader.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.BCELoss()
    loaderLen = len(loader)
    auc_list = []
    acc_list = []
    rmse_list = []
    description = "Epoch %d"
    if name is not None:
        description = f"[{name}] Epoch %d"

    if early_stop:
        assert valid_loader is not None
        best_auc = 0
        best_model = None
        patience = EARLY_STOP_PATIENCE
        print(f"[{name}] Early stopping enabled")

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        for batch_data in tqdm(loader, description % epoch):
            batch_data = [data.to(device) for data in batch_data]
            input_data = batch_data[:-1]
            score = batch_data[-1]
            try:
                output = model(*input_data)
            except Exception:
                # Debug aid: dump the shape/range of every input tensor so a
                # bad batch (e.g. out-of-range index) is easy to spot, then
                # re-raise the original error.
                for n, i in enumerate(input_data):
                    print(n)
                    print(i.shape)
                    print(i.max())
                    print(i.min())
                print()
                raise
            loss = criterion(output, score)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        log = f"[{name}] Epoch: {epoch}, Loss: {total_loss / loaderLen}"
        if valid_loader is not None:
            auc, acc, rmse = evaluate(model, valid_loader, name=name, device=device)
            auc_list.append(auc)
            acc_list.append(acc)
            rmse_list.append(rmse)
            log += f", AUC: {auc}, ACC: {acc}, RMSE: {rmse}"
        print(log)
        if early_stop:
            if auc > best_auc:
                best_auc = auc
                best_model = model.state_dict()
                patience = EARLY_STOP_PATIENCE
            else:
                patience -= 1
                if patience == 0:
                    model.load_state_dict(best_model)
                    print(f"[{name}] Early stopping at epoch {epoch}")
                    break

    return auc_list, acc_list, rmse_list


def evaluate(model, loader, name=None, device="cpu"):
    """Evaluate ``model`` on ``loader``.

    Batches are unpacked as ``(*inputs, score)``; predictions are collected
    as flat Python lists, then scored.

    Returns:
        (auc, acc, rmse) — ACC thresholds both truth and prediction at 0.5.
    """
    model.eval()
    y_pred = []
    y_true = []
    description = "evaluating"
    if name is not None:
        description = f"[{name}] evaluating"
    progress = tqdm(loader, description)
    # Inference only: disable autograd so no graph is built and memory
    # stays flat across large evaluation sets.
    with torch.no_grad():
        for batch_data in progress:
            batch_data = [data.to(device) for data in batch_data]
            input_data = batch_data[:-1]
            score = batch_data[-1]
            output = model(*input_data)
            y_pred.extend(output.tolist())
            y_true.extend(score.tolist())
    auc = roc_auc_score(y_true, y_pred)
    acc = accuracy_score(np.array(y_true) >= 0.5, np.array(y_pred) >= 0.5)
    rmse = root_mean_squared_error(y_true, y_pred)
    # Restore training mode for the caller's loop.
    model.train()
    return auc, acc, rmse
