import torch
import torch.nn as nn
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from model import MCD
from model.base import Module
import json
from sklearn.metrics import accuracy_score, roc_auc_score, root_mean_squared_error
import numpy as np
import multiprocessing
import optuna
import os

# Functional API alias used throughout training (F.binary_cross_entropy).
from torch.nn import functional as F


DATASET = "a0910"

DATASET_DIR = Path("./dataset") / DATASET

# Training split. Ids in the CSVs are 1-based; shift both id columns to
# 0-based so they can index embedding tables directly.
data = pd.read_csv(DATASET_DIR / "train.csv")
data[["user_id", "item_id"]] -= 1

# Held-out split ("valid.csv"), used below as the test/evaluation set.
test_data = pd.read_csv(DATASET_DIR / "valid.csv")
test_data[["user_id", "item_id"]] -= 1

# Dataset dimensions. BUGFIX: the original `json.load(open(...))` never
# closed the file handle; use a context manager instead.
with open(DATASET_DIR / "describe.json") as f:
    describe = json.load(f)
KNOW_NUM = describe["knowledge_num"]
ITEM_NUM = describe["item_num"]
USER_NUM = describe["user_num"]


class StableDRMCD(nn.Module):
    """Stable Doubly-Robust (Stable-DR) training wrapper around two MCD models.

    One MCD tower (``prediction_model``) learns the observed user/item
    responses, a second tower (``imputation``) learns to impute the prediction
    error on unobserved pairs, and a learnable scalar ``mu`` smooths the
    propensity scores used for inverse-propensity weighting.
    """

    def __init__(self, num_users, num_items, latent_dim, device, *args, **kwargs):
        super().__init__()
        self.num_users = num_users
        self.num_items = num_items
        self.latent_dim = latent_dim
        # Prediction and imputation towers share the same architecture.
        self.prediction_model = MCD(self.num_users, self.num_items, self.latent_dim)
        self.imputation = MCD(self.num_users, self.num_items, self.latent_dim)

        self.sigmoid = torch.nn.Sigmoid()
        self.xent_func = torch.nn.BCELoss()

        self.device = device

    def fit(
        self,
        x,
        y,
        y_ips,
        mu=0,
        eta=1,
        stop=5,
        num_epoch=1000,
        batch_size=128,
        lr=0.05,
        lr1=10,
        lamb=0,
        tol=1e-4,
        G=1,
        verbose=False,
    ):
        """Train both towers plus the propensity parameter with the Stable-DR loop.

        Args:
            x: LongTensor of observed (user, item) index pairs, shape (N, 2).
            y: Tensor of binary responses aligned with `x`.
            y_ips: prior sample of responses used to estimate P(y=1) for the
                propensity model, or None to fall back on `y` itself.
            mu: initial value of the learnable propensity-smoothing scalar.
            eta: weight of the stabilization term in the propensity loss.
            stop: early-stopping patience (number of low-improvement epochs).
            num_epoch: maximum number of epochs.
            batch_size: SGD mini-batch size over observed pairs.
            lr: learning rate for both MCD towers.
            lr1: learning rate for `mu` (typically much larger).
            lamb: weight decay for all optimizers.
            tol: relative loss-improvement threshold for early stopping.
            G: counterfactual oversampling ratio per batch.
            verbose: if True, print the epoch loss every 10 epochs.
        """
        # mu is optimized jointly with the two towers by its own optimizer.
        mu = torch.Tensor([mu])
        mu.requires_grad_(True)
        mu = mu.to(self.device)
        mu = torch.nn.Parameter(mu)

        optimizer_prediction = torch.optim.Adam(
            self.prediction_model.parameters(), lr=lr, weight_decay=lamb
        )
        optimizer_imputation = torch.optim.Adam(
            self.imputation.parameters(), lr=lr, weight_decay=lamb
        )
        optimizer_propensity = torch.optim.Adam([mu], lr=lr1, weight_decay=lamb)

        last_loss = 1e9

        # Dense observation indicator over the full user x item grid,
        # flattened to length num_users * num_items.
        observation = torch.zeros([self.num_users, self.num_items])
        for i in range(len(x)):
            observation[x[i][0], x[i][1]] = 1
        observation = observation.reshape(self.num_users * self.num_items)
        observation = observation.to(self.device)

        # Flat indices of observed pairs with a positive response (y == 1);
        # used by _compute_IPS to assign the positive-class propensity.
        y1 = []
        for i in range(len(x)):
            if y[i] == 1:
                y1.append(self.num_items * x[i][0] + x[i][1])
        y1 = torch.LongTensor(y1)
        y1 = y1.to(self.device)

        # Generate all counterfactuals and factuals (every user-item pair).
        x_all = []
        for i in range(self.num_users):
            x_all.extend([[i, j] for j in range(self.num_items)])
        x_all = np.array(x_all)

        num_sample = len(x)
        total_batch = num_sample // batch_size

        if y_ips is None:
            one_over_zl = self._compute_IPS(x, y, y1, mu)
        else:
            one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)

        # Inverse propensities restricted to the observed entries.
        one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)].detach()

        early_stop = 0
        for epoch in range(num_epoch):
            all_idx = np.arange(num_sample)  # observed pairs
            np.random.shuffle(all_idx)

            # Sampling order over all (counterfactual + factual) pairs.
            ul_idxs = np.arange(x_all.shape[0])
            np.random.shuffle(ul_idxs)

            epoch_loss = 0

            for idx in range(total_batch):
                selected_idx = all_idx[batch_size * idx : (idx + 1) * batch_size]
                sub_x = x[selected_idx]
                sub_y = y[selected_idx]
                # Propensity weights for this batch.
                inv_prop = one_over_zl_obs[selected_idx]

                sub_y = torch.Tensor(sub_y)

                sub_x = sub_x.T

                pred = self.prediction_model.forward(*sub_x)
                imputation_y = self.imputation.forward(*sub_x)
                pred = self.sigmoid(pred)
                imputation_y = self.sigmoid(imputation_y)

                # Step 1: imputation tower — match the imputed error e_hat to
                # the true prediction error e, weighted by inverse propensity.
                e_loss = F.binary_cross_entropy(pred.detach(), sub_y, reduction="none")
                e_hat_loss = F.binary_cross_entropy(
                    imputation_y, pred.detach(), reduction="none"
                )
                imp_loss = (((e_loss - e_hat_loss) ** 2) * inv_prop.detach()).sum()

                optimizer_imputation.zero_grad()
                imp_loss.backward()
                optimizer_imputation.step()

                # Step 2: propensity parameter mu — fit the propensities to the
                # observation indicator, plus the Stable-DR variance penalty.
                x_all_idx = ul_idxs[G * idx * batch_size : G * (idx + 1) * batch_size]
                x_sampled = x_all[x_all_idx]

                x_sampled = x_sampled.T
                x_sampled = torch.LongTensor(x_sampled)
                x_sampled = x_sampled.to(self.device)
                imputation_y1 = self.imputation(*x_sampled)
                imputation_y1 = self.sigmoid(imputation_y1)

                prop_loss = F.binary_cross_entropy(
                    1 / one_over_zl[x_all_idx], observation[x_all_idx], reduction="sum"
                )
                pred_y1 = self.prediction_model(*x_sampled)
                pred_y1 = self.sigmoid(pred_y1)

                imputation_loss = F.binary_cross_entropy(
                    imputation_y1, pred_y1, reduction="none"
                )

                loss = (
                    prop_loss
                    + eta
                    * (
                        (1 - observation[x_all_idx] * one_over_zl[x_all_idx])
                        * (imputation_loss - imputation_loss.mean())
                    ).sum()
                    ** 2
                )

                optimizer_propensity.zero_grad()
                loss.backward()
                optimizer_propensity.step()

                # mu just changed: refresh the inverse propensities before the
                # prediction-tower step.
                one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)
                one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)]
                inv_prop = one_over_zl_obs[selected_idx].detach()

                # Step 3: prediction tower — IPS-weighted, self-normalized BCE.
                pred = self.prediction_model.forward(*sub_x)
                pred = self.sigmoid(pred)

                xent_loss = F.binary_cross_entropy(
                    pred, sub_y, weight=inv_prop.detach(), reduction="sum"
                )
                xent_loss = (xent_loss) / (inv_prop.detach().sum())

                optimizer_prediction.zero_grad()
                xent_loss.backward()
                optimizer_prediction.step()

                epoch_loss += xent_loss.detach().cpu().numpy()

            # Early stopping on relative loss improvement. NOTE: the patience
            # counter is never reset (matches the reference implementation).
            relative_loss_div = (last_loss - epoch_loss) / (last_loss + 1e-10)
            if relative_loss_div < tol:
                if early_stop > stop:
                    print("[MF-Stable-DR] epoch:{}, xent:{}".format(epoch, epoch_loss))
                    break
                early_stop += 1

            last_loss = epoch_loss

            if epoch % 10 == 0 and verbose:
                print("[MF-Stable-DR] epoch:{}, xent:{}".format(epoch, epoch_loss))

            if epoch == num_epoch - 1:
                print("[MF-Stable-DR] Reach preset epochs, it seems does not converge.")

    def predict(self, x):
        """Return sigmoid predictions for (user, item) pairs `x` of shape (N, 2)."""
        x = torch.LongTensor(x).T
        pred = self.prediction_model(*x)
        pred = self.sigmoid(pred)
        return pred.detach().cpu().numpy()

    def _compute_IPS(self, x, y, y1, mu, y_ips=None):
        """Compute inverse propensity scores 1/z for every user-item pair.

        Returns a flat tensor of length num_users * num_items; `mu` smooths
        the empirical probabilities and keeps the result differentiable in mu.
        """
        if y_ips is None:
            # BUGFIX: the original branch only printed a message and then fell
            # through to `return one_over_zl` with the variable never assigned,
            # raising NameError. Fall back to the observed labels as the prior
            # sample, which also keeps mu in the autograd graph.
            print("y_ips is none")
            y_ips = y

        py1 = y_ips.sum() / len(y_ips)
        py0 = 1 - py1
        # Observation rate, smoothed by mu.
        po1 = (len(x) + mu) / (x[:, 0].max() * x[:, 1].max() + 2 * mu)
        py1o1 = (y.sum() + mu) / (len(y) + 2 * mu)
        py0o1 = 1 - py1o1

        # P(o=1 | y): default to the negative-class propensity everywhere,
        # then overwrite the positive entries (flat indices in y1).
        propensity = torch.zeros(
            self.num_users * self.num_items, device=self.device
        )
        propensity += (py0o1 * po1) / py0
        propensity[y1] = (py1o1 * po1) / py1

        one_over_zl = 1 / propensity

        return one_over_zl


# Tensorize the training split.
x_train = torch.LongTensor(data[["user_id", "item_id"]].values)
y_train = torch.Tensor(data["score"].values)

# Tensorize the held-out split; labels stay a numpy array for sklearn metrics.
x_test = torch.LongTensor(test_data[["user_id", "item_id"]].values)
y_test = test_data["score"].values

# Draw a random 5% sample of test labels as the prior for IPS estimation.
ips_idxs = np.arange(len(y_test))
np.random.shuffle(ips_idxs)
sample_size = int(0.05 * len(ips_idxs))
y_ips = torch.Tensor(y_test[ips_idxs[:sample_size]])

# Transpose so the pairs can be unpacked as (user_ids, item_ids).
x_test = x_test.T


def objective(trial: optuna.Trial, device):
    """Optuna objective: train StableDRMCD with sampled hyper-parameters and
    return the validation AUC (the study maximizes this value).

    Args:
        trial: the Optuna trial supplying hyper-parameter suggestions.
        device: torch device string (e.g. "cuda:0") to train on.
    """
    # Idiomatic device transfer. The original allocated `torch.empty_like`
    # buffers and `copy_`-ed into them, which is equivalent but wasteful.
    x_t = x_train.to(device)
    y_t = y_train.to(device)
    y_ips_t = y_ips.to(device)

    model = StableDRMCD(USER_NUM, ITEM_NUM, KNOW_NUM, device)
    model.to(device)
    model.fit(
        x_t,
        y_t,
        y_ips_t,
        mu=trial.suggest_int("mu", 0, 2000),
        eta=trial.suggest_int("eta", 1, 2000),
        stop=5,
        num_epoch=1000,
        batch_size=trial.suggest_int("batch_size", 16, 256),
        lr=trial.suggest_float("lr", 1e-5, 1e-1),
        lr1=trial.suggest_int("lr1", 5, 300),
        lamb=trial.suggest_float("lamb", 0, 1e-3),
        tol=trial.suggest_float("tol", 1e-5, 1e-3),
        G=trial.suggest_int("G", 1, 10),
        verbose=False,
    )

    # Evaluate on the held-out split (x_test is already transposed to
    # (user_ids, item_ids)); use a distinct name instead of reusing x_t.
    x_eval = x_test.to(device)
    pred = model.prediction_model(*x_eval)
    pred = model.sigmoid(pred).detach().cpu().numpy()
    auc = roc_auc_score(y_test, pred)
    print(f"auc: {auc}")
    return auc


def run_trail(device):
    """Worker entry point: attach to the shared study and run 100 trials
    on the given device. (Name kept for the process launcher below.)"""
    storage_uri = "sqlite:///optuna/stable-dr-mcd-junyi.db"
    study = optuna.load_study(
        study_name="stable-dr-mcd-junyi",
        storage=storage_uri,
    )

    def _objective_on_device(trial):
        return objective(trial, device)

    study.optimize(_objective_on_device, n_trials=100)
    return "Finish"


if __name__ == "__main__":
    # CUDA contexts cannot be shared across forked workers; spawn fresh
    # interpreters instead.
    multiprocessing.set_start_method("spawn", force=True)

    # Ensure the sqlite directory exists, then create the study exactly once.
    # BUGFIX: load_if_exists=True replaces the original exists()-then-create
    # check, which was racy (TOCTOU) and crashed when optuna/ was missing.
    os.makedirs("optuna", exist_ok=True)
    optuna.create_study(
        study_name="stable-dr-mcd-junyi",
        direction="maximize",
        storage="sqlite:///optuna/stable-dr-mcd-junyi.db",
        load_if_exists=True,
    )

    devices = ["cuda:{}".format(i) for i in range(3)]

    # Launch 8 worker processes per GPU, round-robin over the devices.
    processes = []
    for index in range(8 * len(devices)):
        device = devices[index % len(devices)]
        process = multiprocessing.Process(target=run_trail, args=(device,))
        process.start()
        processes.append(process)

    for process in processes:
        process.join()
    print("All Finish")
