#coding=utf-8
import optuna
from numpy import mean
from torch.utils.data import DataLoader
from dataset import TaoBaoDataset,XLongDataset
from models.Dien import *
from models.Din import *
from models.Sim import *
from models.End4Rec import *
from models.kuaiformer import *
from models.TWIN import *
from models.FEARec import *
from config import *
import torch.optim as optim
import torch
import os
from sklearn.metrics import roc_auc_score
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore')
def set_seed(seed_value):
    """Seed every RNG in use (python `random`, NumPy, torch CPU and CUDA)
    so that training runs are reproducible.

    Args:
        seed_value: integer seed applied to all generators.
    """
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)

    if torch.cuda.is_available():
        # FIX: seed every visible GPU, not just the current device
        # (the original `torch.cuda.manual_seed` covers one device only).
        torch.cuda.manual_seed_all(seed_value)

        # Trade cuDNN autotuning speed for deterministic kernel selection.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


# NOTE(review): hard-coded credential committed to source — a W&B API key
# should be read from the environment or a secrets manager, and this key
# should be rotated since it is now exposed in version control.
os.environ["WANDB_API_KEY"] = "39e1ad5f4d18c65342cc4489e887bd7bf717a973"
# os.environ["WANDB_API_KEY"] = "693e85da1f2365ff996276d7d4fa83f24d5ab4d7" # lxm
# Make CUDA kernel launches synchronous: slower, but errors surface at the
# failing call rather than at a later sync point (debugging aid).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

# set_seed(seed)

# Build the training dataset for the configured `dataset_name`
# (names come from `config` via the star import above).
if dataset_name.startswith("taobao"):
    # "taobao2" selects the alternate target-item file; plain "taobao" the default.
    train_target_item_file = f'../data/taobao/final_train_data{"2" if dataset_name[-1] == "2" else ""}.csv'

    user_behavior_file = '../data/taobao/active_user_actions.csv'

    train_data = TaoBaoDataset(train_target_item_file, user_behavior_file, mode='train', n_users=n_users)
elif dataset_name == "xlong":
    data_path = "../data/xlong"
    train_data = XLongDataset(data_path, nrows=n_users)
else:
    print(f"{dataset_name}数据集不存在")
    # FIX: `exit()` is a site-module convenience and exits with status 0;
    # raise SystemExit(1) so an unknown dataset is reported as a failure.
    raise SystemExit(1)

# Build the architecture-specific configuration object for the chosen model.
model_key = model_name.lower()

if model_key == "kuaiformer":
    config = KuaiFormerConfig(
        train_data.vocab_size_dic,
        heads,
        q_num=interest_num,
        split_range=split_range,
        total_behavior_num=total_behavior_num,
        id_embedding_dim=id_embedding_dim,
        simple_embedding_dim=simple_embedding_dim,
        device_id=device_id,
    )
elif model_key == "fearec":
    config = FEAConfig(
        train_data.vocab_size_dic,
        id_embed_dim=id_embedding_dim,
        simple_embed_dim=simple_embedding_dim,
        seq_len=total_behavior_num,
    )
else:
    # Every remaining architecture shares the common BaseConfig.
    config = BaseConfig(
        train_data.vocab_size_dic,
        id_embed_dim=id_embedding_dim,
        simple_embed_dim=simple_embedding_dim,
        device_id=device_id,
        sim_mode=sim_mode,
        k=k,
        seq_len=total_behavior_num,
    )

device = config.device
config.print_info(model_name, dataset_name)

# Best AUC observed for each epoch index, shared across all Optuna trials
# (used below to decide when to write a checkpoint).
best_auc_dic = dict.fromkeys(range(EPOCHS), 0.0)
# Dispatch table of supported architectures (lower-cased name -> class);
# replaces a seven-branch if/elif chain that re-lowered `model_name` per branch.
_MODEL_REGISTRY = {
    'din': DIN,
    'dien': DIEN,
    'sim': SIM,
    'end4rec': END4Rec,
    'twin': TWIN,
    'kuaiformer': KuaiFormer,
    'fearec': FEARec,
}

model_cls = _MODEL_REGISTRY.get(model_name.lower())
if model_cls is None:
    print(f"No such model:{model_name}")
    # FIX: `exit()` terminates with status 0; signal the error to the caller.
    raise SystemExit(1)
model = model_cls(config)

# For XLong, warm-start the item-id embedding table with pretrained graph
# embeddings loaded from disk before moving the model to the target device.
if dataset_name == "xlong":
    weight = torch.tensor(np.loadtxt("../data/xlong/graph_emb.txt"), dtype=torch.float32)
    # NOTE(review): assumes the file's (rows, cols) match the embedding
    # layer's (vocab_size, dim) — no shape check is performed. TODO confirm.
    model.embedding.embed_layer_dic['item_id_embedding_0'].weight.data = weight

model = model.to(device)

def objective(trial):
    """Optuna objective: train the module-level `model` for EPOCHS epochs
    and return the final epoch's mean batch AUC (maximized by the study).

    Args:
        trial: optuna.Trial used to sample hyperparameters and to report
            intermediate AUCs for median pruning.

    Returns:
        Mean AUC of the last completed epoch (0.0 if EPOCHS == 0).

    Raises:
        optuna.exceptions.TrialPruned: when the pruner decides to stop early.
    """
    origin_criterion = nn.CrossEntropyLoss().to(device)
    # Hyperparameter search space (linear scale as in the original setup).
    # `compress_lambda` is sampled but not consumed here — presumably read by
    # the model/config elsewhere; kept so the search space is unchanged.
    compress_lambda = trial.suggest_float("compress_lambda", 1e-5, 1e-1, log=False)
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=False)
    weight_decay = trial.suggest_float("weight_decay", 1e-6, 1e-2, log=False)

    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    dataloader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
    total_num = len(train_data)

    auc = 0.0  # final-epoch mean AUC; FIX: defined even if EPOCHS == 0
    for epoch in range(EPOCHS):
        model.train()
        accuracy = 0
        cur_data_num = 0
        train_loss_ = 0
        aucs = []
        for idx, (target_item, behaviors, target) in enumerate(dataloader):
            # FIX: count the actual batch size — the last batch may be smaller
            # than BATCH_SIZE, which previously deflated the running accuracy.
            cur_data_num += target.size(0)
            target_item = target_item.to(device)
            behaviors = behaviors.to(device)
            target = target.to(device)
            pred_y = model(behaviors, target_item)
            pred_y = pred_y.float().to(device)
            train_loss = origin_criterion(pred_y, target)
            accuracy += (torch.argmax(pred_y, dim=-1) == target).sum().cpu().item()
            # AUC is undefined when only one class appears in the batch;
            # fall back to the chance level 0.5 in that case.
            y_pred_prob_positive = pred_y[:, 1].cpu().detach().numpy()
            target_np = target.cpu().detach().numpy()
            if len(set(target_np)) > 1:
                batch_auc = roc_auc_score(target_np, y_pred_prob_positive)
            else:
                batch_auc = 0.5
            aucs.append(batch_auc)
            cur_acc = accuracy / cur_data_num
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()
            train_loss_ += train_loss.detach() * len(behaviors)
            if idx % 50 == 0:
                # FIX: report the running mean over all batches so far —
                # the original `mean(auc)` averaged a single scalar (no-op).
                print("{} epoch {} step training loss:{}, acc:{}, auc:{}".format(epoch + 1,
                                                                                 idx + 1,
                                                                                 train_loss,
                                                                                 cur_acc,
                                                                                 mean(aucs)))
        total_acc = accuracy / total_num
        total_train_loss = train_loss_ / total_num
        auc = mean(aucs)
        print("{} epoch finish training loss:{}, acc:{}, auc:{}".format(epoch + 1, total_train_loss, total_acc,
                                                                        auc))

        # FIX: exists()+mkdir is racy and mkdir cannot create nested dirs;
        # makedirs(exist_ok=True) handles both levels atomically enough here.
        os.makedirs(f"./checkpoints/{model_name}_pths/", exist_ok=True)

        # Checkpoint only full-data runs (n_users is None) that beat the best
        # AUC recorded so far for this epoch index across trials.
        if auc > best_auc_dic[epoch] and n_users is None:
            best_auc_dic[epoch] = auc
            torch.save({
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'best_auc': auc
            }, "./checkpoints/{}_pths/{}_{}_{}.pth".format(model_name, dataset_name, model_name, epoch))
        trial.report(auc, epoch)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

    return auc


if __name__ == '__main__':
    # Keep Optuna's log records out of the root logger.
    optuna.logging.disable_propagation()

    pruner = optuna.pruners.MedianPruner(n_warmup_steps=3)
    study = optuna.create_study(
        pruner=pruner,
        direction="maximize",
        study_name="compression",
        load_if_exists=True,
    )
    study.optimize(objective, n_trials=6, timeout=1200)

    # Summarize the search outcome.
    print(f"\nbest_value = {study.best_value}")
    print("best_params:", study.best_params)
    print("best_auc_dic:", best_auc_dic)
