#coding=utf-8
import optuna
from numpy import mean
from torch.utils.data import DataLoader
from dataset import TaoBaoDataset,XLongDataset,TmallDataset
from models.ReinforceModelConfig import *
from models.Reinforce_model import *
from config import *
import torch.optim as optim
import torch
import os
from sklearn.metrics import roc_auc_score
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore')
def set_seed(seed_value):
    """Seed every RNG source (python, numpy, torch CPU/CUDA) for reproducibility.

    Also disables cudnn autotuning and forces deterministic kernels, which can
    slow training but makes convolution results repeatable across runs.
    """
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if torch.cuda.is_available():
        # manual_seed_all seeds every visible GPU, not just the current device
        torch.cuda.manual_seed_all(seed_value)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


# SECURITY: a W&B API key is hard-coded in source control — it should be read
# from an environment variable / secrets store, and this key should be rotated.
os.environ["WANDB_API_KEY"] = "39e1ad5f4d18c65342cc4489e887bd7bf717a973"
# os.environ["WANDB_API_KEY"] = "693e85da1f2365ff996276d7d4fa83f24d5ab4d7" # lxm
# Make CUDA errors surface at the failing call instead of asynchronously.
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

# set_seed(seed)


# Build the training dataset selected by `dataset_name` (star-imported from config).
if dataset_name.startswith("taobao"):
    # data: a trailing "2" in the dataset name selects the alternate split
    # (final_train_data2.csv); both variants share the behavior file.
    train_target_item_file = f'../data/taobao/final_train_data{"2" if dataset_name[-1] == "2" else ""}.csv'

    user_behavior_file = '../data/taobao/active_user_actions.csv'

    train_data = TaoBaoDataset(train_target_item_file, user_behavior_file, mode='train', n_users=n_users)
elif dataset_name == "xlong":
    data_path = "../data/xlong"
    train_data = XLongDataset(data_path, nrows=n_users)
elif dataset_name == "tmall":
    train_target_item_file = '../data/tmall/final_train_data.csv'

    user_behavior_file = '../data/tmall/800_user_actions.csv'
    train_data = TmallDataset(train_target_item_file, user_behavior_file,
                              mode='train', n_users=n_users,
                              behaviors_num=total_behavior_num)
else:
    print(f"{dataset_name}数据集不存在")
    # exit with a non-zero status so shell callers can detect the failure
    # (the original bare exit() reported success, status 0)
    exit(1)

# models
# Assemble the model configuration; all hyper-parameters are star-imported
# from the project-level config module.
config = ReinforceModelConfig(
        train_data.vocab_size_dic,
        heads=heads,
        total_behavior_num=total_behavior_num,
        group_num=group_num,
        interest_num=interest_num,
        short_time=short_time,
        id_embedding_dim=id_embedding_dim,
        simple_embedding_dim=simple_embedding_dim,
        layers1=layers1,
        layers2=layers2,
        drop_out=drop_out,
        device_id=device_id,
        compressed=compressed,
        diff1=diff1,
        diff2=diff2,
        layer=layer,
        use_cos=use_cos,
        random_init_q=random_init_q,
        compress_net=compress_net
)
device = config.device

# Echo the effective hyper-parameters for the run log.
print(f'''
    model_name:{model_name}
    batch_size:{BATCH_SIZE}
    heads:{heads},
    total_behavior_num:{total_behavior_num},
    group_num:{group_num},
    interest_num:{interest_num},
    short_time:{short_time},
    id_embedding_dim:{id_embedding_dim},
    simple_embedding_dim:{simple_embedding_dim},
    layers1:{layers1},
    layers2:{layers2},
    drop_out:{drop_out},
    device_id :{device_id},
    compressed:{compressed},
    diff1:{diff1},
    diff2:{diff2},
    layer : {layer},
    use_cos:{use_cos},
    random_init_q:{random_init_q}
''')

# Best AUC observed per epoch index, shared across all optuna trials
# (used by objective() to decide when to checkpoint).
best_auc_dic = {
   i: 0.0 for i in range(EPOCHS)
}
if model_name == 'Reinforce':
    model = Reinforce_Model(config)
else:
    print(f"No such model:{model_name}")
    # exit with a non-zero status so shell callers can detect the failure
    # (the original bare exit() reported success, status 0)
    exit(1)

if dataset_name == "xlong":
    # Warm-start the item-id embedding table with pretrained graph embeddings.
    weight = torch.tensor(np.loadtxt("../data/xlong/graph_emb.txt"), dtype=torch.float32)
    model.embedding.embed_layer_dic['item_id_embedding_0'].weight.data = weight

model = model.to(device)

def objective(trial):
    """Optuna objective: train the global `model` for EPOCHS epochs, return final mean AUC.

    Samples `compress_lambda`, `lr` and `weight_decay` from the trial, trains
    with Adam on cross-entropy + weighted compression loss, reports the mean
    per-batch AUC of each epoch for median pruning, and checkpoints whenever an
    epoch's AUC beats the best recorded for that epoch (full-data runs only,
    i.e. n_users is None).

    NOTE(review): `model` and `best_auc_dic` are module-level globals, so
    successive trials continue training the same weights instead of starting
    from a fresh initialization — confirm this is intended.
    """
    origin_criterion = nn.CrossEntropyLoss().to(device)
    compress_lambda = trial.suggest_float("compress_lambda", 1e-5, 1e-1, log=False)
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=False)

    weight_decay = trial.suggest_float("weight_decay", 1e-6, 1e-2, log=False)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    dataloader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)

    total_num = len(train_data)

    for epoch in range(0, EPOCHS):
        model.train()
        accuracy = 0
        cur_data_num = 0
        train_loss_ = 0
        aucs = []
        for idx, (target_item, behaviors, target) in enumerate(dataloader):
            # Count the actual batch size so the running accuracy stays correct
            # on the (possibly smaller) final batch; the original added
            # BATCH_SIZE unconditionally and overcounted.
            cur_data_num += len(target)
            target_item = target_item.to(device)
            behaviors = behaviors.to(device)
            target = target.to(device)
            pred_y, compress_loss = model(behaviors, target_item)
            pred_y = pred_y.float().to(device)
            train_loss = origin_criterion(pred_y, target)
            train_loss += compress_loss * compress_lambda
            accuracy += (torch.argmax(pred_y, dim=-1) == target).sum().cpu().item()
            y_pred_prob_positive = pred_y[:, 1].cpu().detach().numpy()
            # roc_auc_score is undefined when the batch contains only one class;
            # fall back to the chance level 0.5.
            if len(set(target.cpu().numpy())) > 1:
                auc = roc_auc_score(target.cpu().detach().numpy(), y_pred_prob_positive)
            else:
                auc = 0.5
            aucs.append(auc)
            cur_acc = accuracy / cur_data_num
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()
            train_loss_ += train_loss.detach() * len(behaviors)
            if idx % 50 == 0:
                # Report the running mean AUC; the original called mean(auc) on
                # a scalar, which just printed the current batch's AUC.
                print("{} epoch {} step training loss:{}, acc:{}, auc:{}".format(epoch + 1,
                                                                                 idx + 1,
                                                                                 train_loss,
                                                                                 cur_acc,
                                                                                 mean(aucs)))
        total_acc = accuracy / total_num
        total_train_loss = train_loss_ / total_num
        auc = mean(aucs)
        print("{} epoch finish training loss:{}, acc:{}, auc:{}".format(epoch + 1, total_train_loss, total_acc,
                                                                        auc))
        # makedirs(exist_ok=True) avoids the exists()/mkdir race of the original.
        os.makedirs("./checkpoints/", exist_ok=True)
        if auc > best_auc_dic[epoch] and n_users is None:
            best_auc_dic[epoch] = auc
            torch.save({
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'best_auc': auc
            }, "./checkpoints/{}_{}_{}.pth".format(dataset_name, model_name, epoch))
        # Let the median pruner kill unpromising trials early.
        trial.report(auc, epoch)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

    return auc


if __name__ == '__main__':
    optuna.logging.disable_propagation()
    # Reuse the existing "compression" study when present; the median pruner
    # drops trials whose intermediate AUC lags the running median after a
    # 3-epoch warmup.
    study = optuna.create_study(
        pruner=optuna.pruners.MedianPruner(n_warmup_steps=3), direction="maximize",
        study_name="compression", load_if_exists=True)

    # Stop after 6 trials or 20 minutes of wall-clock time, whichever first.
    study.optimize(objective, n_trials=6, timeout=1200)

    print("\n\nbest_value = " + str(study.best_value))
    print("best_params:", study.best_params)
    print("best_auc_dic:", best_auc_dic)
