import os
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
import torch
from model import RecSys
from dataset import SeqDataset
from torch.utils.data import DataLoader
from transformers import AdamW, get_scheduler
from torch import nn
from tqdm.auto import tqdm
import pickle
import torch.optim as optim
from torch.cuda.amp import GradScaler, autocast
from utils import *
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM,AutoConfig
from mylog import *
import argparse
# Command-line interface: the only flag is the path to the experiment YAML.
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str,
                    default="./task_yamls/e4srec_train.yaml")
args = parser.parse_args()

# Compute device as a plain string; it is handed directly to .to(...) below.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# Experiment logger and YAML config; record this script's absolute path
# in the log for provenance.
ml = MyLog("myExperiment.log")
config = ml.read_yaml(args.config)
ml.init(os.path.abspath(__file__))

base_model = config['base_model']
model_path=config["model_path"]
data = config['data']
epoch_num = config['epoch_num']
learning_rate = config['learning_rate']
maxlen = config['maxlen']
item_embed_hidden_units = config['item_embed_hidden_units']
batch_size = config['batch_size']
use_pretrained=config['use_pretrained']
lora_path=config['lora_path']
item_embed_path=config['item_embed_path']

# Build the sequential-recommendation dataset and the LLM-backed model.
dataset = SeqDataset("./data/" + data, maxlen=maxlen)

# Load the pre-computed item embedding table.
# FIX: pickle.load(open(...)) leaked the file handle; the with-block
# closes it deterministically.
with open(item_embed_path, 'rb') as f:
    item_embed = pickle.load(f)

model = RecSys(output_dim=dataset.item_max,
               input_dim=item_embed_hidden_units,
               base_model=base_model,
               item_embed=item_embed,
               model_path=model_path,
               use_pretrained=use_pretrained,
               lora_path=lora_path,
               cache_dir=model_path)
# NOTE(review): the model is never moved with .to(device) in this script —
# presumably RecSys places its submodules on the right device internally;
# verify against the RecSys implementation.
if use_pretrained:
    # Restore the extra (non-LoRA) linear-layer weights saved alongside
    # the adapter checkpoint.
    adapter_ckpt = os.path.join(lora_path, "adapter.pth")
    model.load_from_checkpoint(adapter_ckpt)

# DataLoaders. Only the training split needs shuffling; the evaluation splits
# are iterated in full and HR/NDCG/MRR are order-independent, so shuffle=False
# skips a pointless per-epoch index permutation.
train_dataloader = DataLoader(
    dataset.train_data, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(
    dataset.val_data, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
test_dataloader = DataLoader(
    dataset.test_data, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
# NOTE(review): valid_dataloader is never used below — per-epoch evaluation
# runs on the test split, which risks test-set leakage if these numbers guide
# model selection; confirm this is intentional.
candidate_dataloader = DataLoader(
    dataset.candidate_item, batch_size=batch_size, collate_fn=collate_fn3)

# FIX: torch.optim.AdamW replaces transformers.AdamW, which is deprecated and
# removed in recent transformers releases; torch's implementation is the
# documented drop-in replacement.
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()

# Zero-shot baseline evaluation before any fine-tuning. Log the result —
# previously these values were computed and then silently overwritten.
hr, ndcg, mrr = e4srec_evaluate_all(test_dataloader, model, 10, device)
ml.write(f"Baseline: HR@10={hr}, nDCG@10={ndcg}, MRR@10={mrr}")

k = 10  # evaluation cut-off; constant across epochs (hoisted out of the loop)
for epoch in range(epoch_num):
    model.train()
    total_loss = 0
    print(f"Epoch {epoch+1}/{epoch_num}\n-------------------------------")
    progress_bar = tqdm(range(len(train_dataloader)), position=0)
    progress_bar.set_description(f'loss: {0:>7f}')

    for step, (inputs, inputs_mask, labels) in enumerate(train_dataloader, start=1):
        inputs = inputs.to(device)
        inputs_mask = inputs_mask.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        pred = model(inputs, inputs_mask)
        # Squeeze labels to the 1-D shape CrossEntropyLoss expects
        # (presumably they arrive as (B, 1) — TODO confirm). A batch of
        # size 1 squeezes only dim 0; a bare squeeze() there would also
        # collapse the remaining size-1 dim down to a 0-d scalar.
        if labels.shape[0] == 1:
            labels = labels.squeeze(0)
        else:
            labels = labels.squeeze()
        loss = loss_fn(pred, labels)
        loss.backward()
        optimizer.step()
        progress_bar.set_description(f'loss: {loss.item():>7f}')
        progress_bar.update(1)
        total_loss += loss.item()
    progress_bar.close()  # release the bar so per-epoch bars don't pile up

    avg_loss = total_loss / len(train_dataloader)
    print("total loss:", avg_loss)
    ml.write(f"Epoch {epoch+1}:\ntotal loss={avg_loss}, ", end="")

    # Persist this epoch's LoRA adapter plus the extra linear layers.
    output_dir = os.path.join(config['output_dir'], f"epoch={epoch+1}")
    model.model.save_pretrained(output_dir)
    # FIX: this path used to be stored in `model_path`, clobbering the config
    # variable of the same name after the first epoch; use a dedicated local.
    adapter_path = os.path.join(output_dir, "adapter.pth")
    item_proj, score = model.item_proj.state_dict(), model.score.state_dict()
    torch.save({'item_proj': item_proj, 'score': score}, adapter_path)

    # Per-epoch evaluation (NOTE(review): on the test split, not validation).
    hr, ndcg, mrr = e4srec_evaluate_all(test_dataloader, model, k, device)
    ml.write(f"HR@{k}={hr}, nDCG@{k}={ndcg}, MRR@{k}={mrr}")

# Final status: echo to stdout, mirror to the experiment log, then close it.
done_msg = "Training done!"
print(done_msg)
ml.write(done_msg)
ml.close()