import torch 
import torch.utils.data as Data 
from torch import nn, optim 
import time 
from tqdm import tqdm
import os 
from model.model import *
import torch
from torch.utils.data import Dataset

# Number of visible CUDA devices; >1 triggers DataParallel wrapping in __main__.
n_gpu = torch.cuda.device_count() 
# Per-epoch running-average losses, appended by train(); module-level on purpose.
loss_values = []

def make_data(datas):
    """Tokenize raw corpus lines into per-character token lists.

    Each line is stripped, split into single characters, every tab
    character replaced by the "<sep>" token, and one trailing "<sep>"
    appended as an end-of-sample marker.
    """
    tokenized = []
    for raw_line in datas:
        text = raw_line.strip()
        tokens = ["<sep>" if ch == '\t' else ch for ch in text]
        tokens.append('<sep>')
        tokenized.append(tokens)
    return tokenized

class MyDataSet(Dataset):
    """Dataset of token-id sequences for next-token (LM) training.

    Each item yields the sequence shifted for teacher forcing: the
    decoder input drops the last token, the decoder output drops the
    first token.
    """

    def __init__(self, datas):
        self.datas = datas

    def __getitem__(self, item):
        sequence = self.datas[item]
        dec_in = sequence[:-1]
        dec_out = sequence[1:]
        return {
            "decoder_input": dec_in,
            "decoder_input_len": len(dec_in),
            "decoder_output": dec_out,
            "decoder_output_len": len(dec_out),
        }

    def __len__(self):
        return len(self.datas)

    def padding_batch(self, batch):
        """Collate fn: right-pad each sample to the batch max length with
        the "<pad>" id, then stack into (B, T) LongTensors.

        NOTE(review): extends the per-sample lists in place; safe because
        __getitem__ hands out fresh slices on every call.
        """
        in_max = max(d["decoder_input_len"] for d in batch)
        out_max = max(d["decoder_output_len"] for d in batch)

        # `word2id` is provided by the `model.model` star import.
        pad_id = word2id["<pad>"]
        for d in batch:
            d["decoder_input"].extend([pad_id] * (in_max - d["decoder_input_len"]))
            d["decoder_output"].extend([pad_id] * (out_max - d["decoder_output_len"]))

        decoder_inputs = torch.tensor([d["decoder_input"] for d in batch], dtype=torch.long)
        decoder_outputs = torch.tensor([d["decoder_output"] for d in batch], dtype=torch.long)
        return decoder_inputs, decoder_outputs

def epoch_time(start_time, end_time):
    """Split a non-negative wall-clock interval into (minutes, seconds)."""
    minutes, seconds = divmod(int(end_time - start_time), 60)
    return minutes, seconds

def train_step(model, data_loader, optimizer, criterion, clip=1, print_every=None):
    """Run one full training epoch over *data_loader*.

    Args:
        model: decoder-only LM; called as ``model(dec_inputs) -> (logits, _)``.
        data_loader: yields ``(decoder_inputs, decoder_outputs)`` LongTensor pairs.
        optimizer: torch optimizer, stepped once per batch.
        criterion: loss over (logits, flattened targets).
        clip: max gradient norm passed to ``clip_grad_norm_``.
        print_every: report the running-average loss every N batches
            (None/0 is normalized to 1, i.e. every batch).

    Returns:
        Tuple ``(epoch_avg_loss, last_printed_avg)``; the second element
        falls back to the epoch average if nothing was printed.

    NOTE(review): relies on the module-level ``device`` global (from the
    ``model.model`` star import).
    """
    model.train()

    if print_every is None or print_every == 0:
        print_every = 1

    print_loss_total = 0   # running sum for periodic reporting
    print_loss_avg = 0
    epoch_loss = 0

    for i, (dec_inputs, dec_outputs) in enumerate(tqdm(data_loader)):
        optimizer.zero_grad()

        dec_inputs, dec_outputs = dec_inputs.to(device), dec_outputs.to(device)

        outputs, _ = model(dec_inputs)

        # assumes `outputs` is already flattened to (batch*seq, vocab) — TODO confirm
        loss = criterion(outputs, dec_outputs.view(-1))

        # BUGFIX: the loss was previously accumulated into BOTH counters
        # twice per batch (once before loss.backward() and again after
        # optimizer.step()), so every printed and returned loss value was
        # exactly doubled. Accumulate exactly once per batch.
        print_loss_total += loss.item()
        epoch_loss += loss.item()

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        if print_every and (i + 1) % print_every == 0:
            print_loss_avg = print_loss_total / (i + 1)
            print(f'\tCurrent Loss: {print_loss_avg:.4f}')

    return epoch_loss / len(data_loader), print_loss_avg if print_loss_avg != 0 else epoch_loss / len(data_loader)

def train(model, data_loader, loss_list, model_path='ResGPT2_lccc_final.pt'):
    """Train for `epochs` epochs, checkpointing weights after every epoch.

    Args:
        model: GPT model (possibly wrapped in ``nn.DataParallel``).
        data_loader: training batches of (dec_inputs, dec_outputs).
        loss_list: output list; each epoch's loss is appended as a
            ``"x.xxx"`` formatted string.
        model_path: checkpoint file path; existing weights are loaded first.

    Returns:
        The trained model.

    NOTE(review): depends on module-level globals ``epochs``, ``device``,
    ``CLIP``, ``n_gpu`` and ``loss_values``. ``CLIP`` is not defined in
    this file — presumably provided by the ``model.model`` star import;
    TODO confirm.
    """
    # ignore_index=0: padding id, excluded from the loss — assumes
    # word2id["<pad>"] == 0; TODO confirm against the vocabulary.
    criterion = nn.CrossEntropyLoss(ignore_index=0).to(device)

    if os.path.isfile(model_path):
        print(f'正在加载预训练模型权重：{model_path}\n')
        model.load_state_dict(torch.load(model_path))

    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    for epoch in range(epochs):
        start_time = time.time()

        # BUGFIX: train_step was previously invoked twice per epoch (the
        # first call's result was discarded), silently doubling the
        # training work done each epoch. Call it exactly once.
        train_loss, avg_loss = train_step(model, data_loader, optimizer, criterion, CLIP, print_every=10)
        loss_values.append(avg_loss)

        end_time = time.time()

        # DataParallel wraps the real model in `.module`; save bare weights
        # so a single-GPU run can reload them.
        if n_gpu > 1:
            torch.save(model.module.state_dict(), model_path)
        else:
            torch.save(model.state_dict(), model_path)

        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')

        loss_list.append(f"{train_loss:.3f}")
        print(f'\tTrain Loss: {train_loss:.3f}')

    return model


def print_num_parameters(model):
    """Print the model's total and trainable parameter counts."""
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total:,} total parameters.')
    print(f'{trainable:,} training parameters.')

if __name__ == '__main__':
    # Load the raw corpus: one sample per line, fields separated by tabs.
    try:
        with open('/data/whl/cl/gpt2/dataset/lccc_dataset.txt', 'r', encoding='utf-8') as f:
            datas = f.readlines()
    except FileNotFoundError:
        print("无法找到数据集文件'lccc_dataset.txt'，请检查文件是否存在。")
        input('按回车键退出...')
        exit()

    train_data = make_data(datas)
    loss = []

    # Map characters to ids; `word2id` comes from the `model.model` star import.
    train_num_data = [[word2id[word] for word in line] for line in train_data]

    ################################### batch size 和 epoch ##########################################
    batch_size = 32
    epochs_size_input = 200
    #################################################################################################

    epochs = int(epochs_size_input)
    dataset = MyDataSet(train_num_data)
    data_loader = Data.DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.padding_batch)
    model = GPT().to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    while True:
        model_path = 'ResGPT2_lccc_final.pt'
        if os.path.isfile(model_path):
            inquire = input(f"检测到工作目录中已存在模型文件'{model_path}'，是否继续训练？(y/n):")
            if inquire not in ["y", "n"]:
                print("输入错误！\n")
            else:
                if inquire == 'y':
                    print(f'正在加载预训练模型：{model_path}')
                    model.load_state_dict(torch.load(model_path))
                    # BUGFIX: this call previously passed the arguments
                    # positionally as (model_path, loss), binding the path
                    # string to loss_list and the list to model_path — a
                    # TypeError in train()'s os.path.isfile check. Use
                    # keywords to bind them correctly.
                    train(model, data_loader, loss_list=loss, model_path=model_path)
                    break
                elif inquire == 'n':
                    print('\n将进行新的训练任务\n')
                    os.remove(model_path)
                    train(model, data_loader, loss_list=loss)
                    break
        else:
            train(model, data_loader, loss_list=loss)
            break

    with open('loss_record.txt', 'w') as file:
        file.write(" ".join(loss))

    print(f"\n模型文件'{model_path}'已经输出至当前工作目录。")