import torch 
import torch.utils.data as Data 
from torch import nn, optim 
from tqdm import tqdm
from model.model import GPT 
from model.dataloader import MyDataSet, get_dataset_info 
from peft import get_peft_model, LoraConfig, TaskType  

# --- Reproducibility, device selection, and training hyperparameters ---
seed = 42
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Computed once (the original evaluated torch.cuda.device_count() twice);
# used below to decide whether to wrap the model in DataParallel.
n_gpu = torch.cuda.device_count()

loss_values = []  # per-epoch average losses, written to disk at the end of main()
epochs = 400
CLIP = 1  # max gradient norm for clipping

def make_data(datas):
    """Convert raw text lines into per-character token sequences.

    Each line is stripped of surrounding whitespace, every character becomes
    one token except a tab, which maps to the "<sep>" token; a trailing
    "<sep>" marks the end of the sequence.
    """
    tokenized = []
    for line in datas:
        tokens = ["<sep>" if ch == '\t' else ch for ch in line.strip()]
        tokens.append('<sep>')
        tokenized.append(tokens)
    return tokenized

def train_step(model, data_loader, optimizer, criterion, clip=1, print_every=None):
    """Run one training pass over ``data_loader``.

    Performs forward/backward/step per batch with gradient-norm clipping.
    Returns ``(epoch_average_loss, last_printed_running_average)``; when
    nothing was printed, both values are the epoch average.
    """
    model.train()

    # A falsy interval (None or 0) means "print every batch".
    if not print_every:
        print_every = 1

    running_total = 0   # cumulative loss since the start of the pass
    last_printed_avg = 0
    epoch_loss = 0

    for step, batch in enumerate(tqdm(data_loader)):
        dec_inputs, dec_outputs = batch
        optimizer.zero_grad()

        dec_inputs = dec_inputs.to(device)
        dec_outputs = dec_outputs.to(device)

        outputs, _ = model(dec_inputs)
        loss = criterion(outputs, dec_outputs.view(-1))

        batch_loss = loss.item()
        running_total += batch_loss
        epoch_loss += batch_loss

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        if print_every and (step + 1) % print_every == 0:
            # Running average over all batches seen so far (never reset).
            last_printed_avg = running_total / (step + 1)
            print(f'\tCurrent Loss: {last_printed_avg:.4f}')

    epoch_avg = epoch_loss / len(data_loader)
    return epoch_avg, last_printed_avg if last_printed_avg != 0 else epoch_avg

def main():
    """Fine-tune the GPT model with LoRA adapters and save the adapter weights.

    Loads the character-level chat dataset, wraps the base GPT in a PEFT LoRA
    model, trains for ``epochs`` epochs, then saves the LoRA weights and the
    per-epoch loss record.
    """
    try:
        with open('/data/whl/cl/gpt2/dataset/64_lines.txt', 'r', encoding='utf-8') as f:
            datas = f.readlines()
    except FileNotFoundError:
        # Bug fix: the original only printed here and then fell through,
        # crashing with a NameError on `datas`. Abort cleanly instead.
        print("无法找到数据集文件'64_lines.txt'，请检查文件是否存在。")
        return

    train_data = make_data(datas)
    # Only the vocabulary mapping is needed here; the other two return
    # values of get_dataset_info() are unused in this function.
    word2id, _, _ = get_dataset_info()
    train_num_data = [[word2id[word] for word in line if word in word2id] for line in train_data]

    batch_size = 16

    dataset = MyDataSet(train_num_data)
    # NOTE(review): batches are served in file order (no shuffle=True) —
    # confirm whether deterministic ordering is intentional for this run.
    data_loader = Data.DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.padding_batch)

    model = GPT().to(device)

    lora_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=8,                # LoRA rank
        lora_alpha=16,      # LoRA scaling factor
        lora_dropout=0.1,
        target_modules=['W_Q', 'W_K', 'W_V']  # attention projection layers of the GPT model
    )

    model = get_peft_model(model, lora_config)
    model.to(device)

    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    optimizer = optim.AdamW(model.parameters(), lr=1e-4)

    pretrained_model_path = 'ResGPT2.pt'
    # map_location keeps loading working on CPU-only machines; strict=False
    # because the checkpoint holds base-model weights without the LoRA keys.
    model.load_state_dict(torch.load(pretrained_model_path, map_location=device), strict=False)

    # Hoisted out of the batch loop: the original constructed a fresh
    # CrossEntropyLoss for every batch. ignore_index=0 skips pad positions.
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        for dec_inputs, dec_outputs in tqdm(data_loader):
            optimizer.zero_grad()
            dec_inputs, dec_outputs = dec_inputs.to(device), dec_outputs.to(device)

            outputs, _ = model(dec_inputs)
            loss = criterion(outputs, dec_outputs.view(-1))

            loss.backward()
            # Clip gradients with the module-level CLIP constant, matching
            # train_step (CLIP was previously defined but never used here).
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP)
            optimizer.step()
            total_loss += loss.item()

        avg_loss = total_loss / len(data_loader)
        loss_values.append(f"{avg_loss:.4f}")
        print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss:.4f}")

    lora_adapter_path = "lora_chatbot_weights.bin"
    # Unwrap DataParallel so save_pretrained() is reachable on the PEFT model.
    if isinstance(model, torch.nn.DataParallel):
        model_to_save = model.module
    else:
        model_to_save = model

    # NOTE(review): PEFT's save_pretrained expects a directory path; the
    # ".bin" suffix suggests a file — verify the intended on-disk layout.
    model_to_save.save_pretrained(lora_adapter_path)
    print(f"LoRA 微调的权重已保存至 '{lora_adapter_path}'")

    with open('loss_record_lora_chatbot.txt', 'w') as file:
        file.write(" ".join(loss_values))

# Script entry point: run the LoRA fine-tuning job when executed directly.
if __name__ == '__main__':
    main()