# Load the model (project-local module); training happens on GPU.
from model import model
model = model.MultiModalModel().cuda()
# Load the dataset (project-local module).
from data import dataset
# NOTE(review): "filckr30k" looks like a typo for "flickr30k" — kept as-is
# because the name is referenced below when building the DataLoader.
filckr30k = dataset.getflickr30k()
# Data sampling / collation helpers.
from data import prompt
from torch.utils.data import DataLoader
def collate_fn(batch):
    """Collate raw dataset examples into flat, index-aligned sample lists.

    Each example is expanded by ``prompt.prepare_training_data`` into one or
    more training samples; their images and message lists are gathered into
    two parallel lists and returned as a dict with keys ``'messages'`` and
    ``'images'``.
    """
    all_images = []
    all_messages = []
    for ex in batch:
        for sample in prompt.prepare_training_data(ex['image'], ex['caption']):
            all_images.append(sample['image'])
            all_messages.append(sample['messages'])
    return {'messages': all_messages, 'images': all_images}
# Shuffled batches of 2 raw examples; collate_fn may expand each example into
# several training samples, so effective batch size can exceed 2.
# NOTE(review): this trains on the 'test' split — confirm that is intentional.
dataloader = DataLoader(dataset=filckr30k['test'], batch_size=2, shuffle=True, collate_fn=collate_fn)

# --- Loss function, optimizer, and checkpoint-directory setup ---
from tqdm import tqdm
import torch
import torch.optim as optim
# Project-local loss implementation used by the training loop.
from loss import loss as ls
import os

num_epochs = 2
learning_rate = 1e-6
# Only the vision projection layer's parameters are optimized; the rest of the
# model is excluded from the optimizer (presumably frozen — TODO confirm
# whether requires_grad is also disabled elsewhere).
optimizer = optim.AdamW(model.vision_projection.parameters(), lr=learning_rate)
step = 0
# Rolling window of the most recent (up to 100) batch losses, saved in checkpoints.
running_losses = []

save_path = "outs/checkpoints"
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs
# and is a no-op when the directory already exists.
os.makedirs(save_path, exist_ok=True)

for epoch in range(num_epochs):
    model.train()
    epoch_loss = 0.0    # sum of ALL batch losses this epoch (for the epoch average)
    running_loss = 0.0  # sum since the last 100-step report; reset after printing
    progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}/{num_epochs}", leave=False)
    for batch in progress_bar:
        images = batch['images']
        messages = batch['messages']

        optimizer.zero_grad()

        projected_vision_states, img_token_positions, outputs = model(messages, images, return_lm_loss=True)
        # Reuse the project's existing loss computation.
        loss = ls.loss_fct(projected_vision_states, img_token_positions, outputs)
        loss.backward()
        optimizer.step()

        batch_loss = loss.item()
        epoch_loss += batch_loss
        running_loss += batch_loss
        running_losses.append(batch_loss)

        # Keep only the 100 most recent losses for checkpointing.
        if len(running_losses) > 100:
            running_losses.pop(0)

        # Periodic progress report: mean loss over the last ~100 steps.
        # (step is module-level and carries across epochs, matching original behavior.)
        if step % 100 == 0 and step != 0:
            print(running_loss / 100)
            running_loss = 0.0

        step += 1

    # BUG FIX: the original computed running_loss / len(dataloader), but
    # running_loss is reset every 100 steps, so the reported epoch average was
    # wrong. Use the full epoch sum; max(..., 1) guards an empty dataloader.
    avg_loss = epoch_loss / max(len(dataloader), 1)
    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {avg_loss:.4f}")

    # Save model/optimizer state plus the recent-loss window after each epoch.
    torch.save({
        'epoch': epoch + 1,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'running_losses': running_losses,
    }, os.path.join(save_path, f'model_epoch_{epoch+1}.pth'))

print("Training complete.")
