import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup

class QATrainer:
    """Fine-tuning loop for extractive question-answering models.

    Runs AdamW with a linear learning-rate decay schedule over
    ``train_loader`` and, when a ``val_loader`` is supplied, evaluates
    the model after every epoch (the original implementation accepted
    ``val_loader`` but never used it).

    Batches are expected to be dicts holding ``input_ids``,
    ``attention_mask``, ``start_positions`` and ``end_positions``
    tensors, and the model is expected to return an object exposing a
    ``.loss`` attribute (HuggingFace QA-head style) — TODO confirm
    against the actual model/dataset used by callers.
    """

    def __init__(self, model, train_loader, val_loader=None,
                 learning_rate=2e-5, epochs=3, device='cpu',
                 max_grad_norm=None):
        """Set up the model, optimizer and LR schedule.

        Args:
            model: QA model with a HuggingFace-style forward signature.
            train_loader: DataLoader yielding training batches.
            val_loader: Optional DataLoader used for per-epoch evaluation.
            learning_rate: AdamW learning rate.
            epochs: Number of passes over the training data.
            device: Device identifier to move the model and batches to.
            max_grad_norm: If not ``None``, clip gradients to this L2
                norm before each optimizer step (``None`` keeps the
                original unclipped behavior).
        """
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.epochs = epochs
        self.device = device
        self.max_grad_norm = max_grad_norm

        # Move model to device before building the optimizer so the
        # optimizer state lives on the same device as the parameters.
        self.model.to(self.device)

        self.optimizer = AdamW(self.model.parameters(), lr=learning_rate)
        # Linear decay from the initial LR down to 0 over the whole run
        # (no warmup steps).
        total_steps = len(train_loader) * epochs
        self.scheduler = get_linear_schedule_with_warmup(
            self.optimizer,
            num_warmup_steps=0,
            num_training_steps=total_steps,
        )

    def _batch_loss(self, batch):
        """Move one batch to the device, run the model, return its loss."""
        outputs = self.model(
            input_ids=batch['input_ids'].to(self.device),
            attention_mask=batch['attention_mask'].to(self.device),
            start_positions=batch['start_positions'].to(self.device),
            end_positions=batch['end_positions'].to(self.device),
        )
        return outputs.loss

    def train(self):
        """Run the fine-tuning loop.

        Returns:
            A list with one dict per epoch containing ``train_loss``
            and, when a ``val_loader`` was supplied, ``val_loss``.
            (The original returned ``None``; old callers that ignore
            the return value are unaffected.)
        """
        history = []
        for epoch in range(self.epochs):
            self.model.train()
            total_loss = 0.0

            for batch in self.train_loader:
                self.optimizer.zero_grad()
                loss = self._batch_loss(batch)
                loss.backward()
                if self.max_grad_norm is not None:
                    # Standard stabilizer for transformer fine-tuning.
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.max_grad_norm)
                self.optimizer.step()
                self.scheduler.step()
                total_loss += loss.item()

            # max(..., 1) guards against a zero-length loader, which
            # previously raised ZeroDivisionError.
            avg_train_loss = total_loss / max(len(self.train_loader), 1)
            record = {'train_loss': avg_train_loss}
            print(f'Epoch {epoch+1}: Training Loss: {avg_train_loss:.4f}')

            # Fix: val_loader was stored but never used.
            if self.val_loader is not None:
                val_loss = self.evaluate()
                record['val_loss'] = val_loss
                print(f'Epoch {epoch+1}: Validation Loss: {val_loss:.4f}')

            history.append(record)
        return history

    def evaluate(self):
        """Return the mean loss over ``val_loader`` without updating weights."""
        self.model.eval()
        total_loss = 0.0
        with torch.no_grad():
            for batch in self.val_loader:
                total_loss += self._batch_loss(batch).item()
        return total_loss / max(len(self.val_loader), 1)