import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms

# -------------------- 3. Data preprocessing & training --------------------
class ChineseOCRData(Dataset):
    """Chinese OCR dataset yielding (image_tensor, label_indices, label_length).

    Character indices start at 1 because index 0 is reserved for the CTC
    blank token (the training loop uses ``nn.CTCLoss(blank=0)``); mapping a
    real character to index 0 would silently corrupt the CTC loss.
    """

    def __init__(self, data_dir, charset_path, img_size=(32, 100)):
        # Load the character set, one character per line. Explicit UTF-8:
        # the platform default encoding may not decode Chinese text.
        with open(charset_path, encoding='utf-8') as f:
            self.charset = [line.strip() for line in f]
        # O(1) char -> index lookup; the +1 offset reserves 0 for CTC blank.
        # (The original used list.index(), an O(n) scan per character.)
        self.char_to_idx = {c: i + 1 for i, c in enumerate(self.charset)}
        # +1 accounts for the blank class the model must also predict.
        self.num_classes = len(self.charset) + 1

        # Load samples (assumed to be a list of (image_path, label) pairs).
        # NOTE(review): load_data is not defined in this class — it must be
        # provided elsewhere (subclass/monkey-patch). TODO confirm.
        self.data = self.load_data(data_dir)

        # Image pipeline: resize to fixed (H, W), to tensor, ImageNet-normalize.
        self.transform = transforms.Compose([
            transforms.Resize(img_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def encode_label(self, label):
        """Map a text label to its list of 1-based character indices.

        Raises KeyError for characters missing from the charset, which is
        clearer than the ValueError ``list.index`` would raise.
        """
        return [self.char_to_idx[c] for c in label]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img_path, label = self.data[idx]
        image = Image.open(img_path).convert('RGB')
        image = self.transform(image)

        label_indices = self.encode_label(label)
        return image, torch.tensor(label_indices), len(label_indices)

def train_abinet():
    """Train an ABINet OCR model with multi-step CTC supervision.

    Sums a CTC loss over every refinement step of the model's output and
    optimizes with AdamW + gradient clipping. Runs for a fixed 100 epochs,
    printing the mean batch loss per epoch.
    """
    # Hyperparameters.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    num_classes = 5000  # adjust to charset size (+1 if index 0 is the CTC blank)
    batch_size = 32
    lr = 1e-4

    # Data.
    dataset = ChineseOCRData(data_dir='data/', charset_path='charset.txt')
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    # Model / optimizer / loss. blank=0 must match the dataset's label
    # encoding: no real character may be encoded as index 0.
    model = ABINet(num_classes=num_classes).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    criterion = nn.CTCLoss(blank=0)

    model.train()
    for epoch in range(100):
        total_loss = 0.0
        for images, labels, label_lengths in dataloader:
            images = images.to(device)
            labels = labels.to(device)
            label_lengths = label_lengths.to(device)

            # Forward pass; output assumed [B, steps, W, C] — TODO confirm
            # against the ABINet implementation.
            outputs = model(images)

            # BUG FIX: the last batch is usually smaller than batch_size
            # (drop_last defaults to False), so size input_lengths from the
            # actual batch — the original hard-coded the constant and would
            # crash or mis-weight the final batch.
            actual_batch = images.size(0)
            input_lengths = torch.full((actual_batch,), outputs.size(2), dtype=torch.long)

            # Sum the CTC loss over each refinement step.
            loss = 0.0
            for step in range(outputs.size(1)):
                log_probs = F.log_softmax(outputs[:, step], dim=-1)
                # CTCLoss expects log_probs shaped (T, B, C).
                loss += criterion(log_probs.permute(1, 0, 2), labels,
                                  input_lengths, label_lengths)

            # Backward pass with gradient clipping for stability.
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()

            total_loss += loss.item()

        print(f'Epoch {epoch+1}, Loss: {total_loss/len(dataloader):.4f}')

def collate_fn(batch):
    """Collate (image, label, length) triples into batched tensors.

    Returns (images [B,C,H,W], padded labels [B,Lmax], label_lengths [B]).
    Labels are right-padded with 0, which must be the CTC blank/padding index.
    """
    images = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    label_lengths = [item[2] for item in batch]

    # All images share a fixed size after Resize, so stack directly.
    # BUG FIX: the original used pad_sequence here, which pads dim 0 (the
    # channel axis), not the width — it only worked because every shape
    # already matched, and torch.stack states the intent correctly.
    images = torch.stack(images, dim=0)

    # Right-pad variable-length labels to the longest in the batch.
    labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=0)

    return images, labels, torch.tensor(label_lengths)

# Script entry point: start training only when run directly, not on import.
if __name__ == '__main__':
    train_abinet()