# -------------------- 3. Training & evaluation --------------------
class ChineseOCRData(torch.utils.data.Dataset):
    """Synthetic Chinese OCR dataset (placeholder for real data).

    Each item is a (3, H, W) float tensor plus its text label. Samples are
    generated once at construction, so this only exercises the training
    plumbing — swap in real image/label pairs for actual training.
    """

    def __init__(self, num_samples=10000, img_size=(32, 128)):
        self.img_size = img_size
        # Samples are already tensors, so ToTensor() must NOT be in the
        # pipeline: it only accepts PIL images / ndarrays and would raise
        # TypeError on every __getitem__. Resize and Normalize both operate
        # on tensors directly.
        self.transform = transforms.Compose([
            transforms.Resize(img_size),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        # Fixed missing closing paren on the tuple (original was a
        # SyntaxError). All entries deliberately share one tensor to keep
        # memory flat; replace with real data in practice.
        self.samples = [(torch.rand(3, *img_size), "测试文本")] * num_samples

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # Returns (normalized image tensor, raw text label).
        img, text = self.samples[idx]
        return self.transform(img), text

def train_parseq():
    """Train a PARSeq model on the synthetic Chinese OCR dataset.

    Optimizes a weighted sum of the parallel-decoding and autoregressive
    losses with AdamW + gradient clipping, printing the mean loss per epoch.
    """
    # Hyperparameters
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    num_classes = 5000  # size of the Chinese character set
    batch_size = 64
    lr = 3e-4
    epochs = 100

    # Data
    loader = DataLoader(ChineseOCRData(), batch_size=batch_size,
                        shuffle=True, collate_fn=collate_fn)

    # Model, optimizer, loss (index 0 is padding — excluded from the loss)
    model = PARSeq(num_classes=num_classes).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    model.train()
    for epoch in range(epochs):
        running = 0.0
        for images, labels in loader:
            images = images.to(device)
            targets = labels_to_tensor(labels).to(device)

            # Forward pass: parallel and autoregressive decoder logits
            par_logits, ar_logits = model(images)

            # Weighted fusion of the two decoding losses
            flat_targets = targets.view(-1)
            par_loss = criterion(par_logits.view(-1, num_classes), flat_targets)
            ar_loss = criterion(ar_logits.view(-1, num_classes), flat_targets)
            loss = par_loss + 0.5 * ar_loss

            # Backward pass with gradient clipping for stability
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()

            running += loss.item()

        print(f"Epoch {epoch+1}/{epochs}, Loss: {running/len(loader):.4f}")

def collate_fn(batch):
    """Collate (image, text) pairs: stack images into one tensor, keep
    the variable-length text labels as a plain list."""
    imgs, txts = zip(*batch)
    return torch.stack(list(imgs)), list(txts)

def labels_to_tensor(texts, max_len=25):
    """Convert a batch of text labels into a (batch, max_len) index tensor.

    Texts longer than ``max_len`` are truncated; unfilled positions stay 0,
    matching CrossEntropyLoss(ignore_index=0). Requires a ``char_to_index``
    mapping function defined elsewhere in the project.
    """
    out = torch.zeros((len(texts), max_len), dtype=torch.long)
    for row, label in enumerate(texts):
        for col, ch in enumerate(label[:max_len]):
            out[row, col] = char_to_index(ch)
    return out

if __name__ == "__main__":
    train_parseq()  # script entry point: run the full training loop