# Dataset construction

import os
import torch
from src.configs import base_config
from src.data_preprocessing.path_tokenizer import PathTokenizer
from src.data_preprocessing.vector_to_tensor import svg_to_tensor
from src.data_preprocessing.rasterizer import generate_style_images

def build_dataset(svg_metadata):
    """Build and save (image_tensor, path_tokens, style_tensor) triples.

    Iterates over *svg_metadata* (uses ``iterrows`` — presumably a pandas
    DataFrame with 'font', 'svg_path' and 'char' columns; confirm against
    callers), converts each character SVG into a tensor, tokenizes the raw
    SVG text, pairs it with the font's precomputed style image, and saves
    each triple to ``<tensor_dir>/<font>/<char>.pt``.

    Returns:
        dict with:
          - 'num_samples': total number of files under ``tensor_dir``
          - 'style_mapping': mapping returned by ``generate_style_images``
    """
    tensor_dir = base_config['data']['processed']['tensor_dir']
    style_dir = os.path.join(base_config['data']['processed']['root'], 'styles')

    # Create output directories (font sub-directory structure is preserved).
    os.makedirs(tensor_dir, exist_ok=True)
    os.makedirs(style_dir, exist_ok=True)

    # Generate the global style images (one per font).
    style_mapping = generate_style_images(svg_metadata, style_dir)

    # FIX: the original referenced an undefined name `tokenizer`, which
    # raised NameError at runtime. Instantiate it once, outside the loop.
    # NOTE(review): assumes PathTokenizer takes no constructor args — confirm.
    tokenizer = PathTokenizer()

    # Cache style tensors per font so each style file is loaded only once
    # instead of once per character.
    style_cache = {}

    for _, record in svg_metadata.iterrows():
        font = record['font']

        # Build the full path including the font sub-directory.
        svg_path = os.path.join(
            base_config['data']['processed']['svg_dir'],
            font,                 # font sub-directory
            record['svg_path']    # character SVG file name
        )

        # Structural data; skip records that fail to convert.
        image_tensor = svg_to_tensor(svg_path)
        if image_tensor is None:
            continue

        # Path token sequence from the raw SVG text.
        with open(svg_path, encoding='utf-8') as f:
            path_tokens = tokenizer.tokenize(f.read())

        # Load (or reuse) the style image for this font.
        if font not in style_cache:
            style_cache[font] = torch.load(
                os.path.join(style_dir, f"{font}_style.pt")
            )
        style_tensor = style_cache[font]

        # Create the font-specific output directory.
        font_output_dir = os.path.join(tensor_dir, font)
        os.makedirs(font_output_dir, exist_ok=True)

        # Save the (structure, tokens, style) triple.
        torch.save(
            (image_tensor, path_tokens, style_tensor),
            os.path.join(font_output_dir, f"{record['char']}.pt")
        )

    return {
        'num_samples': sum(len(files) for _, _, files in os.walk(tensor_dir)),
        'style_mapping': style_mapping
    }