import torch
from ave2flame.networks import AudioEncoder
from torch.utils.data import DataLoader
from ezdlops.core.dataset import ZipDataset, CombinedDataset
from ave2flame.dataset import AudDataset, FlameDataset
from ave2flame.trainer_axis import AveFace2Flame_Diff as Trainer
from pathlib import Path
import glob
import tqdm

if __name__ == '__main__':
    # Glob patterns for the paired training data: audio clips (.wav) and
    # tracked FLAME parameter files (.npz).
    aud_pattern = Path(r"~/data/fps25/*.wav").expanduser()
    npz_pattern = Path(r"~/data/trace/*/*/tracked_flame_params_30.npz").expanduser()

    aud_paths = sorted(glob.glob(str(aud_pattern)))
    npz_paths = sorted(glob.glob(str(npz_pattern)))

    # zip() silently truncates to the shorter list, which would drop data or
    # mis-pair audio clips with FLAME tracks — fail loudly instead.
    # NOTE(review): pairing relies on the sorted order of two different
    # directory trees lining up one-to-one; verify the naming convention.
    if len(aud_paths) != len(npz_paths):
        raise RuntimeError(
            f"Audio/FLAME file count mismatch: "
            f"{len(aud_paths)} wav files vs {len(npz_paths)} npz files"
        )

    # One ZipDataset per (audio, flame) pair; CombinedDataset concatenates
    # them into a single indexable dataset.
    dsets = [
        ZipDataset(AudDataset(wav), FlameDataset(npz))
        for wav, npz in zip(aud_paths, npz_paths)
    ]
    dataset = CombinedDataset(*dsets)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    trainer = Trainer('checkpoints/audio_visual_encoder.pth', device)
    trainer.load_auto()  # presumably resumes from the latest checkpoint — TODO confirm

    data_loader = DataLoader(dataset, batch_size=256, shuffle=True)

    # Optimize only the two decoder heads; no parameter group is registered
    # for the audio encoder, so it stays frozen — NOTE(review): confirm intended.
    groups = [
        {'params': trainer.decoder.parameters(), 'lr': 1e-3},
        {'params': trainer.decoder_diff.parameters(), 'lr': 1e-3},
    ]

    optimizer = torch.optim.Adam(groups)
    trainer.fit(data_loader, optimizer)