import torch
from ave2flame.networks import AudioEncoder
from torch.utils.data import DataLoader
from ezdlops.ezdlops.core.dataset import ZipDataset, CombinedDataset
from ave2flame.dataset import AudDataset, BFMDataset
from ave2flame.trainer_3dmm import AveFace2BFM_Diff as Trainer
from pathlib import Path
import glob
import tqdm

if __name__ == '__main__':
    # Training entry point: pairs .wav audio clips with their .npz 3DMM
    # coefficient files and fits the Trainer's decoder heads on them.

    # Folder containing paired <name>.wav / <name>.npz training samples.
    aud_folder = Path("3dmm").expanduser().resolve()
    add_noise = True

    # Build one ZipDataset per (audio, coefficients) pair; .wav files with a
    # missing .npz companion are skipped.  glob() already guarantees the
    # .wav itself exists, so only the companion needs checking.
    dsets = []
    for aud_path in aud_folder.glob('*.wav'):
        npz_path = aud_path.with_suffix('.npz')
        if not npz_path.exists():
            continue
        dsets.append(ZipDataset(AudDataset(aud_path), BFMDataset(npz_path)))
    if not dsets:
        # Fail early with a clear message instead of letting
        # CombinedDataset(*[]) raise something opaque downstream.
        raise FileNotFoundError(f"No paired .wav/.npz samples found in {aud_folder}")
    dataset = CombinedDataset(*dsets)

    # Tag the trainer alias so noisy and noise-free runs keep separate
    # checkpoints/logs instead of overwriting each other.
    Trainer._alias = f"{Trainer._alias}{'_add_noise' if add_noise else ''}"
    trainer = Trainer(
        'checkpoints/audio_visual_encoder.pth',
        torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    )
    trainer.load_auto()  # resume from an existing checkpoint if one is found

    data_loader = DataLoader(dataset, batch_size=256, shuffle=True)

    # NOTE(review): only the two decoder heads are handed to the optimizer —
    # presumably the audio encoder is meant to stay frozen; confirm.
    groups = [
        {'params': trainer.decoder.parameters(), 'lr': 1e-3},
        {'params': trainer.decoder_diff.parameters(), 'lr': 1e-3},
    ]
    optimizer = torch.optim.Adam(groups)
    trainer.fit(data_loader, optimizer, add_noise=add_noise)