import os
from datetime import datetime

import torch
import torchaudio

from tf_gridnet import TF_GridNet
### parameters
device = 4        # CUDA device index the model and audio are moved to
clip_seconds = 4  # length of the clip to separate, in seconds
# NOTE(review): renamed from `time`, which shadowed the stdlib module name.
SAMPLE_RATE = 8000  # 8 kHz condition of wsj0-mix (path says wav8k)
file_path = "/NASdata/zb/data/wsj0-mix/2speakers/wav8k/max/tt/mix/421o0304_1.6849_446c0204_-1.6849.wav"
####

# Load the two-speaker mixture and keep only the first `clip_seconds` seconds.
audio, _ = torchaudio.load(file_path)
print(audio.shape)
audio = audio[:, :SAMPLE_RATE * clip_seconds]
print(audio.shape)

# Build the model and restore trained weights: the checkpoint is loaded on
# CPU first, then the whole model is moved to the target device.
model = TF_GridNet(B=5, device=device)
# NOTE(review): torch.load unpickles arbitrary objects — a tampered checkpoint
# can execute code. Prefer torch.load(..., weights_only=True) if the installed
# torch version supports it.
ckpt = torch.load("tf_gridnet.pth", map_location=torch.device('cpu'))
model.load_state_dict(ckpt['model_state_dict'])
model.to(device)
model.eval()

## inference
with torch.no_grad():
    audio = audio.to(device)
    output = model(audio)

## save the output
# torchaudio.save does not create missing directories — make sure it exists.
os.makedirs("output", exist_ok=True)
current_time = datetime.now()
formatted_time = current_time.strftime("%Y_%m_%d_%H_%M_%S")
# output[0] presumably holds the separated sources as [n_src, samples] — TODO confirm
source1 = output[0][0].unsqueeze(0).cpu()  # [1, samples]
source2 = output[0][1].unsqueeze(0).cpu()  # [1, samples]
torchaudio.save(f"output/source1_{clip_seconds}s_{formatted_time}.wav", source1, SAMPLE_RATE)
torchaudio.save(f"output/source2_{clip_seconds}s_{formatted_time}.wav", source2, SAMPLE_RATE)
print("done...")