from pathlib import Path

import torch

from denoiser.pretrained import dns64

def convert_model(input_path, output_path=None):
    """Convert a denoiser ``.th`` checkpoint to a traced TorchScript ``.pt`` file.

    Loads a state dict from *input_path*, applies it to a fresh ``dns64``
    model, traces the model with a 1-second (16 kHz) mono example input,
    and saves the result.

    Args:
        input_path: Path to the input ``.th`` state-dict file.
        output_path: Path for the traced ``.pt`` output. Defaults to the
            input path with a ``_traced.pt`` suffix (e.g. ``model.th`` ->
            ``model_traced.pt``).

    Returns:
        The path the traced model was saved to (str or Path).

    Raises:
        Exception: Re-raises any error from loading, tracing, or saving,
            after printing a diagnostic message.
    """
    try:
        # NOTE(security): torch.load unpickles the file — only run this on
        # checkpoints from a trusted source.
        print(f"Loading model from {input_path}")
        state_dict = torch.load(input_path, map_location='cpu')

        # Build the architecture the checkpoint was trained for, then load
        # the weights and switch to inference mode.
        model = dns64()
        model.load_state_dict(state_dict)
        model.eval()

        # Default output path: same location/stem as the input, with a
        # "_traced.pt" suffix.
        if output_path is None:
            in_path = Path(input_path)
            output_path = in_path.with_name(in_path.stem + '_traced.pt')

        # Example input: batch of 1, mono channel, 16000 samples (1 s at
        # 16 kHz — assumed sample rate of dns64; confirm against training).
        example_input = torch.randn(1, 1, 16000)

        # Trace under no_grad so the exported graph carries no autograd
        # bookkeeping — this is an inference-only artifact.
        with torch.no_grad():
            traced_model = torch.jit.trace(model, example_input)

        traced_model.save(str(output_path))
        print(f"Model converted and saved to {output_path}")
        return output_path

    except Exception as e:
        print(f"Error converting model: {str(e)}")
        raise

def _main():
    """CLI entry point: parse arguments and run the conversion."""
    import argparse

    arg_parser = argparse.ArgumentParser(
        description="Convert .th model to TorchScript format")
    arg_parser.add_argument("--input", type=str, required=True,
                            help="Input .th model path")
    arg_parser.add_argument("--output", type=str,
                            help="Output .pt model path (optional)")
    cli_args = arg_parser.parse_args()
    convert_model(cli_args.input, cli_args.output)


if __name__ == "__main__":
    _main()