from tokenize import group
import torch.nn as nn
import torch
import torchaudio
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import argparse


class Vadnet(nn.Module):
    """Frame-level voice activity detection (VAD) network.

    A stack of Conv1d + MaxPool1d groups over fbank-style features,
    followed by a 1x1 projection to per-frame class scores.
    """

    def __init__(self, arg):
        """Build the conv stack from parsed command-line arguments.

        Args:
            arg: namespace with ``input_dim`` (feature size, e.g. 80
                fbank bins), ``output_dim`` (number of VAD classes) and
                ``layer_num`` (number of Conv1d+MaxPool1d groups).
        """
        super(Vadnet, self).__init__()
        feat_dim = arg.input_dim
        layers = []
        for _ in range(arg.layer_num):
            # 'same'-style padding and stride-1 pooling keep the time
            # dimension T unchanged, matching the [B, T, L] contract
            # documented in forward().
            layers.append(nn.Conv1d(feat_dim, feat_dim,
                                    kernel_size=3, padding=1))
            layers.append(nn.MaxPool1d(kernel_size=3, stride=1, padding=1))
        # 1x1 conv projects features to per-frame class scores.
        layers.append(nn.Conv1d(feat_dim, arg.output_dim, kernel_size=1))
        # nn.Sequential (not ModuleList) so self.nnet is callable.
        self.nnet = nn.Sequential(*layers)

    def forward(self, input):
        """Score each frame.

        Args:
            input (tensor): [B, T, F] batch of feature frames.

        Returns:
            tensor: [B, T, L] per-frame class scores (L = output_dim).
        """
        # Conv1d expects [B, C, T]; move features to the channel axis
        # and back so callers keep the time-major layout.
        x = input.transpose(1, 2)
        x = self.nnet(x)
        return x.transpose(1, 2)

    def get_state_dict(self, state_dict):
        """Return ``state_dict`` unchanged.

        Passthrough hook kept for checkpoint-loading call sites that
        may remap keys later.
        """
        return state_dict

def run(arg):
    """Placeholder entry point for the VAD run loop.

    Args:
        arg: parsed command-line arguments (currently unused).
    """
    # TODO: implement training / inference using ``arg``.

def main(args):
    """Dispatch parsed command-line arguments to :func:`run`.

    Args:
        args: namespace produced by the CLI argument parser.
    """
    run(args)


if __name__ == "__main__":
    # 'parser' (not 'group') avoids shadowing the stray module-level
    # import and says what the object actually is.
    parser = argparse.ArgumentParser(description="frame-level VAD network")
    parser.add_argument('--input_dim', type=int, default=80,
                        help='number of input dims, such as fbank')
    parser.add_argument('--output_dim', type=int, default=2,
                        help='number of vad class')
    parser.add_argument('--layer_num', type=int, default=3,
                        help='number of cnn group layers')

    main(parser.parse_args())
    


    