
if True:
    import sys
    import os
    #os.environ['CUDA_VISIBLE_DEVICES'] = "1"#attention the text order

    dir_path = os.path.dirname(os.path.realpath(__file__))  # directory containing this file
    parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))  # parent directory

    sys.path.append(parent_dir_path)  # make the parent directory importable


import argparse
import glob

import yaml

from models.commom_fusion import *





class Model(nn.Module):
    """YOLOv5-style fusion detection model built from a yaml config.

    The network takes two image tensors (two modalities of the same scene);
    the first stage fuses them and the remaining stages run sequentially,
    ending in a Detect() head.
    """

    def __init__(self, model_cfg='yolov5s.yaml', nc=None):
        """Build the model.

        Args:
            model_cfg: an already-parsed model dict, or a path to a *.yaml file.
            nc: optional number of classes; when truthy it overrides the
                'nc' value from the yaml.
        """
        super().__init__()
        if isinstance(model_cfg, dict):
            self.md = model_cfg  # model dict
        else:  # path to a *.yaml file
            with open(model_cfg) as f:
                self.md = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        if nc:
            self.md['nc'] = nc  # override yaml value
        self.model = wiky_parse_model(self.md)  # nn.Sequential of stages

        ch = self.md['image_ch']  # channels of each input image

        # Build strides and rescale anchors by running a dummy 64x64 forward
        # pass through both inputs; the last module is the Detect() head.
        m = self.model[-1]  # Detect()
        m.stride = torch.tensor(
            [64 / x.shape[-2] for x in self.forward(torch.zeros(1, ch, 64, 64),
                                                    torch.zeros(1, ch, 64, 64))])
        m.anchors /= m.stride.view(-1, 1, 1)  # express anchors in stride units
        self.stride = m.stride

        # Init weights, biases
        torch_utils.initialize_weights(self)
        # self._initialize_biases()  # only run once
        print('')

    def forward(self, x, y, augment=False, profile=False):
        """Run inference on the two input tensors.

        NOTE(review): the augment branch is not implemented and falls
        through, returning None; only single-scale inference is supported.
        """
        if augment:
            pass  # augmented inference not implemented (returns None)
        else:
            return self.forward_once(x, y, profile)  # single-scale inference, train

    def forward_once(self, x, y, profile=False):
        """Single forward pass: stage 0 fuses (x, y); later stages chain on x."""
        for i, m in enumerate(self.model):
            x = m(x, y) if i == 0 else m(x)

        if profile:
            pass  # per-layer timing not implemented
        return x

    def _print_biases(self):
        """Print detection-head conv bias summaries (debug helper)."""
        m = self.model[-1]  # Detect() module
        for f in sorted([x % m.i for x in m.f]):  # from
            b = self.model[f].bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean()))

    def fuse(self):
        """Fuse Conv2d() + BatchNorm2d() pairs in-place for faster inference."""
        print('Fusing layers...')
        for m in self.model.modules():
            if type(m) is Conv:
                m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn)  # update conv
                m.bn = None  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        torch_utils.model_info(self)


def collector(layers_lits, nn_module):
    """Append *nn_module* to *layers_lits*, printing its index, name and
    parameter count as a model-summary row.

    Args:
        layers_lits: list of modules collected so far (mutated in place).
        nn_module: the torch module to register.

    Returns:
        The same list object with nn_module appended.
    """
    # Derive a printable name for the module.
    if isinstance(nn_module, nn.Sequential):
        name = type(nn_module).__name__
    else:
        name = str(nn_module)
        # Clean up the repr string.
        if name.startswith('__main__.'):
            name = name.replace('__main__.', '')
        if name.endswith(')'):
            # NOTE(review): truncating to the first 4 characters looks like a
            # bug — probably name[:-1] was intended to drop the trailing ')'.
            # Behavior preserved pending confirmation.
            name = name[:4]

    para = sum(x.numel() for x in nn_module.parameters())  # number of params

    print('%3s %-40s %10.0f' % (len(layers_lits), name, para))

    # Append and return the list itself (list.append returns None,
    # so it cannot be returned directly).
    layers_lits.append(nn_module)

    return layers_lits

def wiky_parse_model(md):
    """Assemble the fusion network described by model dict *md*.

    Builds three stages — fusion backbone, head (without detect), and the
    Detect() output layer — logging each via collector(), and wraps them
    in an nn.Sequential so the caller can index and iterate them.
    """
    print('\n%3s %-40s %10s' % ('', 'module', 'params'))

    anchors = md['anchors']
    nc = md['nc']
    gd = md['depth_multiple']
    gw = md['width_multiple']
    input_img_ch = md['image_ch']

    na = len(anchors[0]) // 2  # number of anchors per detection layer
    no = na * (nc + 5)         # outputs per anchor = anchors * (classes + 5)

    stages = []
    stages = collector(stages, yolov5_fusion_backbone(input_img_ch, input_img_ch, gd, gw))
    stages = collector(stages, yolov5_head_without_detect(na_nc_5=no, depth_multiple=gd, width_multiple=gw))
    stages = collector(stages, Detect(nc, anchors))

    # Return the wrapped Sequential, not the raw list.
    return nn.Sequential(*stages)



if __name__ == '__main__':
    import glob  # explicit import; previously relied on the star import above

    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5m.yaml', help='model.yaml')
    parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()

    # Locate the config file anywhere under the current directory.
    matches = glob.glob('./**/' + opt.cfg, recursive=True)
    if not matches:
        # Fail with a clear message instead of an opaque IndexError.
        raise FileNotFoundError('config file not found: %s' % opt.cfg)
    opt.cfg = matches[0]
    device = torch_utils.select_device(opt.device)

    # Create model and move it to the selected device
    model = Model(opt.cfg)
    model = model.to(device)

    # Profile FLOPs / params with a dummy two-stream input.
    input_X = torch.zeros((1, 3, 768, 1280), device=device)

    from thop import profile
    macs, para = profile(model, inputs=(input_X, input_X), verbose=False)

    fs = ' %.1f GFLOPS' % (macs / 1E9 * 2)
    print(fs)  # e.g. 211.9 GFLOPS for yolov5m
    print(' %.1f M' % (para / 1E6))  # e.g. 27.1 M parameters