|
DLC info for: /data0/ai-transform-data/data/3622e363-c39a-4fd2-998c-1a54b793f782/yolov5s_save_path/cutoff_yolov5s_int16_snpe2.dlc
|
|
Model Version: N/A
|
|
Model Copyright: N/A
|
|
Id,Name,Type,Inputs,Outputs,Out Dims,Runtimes,Parameters
|
|
0,/model.0/conv/Conv,Conv2d,"images (data type: uFxp_16; tensor dimension: [1, 640, 640, 3]; tensor type: APP_WRITE) [NW Input]","/model.0/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)",1x320x320x32,A D G C,"images encoding : bitwidth 16, min 0.000000000000, max 1.000000000000, scale 0.000015259022, offset 0.000000000000"
|
|
,,,"model.0.conv.weight (data type: uFxp_8; tensor dimension: [6, 6, 3, 32]; tensor type: STATIC)",,,,"model.0.conv.weight encoding : bitwidth 8, min -11.740019798279, max 14.292198181152, scale 0.102087132633, offset -115.000000000000"
|
|
,,,model.0.conv.bias (data type: sFxp_32
|
|
,,,,,,,"/model.0/conv/Conv_output_0 encoding : bitwidth 16, min -47.576812744141, max 50.069561004639, scale 0.001489988179, offset -31931.000000000000"
|
|
,,,,,,,bias_op_name: model.0.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[2, 2], [2, 2]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 3k (0.0483%)
|
|
,,,,,,,MACs per inference: 353M (4.31%)
|
|
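The encoding fields in the Parameters column (bitwidth, min, max, scale, offset) describe the fixed-point quantization of each tensor. A minimal sketch of how they fit together, assuming the relationship implied by the rows above (real = scale * (stored_uint + offset)); the printed values reproduce the listed images and /model.0/conv/Conv_output_0 encodings:

    # Hedged sketch: how the encoding fields above appear to relate to each other.
    # Assumes real = scale * (q + offset), scale = (max - min) / (2**bitwidth - 1),
    # offset = round(min / scale); inferred from the listed rows, not from SNPE docs.
    def encoding(min_val, max_val, bitwidth):
        scale = (max_val - min_val) / (2 ** bitwidth - 1)
        offset = round(min_val / scale)
        return scale, offset

    def dequantize(q, scale, offset):
        return scale * (q + offset)

    print(encoding(0.0, 1.0, 16))                            # images: ~ (1.5259e-05, 0)
    print(encoding(-47.576812744141, 50.069561004639, 16))   # Conv_output_0: ~ (0.00148999, -31931)
    print(dequantize(0, 0.001489988179, -31931))             # ~ -47.58, the listed min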
1,/model.0/act/Sigmoid,Neuron,"/model.0/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)","/model.0/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)",1x320x320x32,A D G C,"/model.0/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
2,/model.0/act/Mul,Eltwise_Binary,"/model.0/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)","/model.0/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)",1x320x320x32,A D G C,"/model.0/act/Mul_output_0 encoding : bitwidth 16, min -0.278106689453, max 50.069190979004, scale 0.000768250495, offset -362.000000000000"
|
|
,,,"/model.0/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
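Each Conv here is followed by a Neuron (Sigmoid) and an Eltwise_Binary (ElementWiseMultiply) that multiplies the Conv output by its own sigmoid: this pair is YOLOv5's SiLU (Swish) activation exported as two separate ops. The calibrated minimum of every such Mul output (about -0.278) sits near SiLU's global minimum. A tiny numpy sketch:

    import numpy as np

    def silu(x):
        # Sigmoid then elementwise multiply with the same input,
        # i.e. the Neuron + Eltwise_Binary pair in layers 1-2 above.
        return x * (1.0 / (1.0 + np.exp(-x)))

    x = np.linspace(-10.0, 10.0, 200001)
    print(silu(x).min())   # ~ -0.2785, cf. the listed min -0.278106689453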
3,/model.1/conv/Conv,Conv2d,"/model.0/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 320, 320, 32]; tensor type: NATIVE)","/model.1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"model.1.conv.weight encoding : bitwidth 8, min -0.967206001282, max 0.794490695000, scale 0.006908614654, offset -140.000000000000"
|
|
,,,"model.1.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 32, 64]; tensor type: STATIC)",,,,"model.1.conv.bias encoding : bitwidth 32, min -4.244979858398, max 4.244979858398, scale 0.000000001977, offset 0.000000000000"
|
|
,,,model.1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 18k (0.256%)
|
|
,,,,,,,MACs per inference: 471M (5.74%)
|
|
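The "param count" and "MACs per inference" rows follow from the listed weight shapes and output dimensions. A hedged arithmetic check (my own calculation, not tool output) for /model.0/conv/Conv and /model.1/conv/Conv; the percentages imply totals of roughly 7.2M parameters and 8.2G MACs, in line with YOLOv5s at 640x640:

    # Weight layout in this dump is [kH, kW, Cin, Cout]; outputs are NHWC.
    def conv_params(kH, kW, Cin, Cout):
        # Whether the tool's count includes the bias is not stated; it does not
        # change the rounded figures below.
        return kH * kW * Cin * Cout + Cout

    def conv_macs(kH, kW, Cin, Cout, Hout, Wout):
        return kH * kW * Cin * Cout * Hout * Wout

    print(conv_params(6, 6, 3, 32))             # 3488      -> "3k"
    print(conv_macs(6, 6, 3, 32, 320, 320))     # 353894400 -> "353M (4.31%)"
    print(conv_params(3, 3, 32, 64))            # 18496     -> "18k"
    print(conv_macs(3, 3, 32, 64, 160, 160))    # 471859200 -> "471M (5.74%)"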
4,/model.1/act/Sigmoid,Neuron,"/model.1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"/model.1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
5,/model.1/act/Mul,Eltwise_Binary,"/model.1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"/model.1/act/Mul_output_0 encoding : bitwidth 16, min -0.278422504663, max 81.544090270996, scale 0.001248531509, offset -223.000000000000"
|
|
,,,"/model.1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
6,/model.2/cv1/conv/Conv,Conv2d,"/model.1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"model.2.cv1.conv.weight encoding : bitwidth 8, min -0.663011074066, max 0.260856807232, scale 0.003623011289, offset -183.000000000000"
|
|
,,,"model.2.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 32]; tensor type: STATIC)",,,,"model.2.cv1.conv.bias encoding : bitwidth 32, min -2.148406505585, max 2.148406505585, scale 0.000000001000, offset 0.000000000000"
|
|
,,,model.2.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.2.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 2k (0.0288%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
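The sFxp_32 bias encodings are symmetric (offset 0, min = -max), and the listed scales match max / 2^31; this is an observation from these rows, not a documented guarantee:

    # Hedged check: bias scale appears to be max / 2**31 (symmetric 32-bit range).
    print(2.148406505585 / 2 ** 31)   # ~1.000e-09 vs listed 0.000000001000 (model.2.cv1.conv.bias)
    print(4.244979858398 / 2 ** 31)   # ~1.977e-09 vs listed 0.000000001977 (model.1.conv.bias)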
7,/model.2/cv1/act/Sigmoid,Neuron,"/model.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
8,/model.2/cv1/act/Mul,Eltwise_Binary,"/model.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278536349535, max 22.229328155518, scale 0.000343448017, offset -811.000000000000"
|
|
,,,"/model.2/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
9,/model.2/m/m.0/cv1/conv/Conv,Conv2d,"/model.2/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"model.2.m.0.cv1.conv.weight encoding : bitwidth 8, min -3.478188514709, max 1.619156718254, scale 0.019989589229, offset -174.000000000000"
|
|
,,,"model.2.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 32, 32]; tensor type: STATIC)",,,,"model.2.m.0.cv1.conv.bias encoding : bitwidth 32, min -5.234250545502, max 5.234250545502, scale 0.000000002437, offset 0.000000000000"
|
|
,,,model.2.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.2.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 1k (0.0146%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
10,/model.2/m/m.0/cv1/act/Sigmoid,Neuron,"/model.2/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
11,/model.2/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.2/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278397649527, max 15.105068206787, scale 0.000234736639, offset -1186.000000000000"
|
|
,,,"/model.2/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
12,/model.2/m/m.0/cv2/conv/Conv,Conv2d,"/model.2/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"model.2.m.0.cv2.conv.weight encoding : bitwidth 8, min -2.201115131378, max 2.515560388565, scale 0.018496766686, offset -119.000000000000"
|
|
,,,"model.2.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 32, 32]; tensor type: STATIC)",,,,"model.2.m.0.cv2.conv.bias encoding : bitwidth 32, min -6.463135242462, max 6.463135242462, scale 0.000000003010, offset 0.000000000000"
|
|
,,,model.2.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.2.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 9k (0.128%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
13,/model.2/m/m.0/cv2/act/Sigmoid,Neuron,"/model.2/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
14,/model.2/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.2/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278553187847, max 27.255359649658, scale 0.000420140568, offset -663.000000000000"
|
|
,,,"/model.2/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
15,/model.2/m/m.0/Add,Eltwise_Binary,"/model.2/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/m/m.0/Add_output_0 encoding : bitwidth 16, min -0.556870102882, max 27.111425399780, scale 0.000422191137, offset -1319.000000000000"
|
|
,,,"/model.2/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
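Layer 15 adds the bottleneck branch (/model.2/m/m.0/cv2/act/Mul_output_0) back onto its input (/model.2/cv1/act/Mul_output_0), i.e. a residual shortcut. As a rough sanity check, the calibrated minimum of the sum is close to the sum of the two inputs' minima, since both addends can sit near SiLU's minimum:

    # Hedged check of the Add output's calibrated range (layer 15).
    print(-0.278536349535 + -0.278553187847)   # ~ -0.557, vs listed min -0.556870102882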
16,/model.2/cv2/conv/Conv,Conv2d,"/model.1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"model.2.cv2.conv.weight encoding : bitwidth 8, min -1.497491240501, max 0.635806322098, scale 0.008365873247, offset -179.000000000000"
|
|
,,,"model.2.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 32]; tensor type: STATIC)",,,,"model.2.cv2.conv.bias encoding : bitwidth 32, min -3.142369747162, max 3.142369747162, scale 0.000000001463, offset 0.000000000000"
|
|
,,,model.2.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.2.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 2k (0.0288%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
17,/model.2/cv2/act/Sigmoid,Neuron,"/model.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
18,/model.2/cv2/act/Mul,Eltwise_Binary,"/model.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",1x160x160x32,A D G C,"/model.2/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278407722712, max 42.152873992920, scale 0.000647459878, offset -430.000000000000"
|
|
,,,"/model.2/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
19,/model.2/Concat,Concat,"/model.2/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)","/model.2/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"/model.2/Concat_output_0 encoding : bitwidth 16, min -0.556870102882, max 42.152873992920, scale 0.000651708920, offset -854.000000000000"
|
|
,,,"/model.2/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 32]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
20,/model.2/cv3/conv/Conv,Conv2d,"/model.2/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.2/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"model.2.cv3.conv.weight encoding : bitwidth 8, min -0.952466607094, max 0.699771344662, scale 0.006479364354, offset -147.000000000000"
|
|
,,,"model.2.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 64]; tensor type: STATIC)",,,,"model.2.cv3.conv.bias encoding : bitwidth 32, min -6.437954902649, max 6.437954902649, scale 0.000000002998, offset 0.000000000000"
|
|
,,,model.2.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.2.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 4k (0.0576%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
21,/model.2/cv3/act/Sigmoid,Neuron,"/model.2/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.2/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"/model.2/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
22,/model.2/cv3/act/Mul,Eltwise_Binary,"/model.2/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.2/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",1x160x160x64,A D G C,"/model.2/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278559714556, max 17.124118804932, scale 0.000265547860, offset -1049.000000000000"
|
|
,,,"/model.2/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
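Layers 6 through 22 correspond to one of YOLOv5's C3 (CSP bottleneck) blocks on the 160x160 feature map: a 1x1 cv1 branch (6-8) feeding a residual bottleneck (9-15), a parallel 1x1 cv2 branch (16-18), a channel concat (19), and a 1x1 cv3 fuse (20-22). The Concat runs on axis 3, the channel axis in this NHWC layout, joining 32 + 32 channels into 64; a small shape check:

    import numpy as np

    # Shapes as listed for layer 19 (/model.2/Concat); values are placeholders.
    a = np.zeros((1, 160, 160, 32), dtype=np.float32)   # /model.2/m/m.0/Add_output_0
    b = np.zeros((1, 160, 160, 32), dtype=np.float32)   # /model.2/cv2/act/Mul_output_0
    print(np.concatenate([a, b], axis=3).shape)          # (1, 160, 160, 64)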
23,/model.3/conv/Conv,Conv2d,"/model.2/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 160, 160, 64]; tensor type: NATIVE)","/model.3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"model.3.conv.weight encoding : bitwidth 8, min -0.493704855442, max 0.582318544388, scale 0.004219699651, offset -117.000000000000"
|
|
,,,"model.3.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 64, 128]; tensor type: STATIC)",,,,"model.3.conv.bias encoding : bitwidth 32, min -3.177195310593, max 3.177195310593, scale 0.000000001479, offset 0.000000000000"
|
|
,,,model.3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 73k (1.02%)
|
|
,,,,,,,MACs per inference: 471M (5.74%)
|
|
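The stride-2 Conv layers (/model.0, /model.1, /model.3, and later /model.5, /model.7) halve the spatial resolution at each stage: 640 -> 320 -> 160 -> 80 -> 40 -> 20. The output sizes follow from the listed kernel, pad_amount, and stride values; a quick check:

    # out = (in + pad_before + pad_after - kernel) // stride + 1
    def out_size(inp, k, pad, stride):
        return (inp + 2 * pad - k) // stride + 1

    print(out_size(640, 6, 2, 2))   # 320 -> /model.0/conv/Conv (640x640 -> 320x320)
    print(out_size(160, 3, 1, 2))   # 80  -> /model.3/conv/Conv (160x160 -> 80x80)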
24,/model.3/act/Sigmoid,Neuron,"/model.3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
25,/model.3/act/Mul,Eltwise_Binary,"/model.3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.3/act/Mul_output_0 encoding : bitwidth 16, min -0.278479456902, max 9.023329734802, scale 0.000141936514, offset -1962.000000000000"
|
|
,,,"/model.3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
26,/model.4/cv1/conv/Conv,Conv2d,"/model.3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.4/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.cv1.conv.weight encoding : bitwidth 8, min -0.750764250755, max 0.355853587389, scale 0.004339677747, offset -173.000000000000"
|
|
,,,"model.4.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 64]; tensor type: STATIC)",,,,"model.4.cv1.conv.bias encoding : bitwidth 32, min -1.200073599815, max 1.200073599815, scale 0.000000000559, offset 0.000000000000"
|
|
,,,model.4.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 8k (0.114%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
27,/model.4/cv1/act/Sigmoid,Neuron,"/model.4/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
28,/model.4/cv1/act/Mul,Eltwise_Binary,"/model.4/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278457492590, max 2.755914449692, scale 0.000046301546, offset -6014.000000000000"
|
|
,,,"/model.4/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
29,/model.4/m/m.0/cv1/conv/Conv,Conv2d,"/model.4/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.m.0.cv1.conv.weight encoding : bitwidth 8, min -2.917552232742, max 3.784932374954, scale 0.026284253225, offset -111.000000000000"
|
|
,,,"model.4.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 64]; tensor type: STATIC)",,,,"model.4.m.0.cv1.conv.bias encoding : bitwidth 32, min -3.825795173645, max 3.825795173645, scale 0.000000001782, offset 0.000000000000"
|
|
,,,model.4.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 4k (0.0576%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
30,/model.4/m/m.0/cv1/act/Sigmoid,Neuron,"/model.4/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
31,/model.4/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.4/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278535932302, max 10.484348297119, scale 0.000164231082, offset -1696.000000000000"
|
|
,,,"/model.4/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
32,/model.4/m/m.0/cv2/conv/Conv,Conv2d,"/model.4/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.m.0.cv2.conv.weight encoding : bitwidth 8, min -0.385654598475, max 0.469492524862, scale 0.003353518201, offset -115.000000000000"
|
|
,,,"model.4.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 64, 64]; tensor type: STATIC)",,,,"model.4.m.0.cv2.conv.bias encoding : bitwidth 32, min -2.385303020477, max 2.385303020477, scale 0.000000001111, offset 0.000000000000"
|
|
,,,model.4.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 36k (0.511%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
33,/model.4/m/m.0/cv2/act/Sigmoid,Neuron,"/model.4/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
34,/model.4/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.4/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278453469276, max 5.180250644684, scale 0.000083294486, offset -3343.000000000000"
|
|
,,,"/model.4/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
35,/model.4/m/m.0/Add,Eltwise_Binary,"/model.4/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.0/Add_output_0 encoding : bitwidth 16, min -0.556935131550, max 5.005201816559, scale 0.000084872772, offset -6562.000000000000"
|
|
,,,"/model.4/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
36,/model.4/m/m.1/cv1/conv/Conv,Conv2d,"/model.4/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.m.1.cv1.conv.weight encoding : bitwidth 8, min -2.033593893051, max 1.423515796661, scale 0.013557292521, offset -150.000000000000"
|
|
,,,"model.4.m.1.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 64]; tensor type: STATIC)",,,,"model.4.m.1.cv1.conv.bias encoding : bitwidth 32, min -2.686864852905, max 2.686864852905, scale 0.000000001251, offset 0.000000000000"
|
|
,,,model.4.m.1.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.m.1.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 4k (0.0576%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
37,/model.4/m/m.1/cv1/act/Sigmoid,Neuron,"/model.4/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.1/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
38,/model.4/m/m.1/cv1/act/Mul,Eltwise_Binary,"/model.4/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.1/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278420478106, max 7.763133525848, scale 0.000122706246, offset -2269.000000000000"
|
|
,,,"/model.4/m/m.1/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
39,/model.4/m/m.1/cv2/conv/Conv,Conv2d,"/model.4/m/m.1/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.m.1.cv2.conv.weight encoding : bitwidth 8, min -0.926929771900, max 0.798377454281, scale 0.006765910890, offset -137.000000000000"
|
|
,,,"model.4.m.1.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 64, 64]; tensor type: STATIC)",,,,"model.4.m.1.cv2.conv.bias encoding : bitwidth 32, min -2.182634353638, max 2.182634353638, scale 0.000000001016, offset 0.000000000000"
|
|
,,,model.4.m.1.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.m.1.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 36k (0.511%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
40,/model.4/m/m.1/cv2/act/Sigmoid,Neuron,"/model.4/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.1/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
41,/model.4/m/m.1/cv2/act/Mul,Eltwise_Binary,"/model.4/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.1/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278423190117, max 10.840688705444, scale 0.000169666775, offset -1641.000000000000"
|
|
,,,"/model.4/m/m.1/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
42,/model.4/m/m.1/Add,Eltwise_Binary,"/model.4/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/m/m.1/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/m/m.1/Add_output_0 encoding : bitwidth 16, min -0.835357725620, max 10.651248931885, scale 0.000175274385, offset -4766.000000000000"
|
|
,,,"/model.4/m/m.1/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
43,/model.4/cv2/conv/Conv,Conv2d,"/model.3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.4/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.4.cv2.conv.weight encoding : bitwidth 8, min -1.484934687614, max 1.473333716393, scale 0.011601052247, offset -128.000000000000"
|
|
,,,"model.4.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 64]; tensor type: STATIC)",,,,"model.4.cv2.conv.bias encoding : bitwidth 32, min -4.325590133667, max 4.325590133667, scale 0.000000002014, offset 0.000000000000"
|
|
,,,model.4.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 8k (0.114%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
44,/model.4/cv2/act/Sigmoid,Neuron,"/model.4/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
45,/model.4/cv2/act/Mul,Eltwise_Binary,"/model.4/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.4/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278452366590, max 9.098924636841, scale 0.000143089594, offset -1946.000000000000"
|
|
,,,"/model.4/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
46,/model.4/Concat,Concat,"/model.4/m/m.1/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.4/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.4/Concat_output_0 encoding : bitwidth 16, min -0.835357725620, max 10.651248931885, scale 0.000175274385, offset -4766.000000000000"
|
|
,,,"/model.4/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
47,/model.4/cv3/conv/Conv,Conv2d,"/model.4/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.4/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"model.4.cv3.conv.weight encoding : bitwidth 8, min -0.705637216568, max 0.678497374058, scale 0.005427978933, offset -130.000000000000"
|
|
,,,"model.4.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.4.cv3.conv.bias encoding : bitwidth 32, min -3.149493694305, max 3.149493694305, scale 0.000000001467, offset 0.000000000000"
|
|
,,,model.4.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.4.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
48,/model.4/cv3/act/Sigmoid,Neuron,"/model.4/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.4/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.4/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
49,/model.4/cv3/act/Mul,Eltwise_Binary,"/model.4/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.4/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.4/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278503715992, max 8.450210571289, scale 0.000133191643, offset -2091.000000000000"
|
|
,,,"/model.4/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
50,/model.5/conv/Conv,Conv2d,"/model.4/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.5/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"model.5.conv.weight encoding : bitwidth 8, min -0.783310711384, max 0.505361795425, scale 0.005053617526, offset -155.000000000000"
|
|
,,,"model.5.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 256]; tensor type: STATIC)",,,,"model.5.conv.bias encoding : bitwidth 32, min -3.501016855240, max 3.501016855240, scale 0.000000001630, offset 0.000000000000"
|
|
,,,model.5.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.5.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 295k (4.08%)
|
|
,,,,,,,MACs per inference: 471M (5.74%)
|
|
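/model.5/conv/Conv is the largest layer so far by weights (295k parameters, 4.08%). Each stride-2 stage quadruples Cin*Cout while Hout*Wout drops by a factor of four, so the MAC count of these downsampling convs stays at 471M; a quick check:

    print(3 * 3 * 128 * 256 + 256)       # 295168    -> "295k (4.08%)"
    print(3 * 3 * 128 * 256 * 40 * 40)   # 471859200 -> "471M (5.74%)"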
51,/model.5/act/Sigmoid,Neuron,"/model.5/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.5/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.5/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
52,/model.5/act/Mul,Eltwise_Binary,"/model.5/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.5/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.5/act/Mul_output_0 encoding : bitwidth 16, min -0.278484076262, max 9.544150352478, scale 0.000149883781, offset -1858.000000000000"
|
|
,,,"/model.5/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
53,/model.6/cv1/conv/Conv,Conv2d,"/model.5/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.6/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.cv1.conv.weight encoding : bitwidth 8, min -0.493675291538, max 0.705250442028, scale 0.004701669328, offset -105.000000000000"
|
|
,,,"model.6.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 128]; tensor type: STATIC)",,,,"model.6.cv1.conv.bias encoding : bitwidth 32, min -1.552146315575, max 1.552146315575, scale 0.000000000723, offset 0.000000000000"
|
|
,,,model.6.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
54,/model.6/cv1/act/Sigmoid,Neuron,"/model.6/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
55,/model.6/cv1/act/Mul,Eltwise_Binary,"/model.6/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278463363647, max 4.065525531769, scale 0.000066285022, offset -4201.000000000000"
|
|
,,,"/model.6/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
56,/model.6/m/m.0/cv1/conv/Conv,Conv2d,"/model.6/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.0.cv1.conv.weight encoding : bitwidth 8, min -2.751099348068, max 3.244886159897, scale 0.023513669148, offset -117.000000000000"
|
|
,,,"model.6.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.6.m.0.cv1.conv.bias encoding : bitwidth 32, min -4.260776996613, max 4.260776996613, scale 0.000000001984, offset 0.000000000000"
|
|
,,,model.6.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
57,/model.6/m/m.0/cv1/act/Sigmoid,Neuron,"/model.6/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
58,/model.6/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.6/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278427958488, max 14.952603340149, scale 0.000232410646, offset -1198.000000000000"
|
|
,,,"/model.6/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
59,/model.6/m/m.0/cv2/conv/Conv,Conv2d,"/model.6/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.0.cv2.conv.weight encoding : bitwidth 8, min -0.515836954117, max 0.240130990744, scale 0.002964580199, offset -174.000000000000"
|
|
,,,"model.6.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.6.m.0.cv2.conv.bias encoding : bitwidth 32, min -1.657929182053, max 1.657929182053, scale 0.000000000772, offset 0.000000000000"
|
|
,,,model.6.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
60,/model.6/m/m.0/cv2/act/Sigmoid,Neuron,"/model.6/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
61,/model.6/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.6/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278488844633, max 5.576745510101, scale 0.000089345151, offset -3117.000000000000"
|
|
,,,"/model.6/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
62,/model.6/m/m.0/Add,Eltwise_Binary,"/model.6/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.0/Add_output_0 encoding : bitwidth 16, min -0.556914687157, max 8.452565193176, scale 0.000137475858, offset -4051.000000000000"
|
|
,,,"/model.6/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
63,/model.6/m/m.1/cv1/conv/Conv,Conv2d,"/model.6/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.1.cv1.conv.weight encoding : bitwidth 8, min -2.055269718170, max 1.770232319832, scale 0.015001968481, offset -137.000000000000"
|
|
,,,"model.6.m.1.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.6.m.1.cv1.conv.bias encoding : bitwidth 32, min -3.756540298462, max 3.756540298462, scale 0.000000001749, offset 0.000000000000"
|
|
,,,model.6.m.1.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.1.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
64,/model.6/m/m.1/cv1/act/Sigmoid,Neuron,"/model.6/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.1/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
65,/model.6/m/m.1/cv1/act/Mul,Eltwise_Binary,"/model.6/m/m.1/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.1/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278501331806, max 14.931152343750, scale 0.000232084451, offset -1200.000000000000"
|
|
,,,"/model.6/m/m.1/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
66,/model.6/m/m.1/cv2/conv/Conv,Conv2d,"/model.6/m/m.1/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.1.cv2.conv.weight encoding : bitwidth 8, min -0.645956516266, max 0.539071679115, scale 0.004647169262, offset -139.000000000000"
|
|
,,,"model.6.m.1.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.6.m.1.cv2.conv.bias encoding : bitwidth 32, min -1.705662012100, max 1.705662012100, scale 0.000000000794, offset 0.000000000000"
|
|
,,,model.6.m.1.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.1.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
67,/model.6/m/m.1/cv2/act/Sigmoid,Neuron,"/model.6/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.1/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
68,/model.6/m/m.1/cv2/act/Mul,Eltwise_Binary,"/model.6/m/m.1/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.1/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278431177139, max 7.569735050201, scale 0.000119755343, offset -2325.000000000000"
|
|
,,,"/model.6/m/m.1/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
69,/model.6/m/m.1/Add,Eltwise_Binary,"/model.6/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.1/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.1/Add_output_0 encoding : bitwidth 16, min -0.835419178009, max 12.331775665283, scale 0.000200918512, offset -4158.000000000000"
|
|
,,,"/model.6/m/m.1/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
70,/model.6/m/m.2/cv1/conv/Conv,Conv2d,"/model.6/m/m.1/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.2.cv1.conv.weight encoding : bitwidth 8, min -1.093924880028, max 0.927458047867, scale 0.007926992141, offset -138.000000000000"
|
|
,,,"model.6.m.2.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.6.m.2.cv1.conv.bias encoding : bitwidth 32, min -2.577328681946, max 2.577328681946, scale 0.000000001200, offset 0.000000000000"
|
|
,,,model.6.m.2.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.2.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
71,/model.6/m/m.2/cv1/act/Sigmoid,Neuron,"/model.6/m/m.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.2/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
72,/model.6/m/m.2/cv1/act/Mul,Eltwise_Binary,"/model.6/m/m.2/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.2/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278410524130, max 9.621553421021, scale 0.000151063767, offset -1843.000000000000"
|
|
,,,"/model.6/m/m.2/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
73,/model.6/m/m.2/cv2/conv/Conv,Conv2d,"/model.6/m/m.2/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.m.2.cv2.conv.weight encoding : bitwidth 8, min -1.265535950661, max 1.072954297066, scale 0.009170549922, offset -138.000000000000"
|
|
,,,"model.6.m.2.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.6.m.2.cv2.conv.bias encoding : bitwidth 32, min -3.210509061813, max 3.210509061813, scale 0.000000001495, offset 0.000000000000"
|
|
,,,model.6.m.2.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.m.2.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
74,/model.6/m/m.2/cv2/act/Sigmoid,Neuron,"/model.6/m/m.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.2/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
75,/model.6/m/m.2/cv2/act/Mul,Eltwise_Binary,"/model.6/m/m.2/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.2/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278462082148, max 12.756546974182, scale 0.000198901485, offset -1400.000000000000"
|
|
,,,"/model.6/m/m.2/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
76,/model.6/m/m.2/Add,Eltwise_Binary,"/model.6/m/m.1/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/m/m.2/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/m/m.2/Add_output_0 encoding : bitwidth 16, min -1.113839268684, max 12.297047615051, scale 0.000204637021, offset -5443.000000000000"
|
|
,,,"/model.6/m/m.2/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
77,/model.6/cv2/conv/Conv,Conv2d,"/model.5/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.6/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.6.cv2.conv.weight encoding : bitwidth 8, min -1.415374517441, max 0.854565739632, scale 0.008901726454, offset -159.000000000000"
|
|
,,,"model.6.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 128]; tensor type: STATIC)",,,,"model.6.cv2.conv.bias encoding : bitwidth 32, min -1.798043727875, max 1.798043727875, scale 0.000000000837, offset 0.000000000000"
|
|
,,,model.6.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
78,/model.6/cv2/act/Sigmoid,Neuron,"/model.6/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
79,/model.6/cv2/act/Mul,Eltwise_Binary,"/model.6/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.6/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278406918049, max 11.615592956543, scale 0.000181490817, offset -1534.000000000000"
|
|
,,,"/model.6/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
80,/model.6/Concat,Concat,"/model.6/m/m.2/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.6/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.6/Concat_output_0 encoding : bitwidth 16, min -1.113839268684, max 12.297047615051, scale 0.000204637021, offset -5443.000000000000"
|
|
,,,"/model.6/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
81,/model.6/cv3/conv/Conv,Conv2d,"/model.6/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.6/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"model.6.cv3.conv.weight encoding : bitwidth 8, min -0.495371192694, max 0.652989268303, scale 0.004503374454, offset -110.000000000000"
|
|
,,,"model.6.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 256]; tensor type: STATIC)",,,,"model.6.cv3.conv.bias encoding : bitwidth 32, min -1.787060737610, max 1.787060737610, scale 0.000000000832, offset 0.000000000000"
|
|
,,,model.6.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.6.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.911%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
82,/model.6/cv3/act/Sigmoid,Neuron,"/model.6/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.6/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.6/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
83,/model.6/cv3/act/Mul,Eltwise_Binary,"/model.6/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.6/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.6/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278489530087, max 9.187699317932, scale 0.000144444784, offset -1928.000000000000"
|
|
,,,"/model.6/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
84,/model.7/conv/Conv,Conv2d,"/model.6/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.7/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"model.7.conv.weight encoding : bitwidth 8, min -0.260625928640, max 0.417533397675, scale 0.002659448422, offset -98.000000000000"
|
|
,,,"model.7.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 256, 512]; tensor type: STATIC)",,,,"model.7.conv.bias encoding : bitwidth 32, min -2.779919862747, max 2.779919862747, scale 0.000000001295, offset 0.000000000000"
|
|
,,,model.7.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.7.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 1M (16.3%)
|
|
,,,,,,,MACs per inference: 471M (5.74%)
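
The weight and bias encodings reported for this layer look like the usual split: 8-bit asymmetric for weights (scale ≈ (max − min)/255, min ≈ scale * offset) and 32-bit symmetric for biases (offset 0, scale ≈ max / 2^31). The check below is a sanity check of that reading against op 84's numbers, not documentation of the converter:

```python
# Sanity check for op 84 (/model.7/conv/Conv), assuming 8-bit asymmetric
# weights and 32-bit symmetric biases.
w_min, w_max, w_scale, w_offset = -0.260625928640, 0.417533397675, 0.002659448422, -98
b_max = 2.779919862747

print(f"{(w_max - w_min) / 255:.12f}")  # ~0.002659448338, close to the reported 0.002659448422
print(f"{w_scale * w_offset:.12f}")     # ~-0.260625945356, close to the reported min
print(f"{b_max / 2**31:.12e}")          # ~1.294e-09, i.e. the reported 0.000000001295
```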
|
|
85,/model.7/act/Sigmoid,Neuron,"/model.7/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.7/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.7/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
86,/model.7/act/Mul,Eltwise_Binary,"/model.7/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.7/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.7/act/Mul_output_0 encoding : bitwidth 16, min -0.278457075357, max 8.924106597900, scale 0.000140422126, offset -1983.000000000000"
|
|
,,,"/model.7/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
87,/model.8/cv1/conv/Conv,Conv2d,"/model.7/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.8/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.8.cv1.conv.weight encoding : bitwidth 8, min -0.473426491022, max 0.532604813576, scale 0.003945220727, offset -120.000000000000"
|
|
,,,"model.8.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.8.cv1.conv.bias encoding : bitwidth 32, min -2.305548667908, max 2.305548667908, scale 0.000000001074, offset 0.000000000000"
|
|
,,,model.8.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.8.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
88,/model.8/cv1/act/Sigmoid,Neuron,"/model.8/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
89,/model.8/cv1/act/Mul,Eltwise_Binary,"/model.8/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278436839581, max 8.264333724976, scale 0.000130354325, offset -2136.000000000000"
|
|
,,,"/model.8/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
90,/model.8/m/m.0/cv1/conv/Conv,Conv2d,"/model.8/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.8.m.0.cv1.conv.weight encoding : bitwidth 8, min -3.390120267868, max 2.490700721741, scale 0.023062042892, offset -147.000000000000"
|
|
,,,"model.8.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 256]; tensor type: STATIC)",,,,"model.8.m.0.cv1.conv.bias encoding : bitwidth 32, min -6.830086231232, max 6.830086231232, scale 0.000000003181, offset 0.000000000000"
|
|
,,,model.8.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.8.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.911%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
91,/model.8/m/m.0/cv1/act/Sigmoid,Neuron,"/model.8/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
92,/model.8/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.8/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278380572796, max 16.127798080444, scale 0.000250342244, offset -1112.000000000000"
|
|
,,,"/model.8/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
93,/model.8/m/m.0/cv2/conv/Conv,Conv2d,"/model.8/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.8.m.0.cv2.conv.weight encoding : bitwidth 8, min -0.438855737448, max 0.456409960985, scale 0.003510845825, offset -125.000000000000"
|
|
,,,"model.8.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 256, 256]; tensor type: STATIC)",,,,"model.8.m.0.cv2.conv.bias encoding : bitwidth 32, min -3.063917636871, max 3.063917636871, scale 0.000000001427, offset 0.000000000000"
|
|
,,,model.8.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.8.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 590k (8.17%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
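
The percentage columns also let you back out the whole-model totals. Op 93's exact counts are 590,080 parameters and 235,929,600 MACs, and dividing by the reported 8.17% and 2.87% gives roughly 7.2M parameters and 8.2G MACs overall, which is in line with YOLOv5s at a 640x640 input. A back-of-envelope check:

```python
# Back-of-envelope: whole-model totals implied by op 93's percentage columns.
params_93 = 3 * 3 * 256 * 256 + 256       # 590,080  ("590k")
macs_93   = 20 * 20 * 256 * 3 * 3 * 256   # 235,929,600  ("235M")
print(int(params_93 / 0.0817))            # ~7.2M total parameters
print(int(macs_93 / 0.0287))              # ~8.2G total MACs per inference
```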
|
|
94,/model.8/m/m.0/cv2/act/Sigmoid,Neuron,"/model.8/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
95,/model.8/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.8/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278495937586, max 15.468898773193, scale 0.000240289839, offset -1159.000000000000"
|
|
,,,"/model.8/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
96,/model.8/m/m.0/Add,Eltwise_Binary,"/model.8/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/m/m.0/Add_output_0 encoding : bitwidth 16, min -0.556832909584, max 15.281728744507, scale 0.000241680958, offset -2304.000000000000"
|
|
,,,"/model.8/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseAdd
|
|
,,,,,,,packageName: qti.aisw
|
|
97,/model.8/cv2/conv/Conv,Conv2d,"/model.7/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.8/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.8.cv2.conv.weight encoding : bitwidth 8, min -0.543772697449, max 0.592801392078, scale 0.004457153380, offset -122.000000000000"
|
|
,,,"model.8.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.8.cv2.conv.bias encoding : bitwidth 32, min -1.480291366577, max 1.480291366577, scale 0.000000000689, offset 0.000000000000"
|
|
,,,model.8.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.8.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
98,/model.8/cv2/act/Sigmoid,Neuron,"/model.8/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
99,/model.8/cv2/act/Mul,Eltwise_Binary,"/model.8/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.8/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278516262770, max 12.344280242920, scale 0.000192611522, offset -1446.000000000000"
|
|
,,,"/model.8/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
100,/model.8/Concat,Concat,"/model.8/m/m.0/Add_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.8/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.8/Concat_output_0 encoding : bitwidth 16, min -0.556832909584, max 15.281728744507, scale 0.000241680958, offset -2304.000000000000"
|
|
,,,"/model.8/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
101,/model.8/cv3/conv/Conv,Conv2d,"/model.8/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.8/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"model.8.cv3.conv.weight encoding : bitwidth 8, min -0.564996063709, max 0.677021145821, scale 0.004870655946, offset -116.000000000000"
|
|
,,,"model.8.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 512]; tensor type: STATIC)",,,,"model.8.cv3.conv.bias encoding : bitwidth 32, min -1.565519332886, max 1.565519332886, scale 0.000000000729, offset 0.000000000000"
|
|
,,,model.8.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.8.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 262k (3.63%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
102,/model.8/cv3/act/Sigmoid,Neuron,"/model.8/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.8/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.8/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
103,/model.8/cv3/act/Mul,Eltwise_Binary,"/model.8/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.8/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.8/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278487950563, max 11.346166610718, scale 0.000177380862, offset -1570.000000000000"
|
|
,,,"/model.8/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
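
Ops 87-103 together form one C3 block: a 1x1 cv1 branch feeding a bottleneck (ops 90-96, ending in the residual Add), a parallel 1x1 cv2 branch taken directly from /model.7/act/Mul_output_0, a channel concat (op 100), and a closing 1x1 cv3. The same pattern repeats throughout the dump; the head-side C3 blocks (e.g. ops 119-134) omit the residual Add, which matches shortcut=False in YOLOv5. A schematic PyTorch-style sketch of that topology with op 87's sizes — an illustration, not the exported graph itself, and written in PyTorch's NCHW layout rather than the dump's NHWC:

```python
import torch
import torch.nn as nn

class ConvSiLU(nn.Module):
    """Conv + SiLU, i.e. the Conv/Sigmoid/Mul triplets seen throughout the dump."""
    def __init__(self, cin, cout, k=1, s=1):
        super().__init__()
        self.conv = nn.Conv2d(cin, cout, k, s, k // 2, bias=True)
        self.act = nn.SiLU()
    def forward(self, x):
        return self.act(self.conv(x))

class C3Sketch(nn.Module):
    """One C3 block as laid out in ops 87-103 (512 -> 512 channels, one bottleneck)."""
    def __init__(self, c=512, shortcut=True):
        super().__init__()
        h = c // 2                        # hidden width, 256 here
        self.cv1 = ConvSiLU(c, h, 1)      # ops 87-89
        self.cv2 = ConvSiLU(c, h, 1)      # ops 97-99
        self.m1  = ConvSiLU(h, h, 1)      # ops 90-92 (bottleneck cv1)
        self.m2  = ConvSiLU(h, h, 3)      # ops 93-95 (bottleneck cv2)
        self.cv3 = ConvSiLU(2 * h, c, 1)  # ops 101-103
        self.shortcut = shortcut
    def forward(self, x):
        y = self.cv1(x)
        b = self.m2(self.m1(y))
        y = y + b if self.shortcut else b            # op 96 (absent in head-side C3s)
        return self.cv3(torch.cat([y, self.cv2(x)], dim=1))  # channel concat (axis 3 in NHWC)

print(C3Sketch()(torch.randn(1, 512, 20, 20)).shape)  # torch.Size([1, 512, 20, 20])
```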
|
|
104,/model.9/cv1/conv/Conv,Conv2d,"/model.8/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.9/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.9.cv1.conv.weight encoding : bitwidth 8, min -0.561515927315, max 0.548457443714, scale 0.004352836870, offset -129.000000000000"
|
|
,,,"model.9.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.9.cv1.conv.bias encoding : bitwidth 32, min -2.725049734116, max 2.725049734116, scale 0.000000001269, offset 0.000000000000"
|
|
,,,model.9.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.9.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
105,/model.9/cv1/act/Sigmoid,Neuron,"/model.9/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.9/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
106,/model.9/cv1/act/Mul,Eltwise_Binary,"/model.9/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.9/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278452873230, max 8.567115783691, scale 0.000134974733, offset -2063.000000000000"
|
|
,,,"/model.9/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
107,/model.9/m/MaxPool,Pool,"/model.9/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/m/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.9/m/MaxPool_output_0 encoding : bitwidth 16, min -0.278452873230, max 8.567115783691, scale 0.000134974733, offset -2063.000000000000"
|
|
,,,,,,,"filter_size: [5, 5]"
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[2, 2], [2, 2]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,pool_type: PoolMax2d
|
|
,,,,,,,"stride: [1, 1]"
|
|
108,/model.9/m_1/MaxPool,Pool,"/model.9/m/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/m_1/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.9/m_1/MaxPool_output_0 encoding : bitwidth 16, min -0.278452873230, max 8.567115783691, scale 0.000134974733, offset -2063.000000000000"
|
|
,,,,,,,"filter_size: [5, 5]"
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[2, 2], [2, 2]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,pool_type: PoolMax2d
|
|
,,,,,,,"stride: [1, 1]"
|
|
109,/model.9/m_2/MaxPool,Pool,"/model.9/m_1/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/m_2/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.9/m_2/MaxPool_output_0 encoding : bitwidth 16, min -0.278452873230, max 8.567115783691, scale 0.000134974733, offset -2063.000000000000"
|
|
,,,,,,,"filter_size: [5, 5]"
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[2, 2], [2, 2]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,pool_type: PoolMax2d
|
|
,,,,,,,"stride: [1, 1]"
|
|
110,/model.9/Concat,Concat,"/model.9/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.9/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 1024]; tensor type: NATIVE)",1x20x20x1024,A D G C,"/model.9/Concat_output_0 encoding : bitwidth 16, min -0.278452873230, max 8.567115783691, scale 0.000134974733, offset -2063.000000000000"
|
|
,,,"/model.9/m/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,"/model.9/m_1/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,packageName: qti.aisw
|
|
,,,"/model.9/m_2/MaxPool_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,
|
|
111,/model.9/cv2/conv/Conv,Conv2d,"/model.9/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 1024]; tensor type: NATIVE)","/model.9/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"model.9.cv2.conv.weight encoding : bitwidth 8, min -0.536085247993, max 0.575310945511, scale 0.004358416423, offset -123.000000000000"
|
|
,,,"model.9.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 1024, 512]; tensor type: STATIC)",,,,"model.9.cv2.conv.bias encoding : bitwidth 32, min -6.068809986115, max 6.068809986115, scale 0.000000002826, offset 0.000000000000"
|
|
,,,model.9.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.9.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 524k (7.26%)
|
|
,,,,,,,MACs per inference: 209M (2.55%)
|
|
112,/model.9/cv2/act/Sigmoid,Neuron,"/model.9/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.9/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.9/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
113,/model.9/cv2/act/Mul,Eltwise_Binary,"/model.9/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.9/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.9/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278507143259, max 10.308479309082, scale 0.000161547068, offset -1724.000000000000"
|
|
,,,"/model.9/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
114,/model.10/conv/Conv,Conv2d,"/model.9/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.10/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.10.conv.weight encoding : bitwidth 8, min -1.641803860664, max 1.391964077950, scale 0.011897129007, offset -138.000000000000"
|
|
,,,"model.10.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.10.conv.bias encoding : bitwidth 32, min -4.094081878662, max 4.094081878662, scale 0.000000001906, offset 0.000000000000"
|
|
,,,model.10.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.10.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
115,/model.10/act/Sigmoid,Neuron,"/model.10/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.10/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.10/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
116,/model.10/act/Mul,Eltwise_Binary,"/model.10/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.10/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.10/act/Mul_output_0 encoding : bitwidth 16, min -0.278458654881, max 9.623216629028, scale 0.000151089887, offset -1843.000000000000"
|
|
,,,"/model.10/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
117,/model.11/Resize,Resize,"/model.10/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.11/Resize_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.11/Resize_output_0 encoding : bitwidth 16, min -0.278458654881, max 9.623216629028, scale 0.000151089887, offset -1843.000000000000"
|
|
,,,,,,,align_corners: False
|
|
,,,,,,,half_pixel_centers: False
|
|
,,,,,,,interpolation_mode: 0
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,scale_height: 2
|
|
,,,,,,,scale_width: 2
|
|
,,,,,,,transformation_mode: 3
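
The Resize op doubles the 20x20x256 map to 40x40x256 ahead of the concat with the backbone's stride-16 feature map. interpolation_mode 0 with scale_height/scale_width 2 reads as plain nearest-neighbour upsampling (an interpretation of the flags, not something the dump spells out), which would also explain why the output simply inherits the input's encoding: no new values are created. A minimal numpy equivalent:

```python
import numpy as np

def upsample_nearest_2x_nhwc(x):
    # Nearest-neighbour 2x upsample of an NHWC tensor: repeat each row and
    # each column twice, as a plain-Python stand-in for op 117.
    return np.repeat(np.repeat(x, 2, axis=1), 2, axis=2)

x = np.random.randn(1, 20, 20, 256).astype(np.float32)
print(upsample_nearest_2x_nhwc(x).shape)   # (1, 40, 40, 256)
```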
|
|
118,/model.12/Concat,Concat,"/model.11/Resize_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.12/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 512]; tensor type: NATIVE)",1x40x40x512,A D G C,"/model.12/Concat_output_0 encoding : bitwidth 16, min -0.278489530087, max 9.623216629028, scale 0.000151090353, offset -1843.000000000000"
|
|
,,,"/model.6/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
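
In the NHWC layout used throughout this dump, axis 3 is the channel axis, so op 118 stacks the upsampled 256-channel map and the backbone's 256-channel /model.6/cv3 output into 512 channels. Its encoding also hints at how concats are requantized: the reported min matches one input, the max matches the other, and the scale is close to (max − min)/65535. A quick check, with the per-input ranges copied from the table above:

```python
import numpy as np

# Channel concat in NHWC (axis 3), as in op 118.
a = np.zeros((1, 40, 40, 256), dtype=np.float32)   # /model.11/Resize_output_0
b = np.zeros((1, 40, 40, 256), dtype=np.float32)   # /model.6/cv3/act/Mul_output_0
print(np.concatenate([a, b], axis=3).shape)        # (1, 40, 40, 512)

# Concat output range = union of the input ranges, requantized to one scale.
new_min, new_max = -0.278489530087, 9.623216629028  # min of input b, max of input a
print(f"{(new_max - new_min) / 65535:.12f}")        # ~0.000151090351 vs reported 0.000151090353
```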
|
|
119,/model.13/cv1/conv/Conv,Conv2d,"/model.12/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 512]; tensor type: NATIVE)","/model.13/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.13.cv1.conv.weight encoding : bitwidth 8, min -0.919384360313, max 0.612922906876, scale 0.006009048317, offset -153.000000000000"
|
|
,,,"model.13.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 128]; tensor type: STATIC)",,,,"model.13.cv1.conv.bias encoding : bitwidth 32, min -2.564177513123, max 2.564177513123, scale 0.000000001194, offset 0.000000000000"
|
|
,,,model.13.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.13.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.909%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
120,/model.13/cv1/act/Sigmoid,Neuron,"/model.13/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
121,/model.13/cv1/act/Mul,Eltwise_Binary,"/model.13/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278474420309, max 6.391781330109, scale 0.000101781581, offset -2736.000000000000"
|
|
,,,"/model.13/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
122,/model.13/m/m.0/cv1/conv/Conv,Conv2d,"/model.13/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.13.m.0.cv1.conv.weight encoding : bitwidth 8, min -1.729861855507, max 1.230640053749, scale 0.011609811336, offset -149.000000000000"
|
|
,,,"model.13.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.13.m.0.cv1.conv.bias encoding : bitwidth 32, min -3.035499095917, max 3.035499095917, scale 0.000000001414, offset 0.000000000000"
|
|
,,,model.13.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.13.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
123,/model.13/m/m.0/cv1/act/Sigmoid,Neuron,"/model.13/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
124,/model.13/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.13/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278505504131, max 9.178416252136, scale 0.000144303383, offset -1930.000000000000"
|
|
,,,"/model.13/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
125,/model.13/m/m.0/cv2/conv/Conv,Conv2d,"/model.13/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.13.m.0.cv2.conv.weight encoding : bitwidth 8, min -0.930533289909, max 0.590530693531, scale 0.005964956712, offset -156.000000000000"
|
|
,,,"model.13.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.13.m.0.cv2.conv.bias encoding : bitwidth 32, min -2.699791431427, max 2.699791431427, scale 0.000000001257, offset 0.000000000000"
|
|
,,,model.13.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.13.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
126,/model.13/m/m.0/cv2/act/Sigmoid,Neuron,"/model.13/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
127,/model.13/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.13/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278457611799, max 8.627700805664, scale 0.000135899274, offset -2049.000000000000"
|
|
,,,"/model.13/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
128,/model.13/cv2/conv/Conv,Conv2d,"/model.12/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 512]; tensor type: NATIVE)","/model.13/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.13.cv2.conv.weight encoding : bitwidth 8, min -0.739760279655, max 0.446647703648, scale 0.004652580246, offset -159.000000000000"
|
|
,,,"model.13.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 128]; tensor type: STATIC)",,,,"model.13.cv2.conv.bias encoding : bitwidth 32, min -1.689719557762, max 1.689719557762, scale 0.000000000787, offset 0.000000000000"
|
|
,,,model.13.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.13.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.909%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
129,/model.13/cv2/act/Sigmoid,Neuron,"/model.13/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
130,/model.13/cv2/act/Mul,Eltwise_Binary,"/model.13/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.13/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278407067060, max 7.475697040558, scale 0.000118320044, offset -2353.000000000000"
|
|
,,,"/model.13/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
131,/model.13/Concat,Concat,"/model.13/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.13/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.13/Concat_output_0 encoding : bitwidth 16, min -0.278457611799, max 8.627700805664, scale 0.000135899274, offset -2049.000000000000"
|
|
,,,"/model.13/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
132,/model.13/cv3/conv/Conv,Conv2d,"/model.13/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.13/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"model.13.cv3.conv.weight encoding : bitwidth 8, min -1.190755844116, max 1.238386154175, scale 0.009526046924, offset -125.000000000000"
|
|
,,,"model.13.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 256]; tensor type: STATIC)",,,,"model.13.cv3.conv.bias encoding : bitwidth 32, min -2.110710144043, max 2.110710144043, scale 0.000000000983, offset 0.000000000000"
|
|
,,,model.13.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.13.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.911%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
133,/model.13/cv3/act/Sigmoid,Neuron,"/model.13/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.13/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.13/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
134,/model.13/cv3/act/Mul,Eltwise_Binary,"/model.13/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.13/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.13/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278416544199, max 10.949908256531, scale 0.000171333260, offset -1625.000000000000"
|
|
,,,"/model.13/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
135,/model.14/conv/Conv,Conv2d,"/model.13/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.14/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.14.conv.weight encoding : bitwidth 8, min -0.655613541603, max 0.640366733074, scale 0.005082275718, offset -129.000000000000"
|
|
,,,"model.14.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 128]; tensor type: STATIC)",,,,"model.14.conv.bias encoding : bitwidth 32, min -1.909503817558, max 1.909503817558, scale 0.000000000889, offset 0.000000000000"
|
|
,,,model.14.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.14.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
136,/model.14/act/Sigmoid,Neuron,"/model.14/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.14/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.14/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
137,/model.14/act/Mul,Eltwise_Binary,"/model.14/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.14/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.14/act/Mul_output_0 encoding : bitwidth 16, min -0.278471291065, max 8.119879722595, scale 0.000128150627, offset -2173.000000000000"
|
|
,,,"/model.14/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
138,/model.15/Resize,Resize,"/model.14/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.15/Resize_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.15/Resize_output_0 encoding : bitwidth 16, min -0.278471291065, max 8.119879722595, scale 0.000128150627, offset -2173.000000000000"
|
|
,,,,,,,align_corners: False
|
|
,,,,,,,half_pixel_centers: False
|
|
,,,,,,,interpolation_mode: 0
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,scale_height: 2
|
|
,,,,,,,scale_width: 2
|
|
,,,,,,,transformation_mode: 3
|
|
139,/model.16/Concat,Concat,"/model.15/Resize_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.16/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 256]; tensor type: NATIVE)",1x80x80x256,A D G C,"/model.16/Concat_output_0 encoding : bitwidth 16, min -0.278503715992, max 8.450210571289, scale 0.000133191643, offset -2091.000000000000"
|
|
,,,"/model.4/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
140,/model.17/cv1/conv/Conv,Conv2d,"/model.16/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 256]; tensor type: NATIVE)","/model.17/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.17.cv1.conv.weight encoding : bitwidth 8, min -0.694504261017, max 0.769120454788, scale 0.005739704706, offset -121.000000000000"
|
|
,,,"model.17.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 64]; tensor type: STATIC)",,,,"model.17.cv1.conv.bias encoding : bitwidth 32, min -1.675090909004, max 1.675090909004, scale 0.000000000780, offset 0.000000000000"
|
|
,,,model.17.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.17.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.228%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
141,/model.17/cv1/act/Sigmoid,Neuron,"/model.17/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
142,/model.17/cv1/act/Mul,Eltwise_Binary,"/model.17/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278487890959, max 3.462177038193, scale 0.000057078891, offset -4879.000000000000"
|
|
,,,"/model.17/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
143,/model.17/m/m.0/cv1/conv/Conv,Conv2d,"/model.17/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.17.m.0.cv1.conv.weight encoding : bitwidth 8, min -2.548283815384, max 1.487817883492, scale 0.015827849507, offset -161.000000000000"
|
|
,,,"model.17.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 64, 64]; tensor type: STATIC)",,,,"model.17.m.0.cv1.conv.bias encoding : bitwidth 32, min -3.132049083710, max 3.132049083710, scale 0.000000001458, offset 0.000000000000"
|
|
,,,model.17.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.17.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 4k (0.0576%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
144,/model.17/m/m.0/cv1/act/Sigmoid,Neuron,"/model.17/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
145,/model.17/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.17/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278457969427, max 6.095535278320, scale 0.000097260905, offset -2863.000000000000"
|
|
,,,"/model.17/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
146,/model.17/m/m.0/cv2/conv/Conv,Conv2d,"/model.17/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.17.m.0.cv2.conv.weight encoding : bitwidth 8, min -1.583908319473, max 1.301067590714, scale 0.011313631199, offset -140.000000000000"
|
|
,,,"model.17.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 64, 64]; tensor type: STATIC)",,,,"model.17.m.0.cv2.conv.bias encoding : bitwidth 32, min -3.532960891724, max 3.532960891724, scale 0.000000001645, offset 0.000000000000"
|
|
,,,model.17.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.17.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 36k (0.511%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
147,/model.17/m/m.0/cv2/act/Sigmoid,Neuron,"/model.17/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
148,/model.17/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.17/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278341144323, max 19.003992080688, scale 0.000294229540, offset -946.000000000000"
|
|
,,,"/model.17/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
149,/model.17/cv2/conv/Conv,Conv2d,"/model.16/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 256]; tensor type: NATIVE)","/model.17/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"model.17.cv2.conv.weight encoding : bitwidth 8, min -1.464563965797, max 1.386304736137, scale 0.011179877445, offset -131.000000000000"
|
|
,,,"model.17.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 64]; tensor type: STATIC)",,,,"model.17.cv2.conv.bias encoding : bitwidth 32, min -2.074291944504, max 2.074291944504, scale 0.000000000966, offset 0.000000000000"
|
|
,,,model.17.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.17.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.228%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
150,/model.17/cv2/act/Sigmoid,Neuron,"/model.17/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
151,/model.17/cv2/act/Mul,Eltwise_Binary,"/model.17/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",1x80x80x64,A D G C,"/model.17/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278421491385, max 7.766706943512, scale 0.000122760801, offset -2268.000000000000"
|
|
,,,"/model.17/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
152,/model.17/Concat,Concat,"/model.17/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)","/model.17/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.17/Concat_output_0 encoding : bitwidth 16, min -0.278421491385, max 19.003992080688, scale 0.000294230762, offset -946.000000000000"
|
|
,,,"/model.17/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 64]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
153,/model.17/cv3/conv/Conv,Conv2d,"/model.17/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.17/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"model.17.cv3.conv.weight encoding : bitwidth 8, min -3.034088134766, max 3.360064506531, scale 0.025075107813, offset -121.000000000000"
|
|
,,,"model.17.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.17.cv3.conv.bias encoding : bitwidth 32, min -23.540845870972, max 23.540845870972, scale 0.000000010962, offset 0.000000000000"
|
|
,,,model.17.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.17.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
154,/model.17/cv3/act/Sigmoid,Neuron,"/model.17/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.17/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.17/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
155,/model.17/cv3/act/Mul,Eltwise_Binary,"/model.17/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.17/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",1x80x80x128,A D G C,"/model.17/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278577655554, max 30.250833511353, scale 0.000465848949, offset -598.000000000000"
|
|
,,,"/model.17/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
156,/model.18/conv/Conv,Conv2d,"/model.17/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.18/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.18.conv.weight encoding : bitwidth 8, min -0.216575950384, max 0.239844426513, scale 0.001789883827, offset -121.000000000000"
|
|
,,,"model.18.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.18.conv.bias encoding : bitwidth 32, min -1.885105609894, max 1.885105609894, scale 0.000000000878, offset 0.000000000000"
|
|
,,,model.18.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.18.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
157,/model.18/act/Sigmoid,Neuron,"/model.18/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.18/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.18/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
158,/model.18/act/Mul,Eltwise_Binary,"/model.18/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.18/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.18/act/Mul_output_0 encoding : bitwidth 16, min -0.278441607952, max 9.417451858521, scale 0.000147949846, offset -1882.000000000000"
|
|
,,,"/model.18/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
159,/model.19/Concat,Concat,"/model.18/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.19/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.19/Concat_output_0 encoding : bitwidth 16, min -0.278471291065, max 9.417451858521, scale 0.000147950297, offset -1882.000000000000"
|
|
,,,"/model.14/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
160,/model.20/cv1/conv/Conv,Conv2d,"/model.19/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.20/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.20.cv1.conv.weight encoding : bitwidth 8, min -1.073780179024, max 0.984971284866, scale 0.008073534817, offset -133.000000000000"
|
|
,,,"model.20.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 128]; tensor type: STATIC)",,,,"model.20.cv1.conv.bias encoding : bitwidth 32, min -2.347510337830, max 2.347510337830, scale 0.000000001093, offset 0.000000000000"
|
|
,,,model.20.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.20.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
161,/model.20/cv1/act/Sigmoid,Neuron,"/model.20/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
162,/model.20/cv1/act/Mul,Eltwise_Binary,"/model.20/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278467267752, max 9.367067337036, scale 0.000147181432, offset -1892.000000000000"
|
|
,,,"/model.20/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
163,/model.20/m/m.0/cv1/conv/Conv,Conv2d,"/model.20/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.20.m.0.cv1.conv.weight encoding : bitwidth 8, min -1.074177265167, max 0.814893126488, scale 0.007408119272, offset -145.000000000000"
|
|
,,,"model.20.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 128]; tensor type: STATIC)",,,,"model.20.m.0.cv1.conv.bias encoding : bitwidth 32, min -1.833884835243, max 1.833884835243, scale 0.000000000854, offset 0.000000000000"
|
|
,,,model.20.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.20.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 16k (0.229%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
164,/model.20/m/m.0/cv1/act/Sigmoid,Neuron,"/model.20/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
165,/model.20/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.20/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278406590223, max 8.275449752808, scale 0.000130523476, offset -2133.000000000000"
|
|
,,,"/model.20/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
166,/model.20/m/m.0/cv2/conv/Conv,Conv2d,"/model.20/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.20.m.0.cv2.conv.weight encoding : bitwidth 8, min -1.163091063499, max 1.190783619881, scale 0.009230880998, offset -126.000000000000"
|
|
,,,"model.20.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 128, 128]; tensor type: STATIC)",,,,"model.20.m.0.cv2.conv.bias encoding : bitwidth 32, min -2.663993597031, max 2.663993597031, scale 0.000000001241, offset 0.000000000000"
|
|
,,,model.20.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.20.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 147k (2.04%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
167,/model.20/m/m.0/cv2/act/Sigmoid,Neuron,"/model.20/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
168,/model.20/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.20/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278537154198, max 10.961568832397, scale 0.000171513020, offset -1624.000000000000"
|
|
,,,"/model.20/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
169,/model.20/cv2/conv/Conv,Conv2d,"/model.19/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.20/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"model.20.cv2.conv.weight encoding : bitwidth 8, min -1.762340426445, max 0.640851080418, scale 0.009424280375, offset -187.000000000000"
|
|
,,,"model.20.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 128]; tensor type: STATIC)",,,,"model.20.cv2.conv.bias encoding : bitwidth 32, min -1.739673137665, max 1.739673137665, scale 0.000000000810, offset 0.000000000000"
|
|
,,,model.20.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.20.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
170,/model.20/cv2/act/Sigmoid,Neuron,"/model.20/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
171,/model.20/cv2/act/Mul,Eltwise_Binary,"/model.20/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",1x40x40x128,A D G C,"/model.20/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278419017792, max 8.701005935669, scale 0.000137017239, offset -2032.000000000000"
|
|
,,,"/model.20/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
172,/model.20/Concat,Concat,"/model.20/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)","/model.20/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.20/Concat_output_0 encoding : bitwidth 16, min -0.278537154198, max 10.961568832397, scale 0.000171513020, offset -1624.000000000000"
|
|
,,,"/model.20/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 128]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
173,/model.20/cv3/conv/Conv,Conv2d,"/model.20/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.20/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"model.20.cv3.conv.weight encoding : bitwidth 8, min -2.630712985992, max 2.490140438080, scale 0.020081778988, offset -131.000000000000"
|
|
,,,"model.20.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 256]; tensor type: STATIC)",,,,"model.20.cv3.conv.bias encoding : bitwidth 32, min -5.921085834503, max 5.921085834503, scale 0.000000002757, offset 0.000000000000"
|
|
,,,model.20.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.20.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.911%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
174,/model.20/cv3/act/Sigmoid,Neuron,"/model.20/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.20/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.20/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
175,/model.20/cv3/act/Mul,Eltwise_Binary,"/model.20/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.20/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",1x40x40x256,A D G C,"/model.20/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278196603060, max 36.258102416992, scale 0.000557508203, offset -499.000000000000"
|
|
,,,"/model.20/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
176,/model.21/conv/Conv,Conv2d,"/model.20/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.21/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.21.conv.weight encoding : bitwidth 8, min -0.244086831808, max 0.316653192043, scale 0.002198980423, offset -111.000000000000"
|
|
,,,"model.21.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 256, 256]; tensor type: STATIC)",,,,"model.21.conv.bias encoding : bitwidth 32, min -1.798731207848, max 1.798731207848, scale 0.000000000838, offset 0.000000000000"
|
|
,,,model.21.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.21.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [2, 2]"
|
|
,,,,,,,param count: 590k (8.17%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
177,/model.21/act/Sigmoid,Neuron,"/model.21/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.21/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.21/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
178,/model.21/act/Mul,Eltwise_Binary,"/model.21/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.21/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.21/act/Mul_output_0 encoding : bitwidth 16, min -0.278380751610, max 11.755710601807, scale 0.000183628465, offset -1516.000000000000"
|
|
,,,"/model.21/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
179,/model.22/Concat,Concat,"/model.21/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.22/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.22/Concat_output_0 encoding : bitwidth 16, min -0.278458654881, max 11.755710601807, scale 0.000183629658, offset -1516.000000000000"
|
|
,,,"/model.10/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
180,/model.23/cv1/conv/Conv,Conv2d,"/model.22/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.23/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.23.cv1.conv.weight encoding : bitwidth 8, min -1.523377776146, max 1.066364526749, scale 0.010155851953, offset -150.000000000000"
|
|
,,,"model.23.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.23.cv1.conv.bias encoding : bitwidth 32, min -1.084763884544, max 1.084763884544, scale 0.000000000505, offset 0.000000000000"
|
|
,,,model.23.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.23.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
181,/model.23/cv1/act/Sigmoid,Neuron,"/model.23/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
182,/model.23/cv1/act/Mul,Eltwise_Binary,"/model.23/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278414577246, max 10.881155967712, scale 0.000170284140, offset -1635.000000000000"
|
|
,,,"/model.23/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
183,/model.23/m/m.0/cv1/conv/Conv,Conv2d,"/model.23/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.23.m.0.cv1.conv.weight encoding : bitwidth 8, min -1.685556530952, max 0.872877478600, scale 0.010033074766, offset -168.000000000000"
|
|
,,,"model.23.m.0.cv1.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 256]; tensor type: STATIC)",,,,"model.23.m.0.cv1.conv.bias encoding : bitwidth 32, min -1.529487609863, max 1.529487609863, scale 0.000000000712, offset 0.000000000000"
|
|
,,,model.23.m.0.cv1.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.23.m.0.cv1.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.911%)
|
|
,,,,,,,MACs per inference: 26M (0.319%)
|
|
184,/model.23/m/m.0/cv1/act/Sigmoid,Neuron,"/model.23/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/m/m.0/cv1/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
185,/model.23/m/m.0/cv1/act/Mul,Eltwise_Binary,"/model.23/m/m.0/cv1/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/m/m.0/cv1/act/Mul_output_0 encoding : bitwidth 16, min -0.278437763453, max 10.727243423462, scale 0.000167935927, offset -1658.000000000000"
|
|
,,,"/model.23/m/m.0/cv1/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
186,/model.23/m/m.0/cv2/conv/Conv,Conv2d,"/model.23/m/m.0/cv1/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.23.m.0.cv2.conv.weight encoding : bitwidth 8, min -0.849559187889, max 0.743364274502, scale 0.006246758625, offset -136.000000000000"
|
|
,,,"model.23.m.0.cv2.conv.weight (data type: uFxp_8; tensor dimension: [3, 3, 256, 256]; tensor type: STATIC)",,,,"model.23.m.0.cv2.conv.bias encoding : bitwidth 32, min -1.477485418320, max 1.477485418320, scale 0.000000000688, offset 0.000000000000"
|
|
,,,model.23.m.0.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.23.m.0.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[1, 1], [1, 1]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 590k (8.17%)
|
|
,,,,,,,MACs per inference: 235M (2.87%)
|
|
187,/model.23/m/m.0/cv2/act/Sigmoid,Neuron,"/model.23/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/m/m.0/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
188,/model.23/m/m.0/cv2/act/Mul,Eltwise_Binary,"/model.23/m/m.0/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/m/m.0/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278402566910, max 14.506939888000, scale 0.000225609867, offset -1234.000000000000"
|
|
,,,"/model.23/m/m.0/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
189,/model.23/cv2/conv/Conv,Conv2d,"/model.22/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.23/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"model.23.cv2.conv.weight encoding : bitwidth 8, min -0.951136946678, max 0.634091317654, scale 0.006216581445, offset -153.000000000000"
|
|
,,,"model.23.cv2.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 256]; tensor type: STATIC)",,,,"model.23.cv2.conv.bias encoding : bitwidth 32, min -1.122701168060, max 1.122701168060, scale 0.000000000523, offset 0.000000000000"
|
|
,,,model.23.cv2.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.23.cv2.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 131k (1.82%)
|
|
,,,,,,,MACs per inference: 52M (0.638%)
|
|
190,/model.23/cv2/act/Sigmoid,Neuron,"/model.23/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/cv2/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
191,/model.23/cv2/act/Mul,Eltwise_Binary,"/model.23/cv2/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",1x20x20x256,A D G C,"/model.23/cv2/act/Mul_output_0 encoding : bitwidth 16, min -0.278523623943, max 9.228270530701, scale 0.000145064376, offset -1920.000000000000"
|
|
,,,"/model.23/cv2/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
192,/model.23/Concat,Concat,"/model.23/m/m.0/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)","/model.23/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.23/Concat_output_0 encoding : bitwidth 16, min -0.278523623943, max 14.506939888000, scale 0.000225611715, offset -1235.000000000000"
|
|
,,,"/model.23/cv2/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 256]; tensor type: NATIVE)",,,,axis: 3
|
|
,,,,,,,packageName: qti.aisw
|
|
193,/model.23/cv3/conv/Conv,Conv2d,"/model.23/Concat_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.23/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"model.23.cv3.conv.weight encoding : bitwidth 8, min -1.523797392845, max 1.119524598122, scale 0.010365968570, offset -147.000000000000"
|
|
,,,"model.23.cv3.conv.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 512]; tensor type: STATIC)",,,,"model.23.cv3.conv.bias encoding : bitwidth 32, min -3.408988952637, max 3.408988952637, scale 0.000000001587, offset 0.000000000000"
|
|
,,,model.23.cv3.conv.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.23.cv3.conv.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 262k (3.63%)
|
|
,,,,,,,MACs per inference: 104M (1.28%)
|
|
194,/model.23/cv3/act/Sigmoid,Neuron,"/model.23/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.23/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.23/cv3/act/Sigmoid_output_0 encoding : bitwidth 16, min 0.000000000000, max 0.999984741211, scale 0.000015258789, offset 0.000000000000"
|
|
,,,,,,,neuron_type: Sigmoid
|
|
,,,,,,,packageName: qti.aisw
|
|
195,/model.23/cv3/act/Mul,Eltwise_Binary,"/model.23/cv3/conv/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.23/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",1x20x20x512,A D G C,"/model.23/cv3/act/Mul_output_0 encoding : bitwidth 16, min -0.278268188238, max 26.461183547974, scale 0.000408017891, offset -682.000000000000"
|
|
,,,"/model.23/cv3/act/Sigmoid_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)",,,,eltwise_type: ElementWiseMultiply
|
|
,,,,,,,packageName: qti.aisw
|
|
196,/model.24/m.0/Conv,Conv2d,"/model.17/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 128]; tensor type: NATIVE)","/model.24/m.0/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 80, 80, 255]; tensor type: APP_READ)",1x80x80x255,A D G C,"model.24.m.0.weight encoding : bitwidth 8, min -0.530882358551, max 0.526734828949, scale 0.004147518426, offset -128.000000000000"
|
|
,,,"model.24.m.0.weight (data type: uFxp_8; tensor dimension: [1, 1, 128, 255]; tensor type: STATIC)",,,,"model.24.m.0.bias encoding : bitwidth 32, min -6.835937500000, max 6.835937500000, scale 0.000000003183, offset 0.000000000000"
|
|
,,,model.24.m.0.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.24.m.0.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 32k (0.455%)
|
|
,,,,,,,MACs per inference: 208M (2.54%)
|
|
197,/model.24/m.1/Conv,Conv2d,"/model.20/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 256]; tensor type: NATIVE)","/model.24/m.1/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 40, 40, 255]; tensor type: APP_READ)",1x40x40x255,A D G C,"model.24.m.1.weight encoding : bitwidth 8, min -0.545375704765, max 0.492466121912, scale 0.004069968127, offset -134.000000000000"
|
|
,,,"model.24.m.1.weight (data type: uFxp_8; tensor dimension: [1, 1, 256, 255]; tensor type: STATIC)",,,,"model.24.m.1.bias encoding : bitwidth 32, min -7.421875000000, max 7.421875000000, scale 0.000000003456, offset 0.000000000000"
|
|
,,,model.24.m.1.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.24.m.1.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 65k (0.907%)
|
|
,,,,,,,MACs per inference: 104M (1.27%)
|
|
198,/model.24/m.2/Conv,Conv2d,"/model.23/cv3/act/Mul_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 512]; tensor type: NATIVE)","/model.24/m.2/Conv_output_0 (data type: uFxp_16; tensor dimension: [1, 20, 20, 255]; tensor type: APP_READ)",1x20x20x255,A D G C,"model.24.m.2.weight encoding : bitwidth 8, min -0.374947339296, max 0.389945238829, scale 0.002999578835, offset -125.000000000000"
|
|
,,,"model.24.m.2.weight (data type: uFxp_8; tensor dimension: [1, 1, 512, 255]; tensor type: STATIC)",,,,"model.24.m.2.bias encoding : bitwidth 32, min -8.132812500000, max 8.132812500000, scale 0.000000003787, offset 0.000000000000"
|
|
,,,model.24.m.2.bias (data type: sFxp_32
|
|
,,,,,,,bias_op_name: model.24.m.2.bias
|
|
,,,,,,,"dilation: [1, 1]"
|
|
,,,,,,,group: 1
|
|
,,,,,,,packageName: qti.aisw
|
|
,,,,,,,"pad_amount: [[0, 0], [0, 0]]"
|
|
,,,,,,,padding_size_strategy: 5
|
|
,,,,,,,"stride: [1, 1]"
|
|
,,,,,,,param count: 130k (1.81%)
|
|
,,,,,,,MACs per inference: 52M (0.636%)
|
|
Note: The supported runtimes column assumes a processor target of Snapdragon 855
|
|
Key : A:AIP
|
|
D:DSP
|
|
G:GPU
|
|
C:CPU
|
|
""
|
|
Input Name,Dimensions,Type,Encoding Info
|
|
images,"1,640,640,3",uFxp_16,"bitwidth 16, min 0.000000000000, max 1.000000000000, scale 0.000015259022, offset 0.000000000000"
|
|
Total parameters: 7225885 (27 MB assuming single-precision float; this does not represent the actual memory requirement of the model, only a rough estimate of the parameters' contribution, computed as 4 x number of params, in bytes)
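
For reference, 7,225,885 params x 4 bytes/param = 28,903,540 bytes, roughly 27.6 MiB, which is where the 27 MB figure comes from; since the weights in this DLC are stored at 8 bits, the on-disk weight payload is closer to a quarter of that.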
|
|
Total MACs per inference: 8216M (100%)
|
|
"Converter command: snpe-onnx-to-dlc adjust_nms_features_dims=True align_matmul_ranks=True batch=None converter_op_package_lib= copyright_file=None custom_io= custom_op_config_paths=None debug=-1 define_symbol=None disable_batchnorm_folding=False dry_run=None dumpIR=False dump_custom_io_config_template= dump_inferred_model=False dump_value_info=False enable_match_gathernd=False enable_strict_validation=False expand_gru_op_structure=True expand_lstm_op_structure=False extract_color_transform=True float_bw=32 force_prune_cast_ops=False handle_gather_negative_indices=True inject_cast_for_gather=True input_dim=[['images', '1,3,640,640']] input_dtype=[] input_encoding=[] input_layout=[] input_type=[] keep_disconnected_nodes=False keep_int64_inputs=False keep_quant_nodes=False match_caffe_ssd_to_tf=True model_version=None no_simplification=False op_package_lib= out_names=['/model.24/m.0/Conv_output_0', '/model.24/m.1/Conv_output_0', '/model.24/m.2/Conv_output_0'] package_name=None perform_axes_to_spatial_first_order=True prepare_inputs_as_params=False preprocess_roi_pool_inputs=True preserve_io=[] quantization_overrides= squash_box_decoder=True unroll_gru_time_steps=True unroll_lstm_time_steps=True use_convert_quantization_nodes=False validation_target=[]"
|
|
Quantizer command: snpe-dlc-quant help=false version=false verbose=false quiet=false silent=false debug=[] debug1=false debug2=false debug3=false log-mask=[] log-file=[] log-dir=[] log-file-include-hostname=false input_dlc=[/data0/ai-transform-data/data/3622e363-c39a-4fd2-998c-1a54b793f782/yolov5s_save_path/cutoff_yolov5s.dlc] input_list=[/home/dlc_quan_temp/fQcdgBuukudRwzh/quant.txt] no_weight_quantization=false output_dlc=[/data0/ai-transform-data/data/3622e363-c39a-4fd2-998c-1a54b793f782/yolov5s_save_path/cutoff_yolov5s.dlc] use_enhanced_quantizer=false use_adjusted_weights_quantizer=false optimizations=[] override_params=false use_encoding_optimizations=false udo_package_path=[/home/model_convert_plantform/Aidlux_UDO216_SO/libUdoAidluxUdoPackageReg.so] use_symmetric_quantize_weights=false use_native_dtype=false use_native_input_files=false use_native_output_files=false float_fallback=false use_dynamic_16_bit_weights=false bitwidth=[] weights_bitwidth=[8] act_bitwidth=[16] float_bitwidth=[] bias_bitwidth=[32] float_bias_bitwidth=[] clip_alpha=[] axis_quant=false restrict_quantization_steps=[]
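
In short, this quantization run used 8-bit weights (weights_bitwidth=[8]), 16-bit activations (act_bitwidth=[16]) and 32-bit biases (bias_bitwidth=[32]), matching the uFxp_8 / uFxp_16 / sFxp_32 data types in the layer table; the enhanced and adjusted weight quantizers, symmetric weight quantization and encoding optimizations were all left disabled.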
|
|
DLC created with converter version: 2.16.4.231110151339_60331
|
|
"Ops used by DLC: Concat, Conv2d, Eltwise_Binary, Neuron, Pool, Resize"
|
|
Est. Steady-State Memory Needed to Run: 379.8 MiB
|
|
""
|
|
Cache Info:
|
|
Cache Record Name,SNPE Version,Cache Version,Identifier,Information,Subnets
|
|
backend.metadata0,2.16.4,3.3.0.0,HTP_V75_SM8650_8MB,Record Size: 7.378 MB,Total Subnets: 1
|
|
,,,,Optimization Level: 2,subnet_0:
|
|
,,,,Contains Udo: False, Start Op ID: 0
|
|
,,,,, End Op ID: 198
|
|
,,,,, Input Tensors:
|
|
,,,,," images [1, 640, 640, 3] (UFIXED_POINT_16)"
|
|
,,,,, Output Tensors:
|
|
,,,,," /model.24/m.0/Conv_output_0 [1, 80, 80, 255] (UFIXED_POINT_16)"
|
|
,,,,," /model.24/m.1/Conv_output_0 [1, 40, 40, 255] (UFIXED_POINT_16)"
|
|
,,,,," /model.24/m.2/Conv_output_0 [1, 20, 20, 255] (UFIXED_POINT_16)"
|
|
|