|
program(1.0) |
|
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})] |
|
{ |
|
func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 384, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 1536, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 1536, 1, 448]> value_cache) { |
|
tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)]; |
|
tensor<int32, []> var_24_batch_dims_0 = const()[name = tensor<string, []>("op_24_batch_dims_0"), val = tensor<int32, []>(0)]; |
|
tensor<fp16, [51864, 384]> embed_tokens_weight_to_fp16 = const()[name = tensor<string, []>("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51864, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))]; |
|
tensor<fp16, [1, 384]> var_24_cast_fp16 = gather(axis = var_24_axis_0, batch_dims = var_24_batch_dims_0, indices = input_ids, x = embed_tokens_weight_to_fp16)[name = tensor<string, []>("op_24_cast_fp16")]; |
|
tensor<int32, []> var_28_axis_0 = const()[name = tensor<string, []>("op_28_axis_0"), val = tensor<int32, []>(0)]; |
|
tensor<int32, []> var_28_batch_dims_0 = const()[name = tensor<string, []>("op_28_batch_dims_0"), val = tensor<int32, []>(0)]; |
|
tensor<fp16, [448, 384]> embed_positions_weight_to_fp16 = const()[name = tensor<string, []>("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39831680)))]; |
|
tensor<fp16, [1, 384]> var_28_cast_fp16 = gather(axis = var_28_axis_0, batch_dims = var_28_batch_dims_0, indices = cache_length, x = embed_positions_weight_to_fp16)[name = tensor<string, []>("op_28_cast_fp16")]; |
|
tensor<fp16, [1, 384]> hidden_states_1_cast_fp16 = add(x = var_24_cast_fp16, y = var_28_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")]; |
|
tensor<int32, [1]> var_42_axes_0 = const()[name = tensor<string, []>("op_42_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 384, 1]> var_42_cast_fp16 = expand_dims(axes = var_42_axes_0, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("op_42_cast_fp16")]; |
|
tensor<int32, [1]> inputs_1_axes_0 = const()[name = tensor<string, []>("inputs_1_axes_0"), val = tensor<int32, [1]>([3])]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_42_cast_fp16)[name = tensor<string, []>("inputs_1_cast_fp16")]; |
|
tensor<int32, [4]> tile_0 = const()[name = tensor<string, []>("tile_0"), val = tensor<int32, [4]>([384, 384, 384, 384])]; |
|
tensor<int32, []> var_47_axis_0 = const()[name = tensor<string, []>("op_47_axis_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_3 = split(axis = var_47_axis_0, split_sizes = tile_0, x = key_cache)[name = tensor<string, []>("op_47_cast_fp16")]; |
|
tensor<int32, [4]> tile_1 = const()[name = tensor<string, []>("tile_1"), val = tensor<int32, [4]>([384, 384, 384, 384])]; |
|
tensor<int32, []> var_54_axis_0 = const()[name = tensor<string, []>("op_54_axis_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_3 = split(axis = var_54_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor<string, []>("op_54_cast_fp16")]; |
|
tensor<int32, []> var_64 = const()[name = tensor<string, []>("op_64"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_1_axes_0 = const()[name = tensor<string, []>("out_1_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_90_to_fp16 = const()[name = tensor<string, []>("op_90_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_90_to_fp16, x = inputs_1_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")]; |
|
tensor<fp16, [384]> obj_1_mean_0_to_fp16 = const()[name = tensor<string, []>("obj_1_mean_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40175808)))]; |
|
tensor<fp16, [384]> obj_1_variance_0_to_fp16 = const()[name = tensor<string, []>("obj_1_variance_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40176640)))]; |
|
tensor<fp16, [384]> obj_1_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_1_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40177472)))]; |
|
tensor<fp16, [384]> obj_1_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_1_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40178304)))]; |
|
tensor<fp16, []> obj_1_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_1_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor<string, []>("obj_1_cast_fp16")]; |
|
tensor<string, []> query_1_pad_type_0 = const()[name = tensor<string, []>("query_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_1_strides_0 = const()[name = tensor<string, []>("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_1_pad_0 = const()[name = tensor<string, []>("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_1_dilations_0 = const()[name = tensor<string, []>("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_1_groups_0 = const()[name = tensor<string, []>("query_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40179136)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40474112)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("query_1_cast_fp16")]; |
|
tensor<string, []> current_key_1_pad_type_0 = const()[name = tensor<string, []>("current_key_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_1_strides_0 = const()[name = tensor<string, []>("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_1_pad_0 = const()[name = tensor<string, []>("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_1_dilations_0 = const()[name = tensor<string, []>("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_1_groups_0 = const()[name = tensor<string, []>("current_key_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40474944)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_key_1_cast_fp16")]; |
|
tensor<string, []> current_value_1_pad_type_0 = const()[name = tensor<string, []>("current_value_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_1_strides_0 = const()[name = tensor<string, []>("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_1_pad_0 = const()[name = tensor<string, []>("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_1_dilations_0 = const()[name = tensor<string, []>("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_1_groups_0 = const()[name = tensor<string, []>("current_value_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40769920)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41064896)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_value_1_cast_fp16")]; |
|
tensor<int32, [1]> var_125_axes_0 = const()[name = tensor<string, []>("op_125_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_125_cast_fp16 = expand_dims(axes = var_125_axes_0, x = kv_cache_update_mask)[name = tensor<string, []>("op_125_cast_fp16")]; |
|
tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_126_cast_fp16 = expand_dims(axes = var_126_axes_0, x = var_125_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_128_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_128_cast_fp16")]; |
|
tensor<fp16, []> var_65_to_fp16 = const()[name = tensor<string, []>("op_65_to_fp16"), val = tensor<fp16, []>(0x1p+0)]; |
|
tensor<fp16, [1, 1, 1, 448]> var_129_cast_fp16 = sub(x = var_65_to_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_129_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_130_cast_fp16 = mul(x = var_47_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_130_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_1_cast_fp16 = add(x = var_128_cast_fp16, y = var_130_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_132_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_132_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_134_cast_fp16 = mul(x = var_54_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_1_cast_fp16 = add(x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")]; |
|
tensor<int32, [4]> var_137 = const()[name = tensor<string, []>("op_137"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_137, x = query_1_cast_fp16)[name = tensor<string, []>("mh_q_1_cast_fp16")]; |
|
tensor<fp16, []> var_139_to_fp16 = const()[name = tensor<string, []>("op_139_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_140_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_139_to_fp16)[name = tensor<string, []>("op_140_cast_fp16")]; |
|
tensor<int32, [4]> var_141 = const()[name = tensor<string, []>("op_141"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_142_cast_fp16 = reshape(shape = var_141, x = key_1_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")]; |
|
tensor<bool, []> mh_w_1_transpose_x_0 = const()[name = tensor<string, []>("mh_w_1_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_1_transpose_y_0 = const()[name = tensor<string, []>("mh_w_1_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_140_cast_fp16, y = var_142_cast_fp16)[name = tensor<string, []>("mh_w_1_cast_fp16")]; |
|
tensor<int32, [1]> var_146_axes_0 = const()[name = tensor<string, []>("op_146_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_146_cast_fp16 = expand_dims(axes = var_146_axes_0, x = decoder_key_padding_mask)[name = tensor<string, []>("op_146_cast_fp16")]; |
|
tensor<int32, [1]> var_147_axes_0 = const()[name = tensor<string, []>("op_147_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_147_cast_fp16 = expand_dims(axes = var_147_axes_0, x = var_146_cast_fp16)[name = tensor<string, []>("op_147_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_3_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_150_cast_fp16 = softmax(axis = var_64, x = mh_w_3_cast_fp16)[name = tensor<string, []>("op_150_cast_fp16")]; |
|
tensor<int32, [4]> var_151 = const()[name = tensor<string, []>("op_151"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_152_cast_fp16 = reshape(shape = var_151, x = value_1_cast_fp16)[name = tensor<string, []>("op_152_cast_fp16")]; |
|
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_152_cast_fp16, y = var_150_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")]; |
|
tensor<int32, [4]> var_155 = const()[name = tensor<string, []>("op_155"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_1_cast_fp16 = reshape(shape = var_155, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")]; |
|
tensor<string, []> obj_7_pad_type_0 = const()[name = tensor<string, []>("obj_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_7_strides_0 = const()[name = tensor<string, []>("obj_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_7_pad_0 = const()[name = tensor<string, []>("obj_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_7_dilations_0 = const()[name = tensor<string, []>("obj_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_7_groups_0 = const()[name = tensor<string, []>("obj_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41065728)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41360704)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_7_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_7_dilations_0, groups = obj_7_groups_0, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = obj_7_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("obj_7_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_7_cast_fp16)[name = tensor<string, []>("inputs_3_cast_fp16")]; |
|
tensor<int32, [1]> out_3_axes_0 = const()[name = tensor<string, []>("out_3_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_177_to_fp16 = const()[name = tensor<string, []>("op_177_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_177_to_fp16, x = inputs_3_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")]; |
|
tensor<fp16, [384]> obj_9_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_9_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41361536)))]; |
|
tensor<fp16, [384]> obj_9_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_9_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41362368)))]; |
|
tensor<fp16, []> obj_9_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_9_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor<string, []>("obj_9_cast_fp16")]; |
|
tensor<string, []> query_3_pad_type_0 = const()[name = tensor<string, []>("query_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_3_strides_0 = const()[name = tensor<string, []>("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_3_pad_0 = const()[name = tensor<string, []>("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_3_dilations_0 = const()[name = tensor<string, []>("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_3_groups_0 = const()[name = tensor<string, []>("query_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41363200)))]; |
|
tensor<fp16, [384]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41658176)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("query_3_cast_fp16")]; |
|
tensor<string, []> key_3_pad_type_0 = const()[name = tensor<string, []>("key_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_3_strides_0 = const()[name = tensor<string, []>("key_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_3_pad_0 = const()[name = tensor<string, []>("key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_3_dilations_0 = const()[name = tensor<string, []>("key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_3_groups_0 = const()[name = tensor<string, []>("key_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41659008)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> key_3_cast_fp16 = conv(dilations = key_3_dilations_0, groups = key_3_groups_0, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = key_3_strides_0, weight = layers_0_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_3_cast_fp16")]; |
|
tensor<string, []> value_3_pad_type_0 = const()[name = tensor<string, []>("value_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_3_strides_0 = const()[name = tensor<string, []>("value_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_3_pad_0 = const()[name = tensor<string, []>("value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_3_dilations_0 = const()[name = tensor<string, []>("value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_3_groups_0 = const()[name = tensor<string, []>("value_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41953984)))]; |
|
tensor<fp16, [384]> layers_0_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42248960)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = value_3_dilations_0, groups = value_3_groups_0, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = value_3_strides_0, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_3_cast_fp16")]; |
|
tensor<int32, [4]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_212, x = query_3_cast_fp16)[name = tensor<string, []>("mh_q_3_cast_fp16")]; |
|
tensor<fp16, []> var_214_to_fp16 = const()[name = tensor<string, []>("op_214_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_215_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_214_to_fp16)[name = tensor<string, []>("op_215_cast_fp16")]; |
|
tensor<int32, [4]> var_216 = const()[name = tensor<string, []>("op_216"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_217_cast_fp16 = reshape(shape = var_216, x = key_3_cast_fp16)[name = tensor<string, []>("op_217_cast_fp16")]; |
|
tensor<bool, []> mh_w_5_transpose_x_0 = const()[name = tensor<string, []>("mh_w_5_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_5_transpose_y_0 = const()[name = tensor<string, []>("mh_w_5_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 1500]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_215_cast_fp16, y = var_217_cast_fp16)[name = tensor<string, []>("mh_w_5_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1500]> obj_13_cast_fp16 = softmax(axis = var_64, x = mh_w_5_cast_fp16)[name = tensor<string, []>("obj_13_cast_fp16")]; |
|
tensor<int32, [4]> var_221 = const()[name = tensor<string, []>("op_221"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_222_cast_fp16 = reshape(shape = var_221, x = value_3_cast_fp16)[name = tensor<string, []>("op_222_cast_fp16")]; |
|
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_222_cast_fp16, y = obj_13_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")]; |
|
tensor<int32, [4]> var_225 = const()[name = tensor<string, []>("op_225"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_3_cast_fp16 = reshape(shape = var_225, x = attn_3_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")]; |
|
tensor<string, []> obj_11_pad_type_0 = const()[name = tensor<string, []>("obj_11_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_11_strides_0 = const()[name = tensor<string, []>("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_11_pad_0 = const()[name = tensor<string, []>("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_11_dilations_0 = const()[name = tensor<string, []>("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_11_groups_0 = const()[name = tensor<string, []>("obj_11_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42249792)))]; |
|
tensor<fp16, [384]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42544768)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("obj_11_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_11_cast_fp16)[name = tensor<string, []>("inputs_5_cast_fp16")]; |
|
tensor<int32, [1]> out_5_axes_0 = const()[name = tensor<string, []>("out_5_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_243_to_fp16 = const()[name = tensor<string, []>("op_243_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_243_to_fp16, x = inputs_5_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")]; |
|
tensor<fp16, [384]> input_5_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_5_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42545600)))]; |
|
tensor<fp16, [384]> input_5_beta_0_to_fp16 = const()[name = tensor<string, []>("input_5_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42546432)))]; |
|
tensor<fp16, []> input_5_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_5_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")]; |
|
tensor<string, []> input_7_pad_type_0 = const()[name = tensor<string, []>("input_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_7_strides_0 = const()[name = tensor<string, []>("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_7_dilations_0 = const()[name = tensor<string, []>("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_7_groups_0 = const()[name = tensor<string, []>("input_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42547264)))]; |
|
tensor<fp16, [1536]> layers_0_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(43726976)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")]; |
|
tensor<string, []> input_9_mode_0 = const()[name = tensor<string, []>("input_9_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")]; |
|
tensor<string, []> hidden_states_3_pad_type_0 = const()[name = tensor<string, []>("hidden_states_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = tensor<string, []>("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = tensor<string, []>("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = tensor<string, []>("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_3_groups_0 = const()[name = tensor<string, []>("hidden_states_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(43730112)))]; |
|
tensor<fp16, [384]> layers_0_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44909824)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("hidden_states_3_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = tensor<string, []>("inputs_7_cast_fp16")]; |
|
tensor<int32, []> var_278 = const()[name = tensor<string, []>("op_278"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_7_axes_0 = const()[name = tensor<string, []>("out_7_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_304_to_fp16 = const()[name = tensor<string, []>("op_304_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_304_to_fp16, x = inputs_7_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")]; |
|
tensor<fp16, [384]> obj_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_15_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44910656)))]; |
|
tensor<fp16, [384]> obj_15_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_15_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44911488)))]; |
|
tensor<fp16, []> obj_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_15_cast_fp16 = batch_norm(beta = obj_15_beta_0_to_fp16, epsilon = obj_15_epsilon_0_to_fp16, gamma = obj_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor<string, []>("obj_15_cast_fp16")]; |
|
tensor<string, []> query_5_pad_type_0 = const()[name = tensor<string, []>("query_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_5_strides_0 = const()[name = tensor<string, []>("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_5_pad_0 = const()[name = tensor<string, []>("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_5_dilations_0 = const()[name = tensor<string, []>("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_5_groups_0 = const()[name = tensor<string, []>("query_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44912320)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45207296)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("query_5_cast_fp16")]; |
|
tensor<string, []> current_key_3_pad_type_0 = const()[name = tensor<string, []>("current_key_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_3_strides_0 = const()[name = tensor<string, []>("current_key_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_3_pad_0 = const()[name = tensor<string, []>("current_key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_3_dilations_0 = const()[name = tensor<string, []>("current_key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_3_groups_0 = const()[name = tensor<string, []>("current_key_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45208128)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_3_cast_fp16 = conv(dilations = current_key_3_dilations_0, groups = current_key_3_groups_0, pad = current_key_3_pad_0, pad_type = current_key_3_pad_type_0, strides = current_key_3_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_key_3_cast_fp16")]; |
|
tensor<string, []> current_value_3_pad_type_0 = const()[name = tensor<string, []>("current_value_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_3_strides_0 = const()[name = tensor<string, []>("current_value_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_3_pad_0 = const()[name = tensor<string, []>("current_value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_3_dilations_0 = const()[name = tensor<string, []>("current_value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_3_groups_0 = const()[name = tensor<string, []>("current_value_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45503104)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45798080)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_3_dilations_0, groups = current_value_3_groups_0, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = current_value_3_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_value_3_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_342_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_342_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_344_cast_fp16 = mul(x = var_47_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_344_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_5_cast_fp16 = add(x = var_342_cast_fp16, y = var_344_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_346_cast_fp16 = mul(x = current_value_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_346_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_348_cast_fp16 = mul(x = var_54_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_5_cast_fp16 = add(x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")]; |
|
tensor<int32, [4]> var_351 = const()[name = tensor<string, []>("op_351"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_351, x = query_5_cast_fp16)[name = tensor<string, []>("mh_q_5_cast_fp16")]; |
|
tensor<fp16, []> var_353_to_fp16 = const()[name = tensor<string, []>("op_353_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_354_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_353_to_fp16)[name = tensor<string, []>("op_354_cast_fp16")]; |
|
tensor<int32, [4]> var_355 = const()[name = tensor<string, []>("op_355"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_356_cast_fp16 = reshape(shape = var_355, x = key_5_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")]; |
|
tensor<bool, []> mh_w_7_transpose_x_0 = const()[name = tensor<string, []>("mh_w_7_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_7_transpose_y_0 = const()[name = tensor<string, []>("mh_w_7_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_354_cast_fp16, y = var_356_cast_fp16)[name = tensor<string, []>("mh_w_7_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_9_cast_fp16 = add(x = mh_w_7_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_9_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_364_cast_fp16 = softmax(axis = var_278, x = mh_w_9_cast_fp16)[name = tensor<string, []>("op_364_cast_fp16")]; |
|
tensor<int32, [4]> var_365 = const()[name = tensor<string, []>("op_365"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_366_cast_fp16 = reshape(shape = var_365, x = value_5_cast_fp16)[name = tensor<string, []>("op_366_cast_fp16")]; |
|
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_366_cast_fp16, y = var_364_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")]; |
|
tensor<int32, [4]> var_369 = const()[name = tensor<string, []>("op_369"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_11_cast_fp16 = reshape(shape = var_369, x = attn_5_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")]; |
|
tensor<string, []> obj_21_pad_type_0 = const()[name = tensor<string, []>("obj_21_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_21_strides_0 = const()[name = tensor<string, []>("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_21_pad_0 = const()[name = tensor<string, []>("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_21_dilations_0 = const()[name = tensor<string, []>("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_21_groups_0 = const()[name = tensor<string, []>("obj_21_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45798912)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46093888)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("obj_21_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_21_cast_fp16)[name = tensor<string, []>("inputs_9_cast_fp16")]; |
|
tensor<int32, [1]> out_9_axes_0 = const()[name = tensor<string, []>("out_9_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_391_to_fp16 = const()[name = tensor<string, []>("op_391_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_391_to_fp16, x = inputs_9_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")]; |
|
tensor<fp16, [384]> obj_23_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_23_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46094720)))]; |
|
tensor<fp16, [384]> obj_23_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_23_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46095552)))]; |
|
tensor<fp16, []> obj_23_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_23_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_23_cast_fp16 = batch_norm(beta = obj_23_beta_0_to_fp16, epsilon = obj_23_epsilon_0_to_fp16, gamma = obj_23_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor<string, []>("obj_23_cast_fp16")]; |
|
tensor<string, []> query_7_pad_type_0 = const()[name = tensor<string, []>("query_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_7_strides_0 = const()[name = tensor<string, []>("query_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_7_pad_0 = const()[name = tensor<string, []>("query_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_7_dilations_0 = const()[name = tensor<string, []>("query_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_7_groups_0 = const()[name = tensor<string, []>("query_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46096384)))]; |
|
tensor<fp16, [384]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46391360)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_7_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_7_dilations_0, groups = query_7_groups_0, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = query_7_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_23_cast_fp16)[name = tensor<string, []>("query_7_cast_fp16")]; |
|
tensor<string, []> key_7_pad_type_0 = const()[name = tensor<string, []>("key_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_7_strides_0 = const()[name = tensor<string, []>("key_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_7_pad_0 = const()[name = tensor<string, []>("key_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_7_dilations_0 = const()[name = tensor<string, []>("key_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_7_groups_0 = const()[name = tensor<string, []>("key_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46392192)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> key_7_cast_fp16 = conv(dilations = key_7_dilations_0, groups = key_7_groups_0, pad = key_7_pad_0, pad_type = key_7_pad_type_0, strides = key_7_strides_0, weight = layers_1_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_7_cast_fp16")]; |
|
tensor<string, []> value_7_pad_type_0 = const()[name = tensor<string, []>("value_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_7_strides_0 = const()[name = tensor<string, []>("value_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_7_pad_0 = const()[name = tensor<string, []>("value_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_7_dilations_0 = const()[name = tensor<string, []>("value_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_7_groups_0 = const()[name = tensor<string, []>("value_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46687168)))]; |
|
tensor<fp16, [384]> layers_1_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46982144)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> value_7_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = value_7_dilations_0, groups = value_7_groups_0, pad = value_7_pad_0, pad_type = value_7_pad_type_0, strides = value_7_strides_0, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_7_cast_fp16")]; |
|
tensor<int32, [4]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_7_cast_fp16 = reshape(shape = var_426, x = query_7_cast_fp16)[name = tensor<string, []>("mh_q_7_cast_fp16")]; |
|
tensor<fp16, []> var_428_to_fp16 = const()[name = tensor<string, []>("op_428_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_429_cast_fp16 = mul(x = mh_q_7_cast_fp16, y = var_428_to_fp16)[name = tensor<string, []>("op_429_cast_fp16")]; |
|
tensor<int32, [4]> var_430 = const()[name = tensor<string, []>("op_430"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_431_cast_fp16 = reshape(shape = var_430, x = key_7_cast_fp16)[name = tensor<string, []>("op_431_cast_fp16")]; |
|
tensor<bool, []> mh_w_11_transpose_x_0 = const()[name = tensor<string, []>("mh_w_11_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_11_transpose_y_0 = const()[name = tensor<string, []>("mh_w_11_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 1500]> mh_w_11_cast_fp16 = matmul(transpose_x = mh_w_11_transpose_x_0, transpose_y = mh_w_11_transpose_y_0, x = var_429_cast_fp16, y = var_431_cast_fp16)[name = tensor<string, []>("mh_w_11_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1500]> obj_27_cast_fp16 = softmax(axis = var_278, x = mh_w_11_cast_fp16)[name = tensor<string, []>("obj_27_cast_fp16")]; |
|
tensor<int32, [4]> var_435 = const()[name = tensor<string, []>("op_435"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_436_cast_fp16 = reshape(shape = var_435, x = value_7_cast_fp16)[name = tensor<string, []>("op_436_cast_fp16")]; |
|
tensor<bool, []> attn_7_transpose_x_0 = const()[name = tensor<string, []>("attn_7_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_7_transpose_y_0 = const()[name = tensor<string, []>("attn_7_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_7_cast_fp16 = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_436_cast_fp16, y = obj_27_cast_fp16)[name = tensor<string, []>("attn_7_cast_fp16")]; |
|
tensor<int32, [4]> var_439 = const()[name = tensor<string, []>("op_439"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_13_cast_fp16 = reshape(shape = var_439, x = attn_7_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")]; |
|
tensor<string, []> obj_25_pad_type_0 = const()[name = tensor<string, []>("obj_25_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_25_strides_0 = const()[name = tensor<string, []>("obj_25_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_25_pad_0 = const()[name = tensor<string, []>("obj_25_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_25_dilations_0 = const()[name = tensor<string, []>("obj_25_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_25_groups_0 = const()[name = tensor<string, []>("obj_25_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46982976)))]; |
|
tensor<fp16, [384]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47277952)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_25_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_25_dilations_0, groups = obj_25_groups_0, pad = obj_25_pad_0, pad_type = obj_25_pad_type_0, strides = obj_25_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = tensor<string, []>("obj_25_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_25_cast_fp16)[name = tensor<string, []>("inputs_11_cast_fp16")]; |
|
tensor<int32, [1]> out_11_axes_0 = const()[name = tensor<string, []>("out_11_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_460_to_fp16 = const()[name = tensor<string, []>("op_460_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_460_to_fp16, x = inputs_11_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")]; |
|
tensor<fp16, [384]> input_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_15_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47278784)))]; |
|
tensor<fp16, [384]> input_15_beta_0_to_fp16 = const()[name = tensor<string, []>("input_15_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47279616)))]; |
|
tensor<fp16, []> input_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")]; |
|
tensor<string, []> input_17_pad_type_0 = const()[name = tensor<string, []>("input_17_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_17_strides_0 = const()[name = tensor<string, []>("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_17_pad_0 = const()[name = tensor<string, []>("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_17_dilations_0 = const()[name = tensor<string, []>("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_17_groups_0 = const()[name = tensor<string, []>("input_17_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47280448)))]; |
|
tensor<fp16, [1536]> layers_1_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48460160)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")]; |
|
tensor<string, []> input_19_mode_0 = const()[name = tensor<string, []>("input_19_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_19_cast_fp16 = gelu(mode = input_19_mode_0, x = input_17_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")]; |
|
tensor<string, []> hidden_states_5_pad_type_0 = const()[name = tensor<string, []>("hidden_states_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = tensor<string, []>("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = tensor<string, []>("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = tensor<string, []>("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_5_groups_0 = const()[name = tensor<string, []>("hidden_states_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48463296)))]; |
|
tensor<fp16, [384]> layers_1_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49643008)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("hidden_states_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor<string, []>("inputs_13_cast_fp16")]; |
|
tensor<int32, []> var_496 = const()[name = tensor<string, []>("op_496"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_13_axes_0 = const()[name = tensor<string, []>("out_13_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_522_to_fp16 = const()[name = tensor<string, []>("op_522_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_13_cast_fp16 = layer_norm(axes = out_13_axes_0, epsilon = var_522_to_fp16, x = inputs_13_cast_fp16)[name = tensor<string, []>("out_13_cast_fp16")]; |
|
tensor<fp16, [384]> obj_29_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_29_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49643840)))]; |
|
tensor<fp16, [384]> obj_29_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_29_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49644672)))]; |
|
tensor<fp16, []> obj_29_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_29_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_29_cast_fp16 = batch_norm(beta = obj_29_beta_0_to_fp16, epsilon = obj_29_epsilon_0_to_fp16, gamma = obj_29_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_13_cast_fp16)[name = tensor<string, []>("obj_29_cast_fp16")]; |
|
tensor<string, []> query_9_pad_type_0 = const()[name = tensor<string, []>("query_9_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_9_strides_0 = const()[name = tensor<string, []>("query_9_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_9_pad_0 = const()[name = tensor<string, []>("query_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_9_dilations_0 = const()[name = tensor<string, []>("query_9_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_9_groups_0 = const()[name = tensor<string, []>("query_9_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49645504)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49940480)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_9_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = query_9_dilations_0, groups = query_9_groups_0, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = query_9_strides_0, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor<string, []>("query_9_cast_fp16")]; |
|
tensor<string, []> current_key_5_pad_type_0 = const()[name = tensor<string, []>("current_key_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_5_strides_0 = const()[name = tensor<string, []>("current_key_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_5_pad_0 = const()[name = tensor<string, []>("current_key_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_5_dilations_0 = const()[name = tensor<string, []>("current_key_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_5_groups_0 = const()[name = tensor<string, []>("current_key_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49941312)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_5_cast_fp16 = conv(dilations = current_key_5_dilations_0, groups = current_key_5_groups_0, pad = current_key_5_pad_0, pad_type = current_key_5_pad_type_0, strides = current_key_5_strides_0, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor<string, []>("current_key_5_cast_fp16")]; |
|
tensor<string, []> current_value_5_pad_type_0 = const()[name = tensor<string, []>("current_value_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_5_strides_0 = const()[name = tensor<string, []>("current_value_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_5_pad_0 = const()[name = tensor<string, []>("current_value_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_5_dilations_0 = const()[name = tensor<string, []>("current_value_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_5_groups_0 = const()[name = tensor<string, []>("current_value_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50236288)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50531264)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = current_value_5_dilations_0, groups = current_value_5_groups_0, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = current_value_5_strides_0, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor<string, []>("current_value_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_560_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_560_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_562_cast_fp16 = mul(x = var_47_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_562_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_9_cast_fp16 = add(x = var_560_cast_fp16, y = var_562_cast_fp16)[name = tensor<string, []>("key_9_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_564_cast_fp16 = mul(x = current_value_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_564_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_566_cast_fp16 = mul(x = var_54_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_9_cast_fp16 = add(x = var_564_cast_fp16, y = var_566_cast_fp16)[name = tensor<string, []>("value_9_cast_fp16")]; |
|
tensor<int32, [4]> var_569 = const()[name = tensor<string, []>("op_569"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_9_cast_fp16 = reshape(shape = var_569, x = query_9_cast_fp16)[name = tensor<string, []>("mh_q_9_cast_fp16")]; |
|
tensor<fp16, []> var_571_to_fp16 = const()[name = tensor<string, []>("op_571_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_572_cast_fp16 = mul(x = mh_q_9_cast_fp16, y = var_571_to_fp16)[name = tensor<string, []>("op_572_cast_fp16")]; |
|
tensor<int32, [4]> var_573 = const()[name = tensor<string, []>("op_573"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_574_cast_fp16 = reshape(shape = var_573, x = key_9_cast_fp16)[name = tensor<string, []>("op_574_cast_fp16")]; |
|
tensor<bool, []> mh_w_13_transpose_x_0 = const()[name = tensor<string, []>("mh_w_13_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_13_transpose_y_0 = const()[name = tensor<string, []>("mh_w_13_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_572_cast_fp16, y = var_574_cast_fp16)[name = tensor<string, []>("mh_w_13_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_15_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_15_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_582_cast_fp16 = softmax(axis = var_496, x = mh_w_15_cast_fp16)[name = tensor<string, []>("op_582_cast_fp16")]; |
|
tensor<int32, [4]> var_583 = const()[name = tensor<string, []>("op_583"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_584_cast_fp16 = reshape(shape = var_583, x = value_9_cast_fp16)[name = tensor<string, []>("op_584_cast_fp16")]; |
|
tensor<bool, []> attn_9_transpose_x_0 = const()[name = tensor<string, []>("attn_9_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_9_transpose_y_0 = const()[name = tensor<string, []>("attn_9_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_584_cast_fp16, y = var_582_cast_fp16)[name = tensor<string, []>("attn_9_cast_fp16")]; |
|
tensor<int32, [4]> var_587 = const()[name = tensor<string, []>("op_587"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_21_cast_fp16 = reshape(shape = var_587, x = attn_9_cast_fp16)[name = tensor<string, []>("input_21_cast_fp16")]; |
|
tensor<string, []> obj_35_pad_type_0 = const()[name = tensor<string, []>("obj_35_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_35_strides_0 = const()[name = tensor<string, []>("obj_35_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_35_pad_0 = const()[name = tensor<string, []>("obj_35_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_35_dilations_0 = const()[name = tensor<string, []>("obj_35_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_35_groups_0 = const()[name = tensor<string, []>("obj_35_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50532096)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50827072)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_35_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = obj_35_dilations_0, groups = obj_35_groups_0, pad = obj_35_pad_0, pad_type = obj_35_pad_type_0, strides = obj_35_strides_0, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_21_cast_fp16)[name = tensor<string, []>("obj_35_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_35_cast_fp16)[name = tensor<string, []>("inputs_15_cast_fp16")]; |
|
tensor<int32, [1]> out_15_axes_0 = const()[name = tensor<string, []>("out_15_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_609_to_fp16 = const()[name = tensor<string, []>("op_609_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_15_cast_fp16 = layer_norm(axes = out_15_axes_0, epsilon = var_609_to_fp16, x = inputs_15_cast_fp16)[name = tensor<string, []>("out_15_cast_fp16")]; |
|
tensor<fp16, [384]> obj_37_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_37_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50827904)))]; |
|
tensor<fp16, [384]> obj_37_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_37_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50828736)))]; |
|
tensor<fp16, []> obj_37_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_37_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_37_cast_fp16 = batch_norm(beta = obj_37_beta_0_to_fp16, epsilon = obj_37_epsilon_0_to_fp16, gamma = obj_37_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_15_cast_fp16)[name = tensor<string, []>("obj_37_cast_fp16")]; |
|
tensor<string, []> query_11_pad_type_0 = const()[name = tensor<string, []>("query_11_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_11_strides_0 = const()[name = tensor<string, []>("query_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_11_pad_0 = const()[name = tensor<string, []>("query_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_11_dilations_0 = const()[name = tensor<string, []>("query_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_11_groups_0 = const()[name = tensor<string, []>("query_11_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50829568)))]; |
|
tensor<fp16, [384]> layers_2_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51124544)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_11_cast_fp16 = conv(bias = layers_2_encoder_attn_q_proj_bias_to_fp16, dilations = query_11_dilations_0, groups = query_11_groups_0, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = query_11_strides_0, weight = layers_2_encoder_attn_q_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor<string, []>("query_11_cast_fp16")]; |
|
tensor<string, []> key_11_pad_type_0 = const()[name = tensor<string, []>("key_11_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_11_strides_0 = const()[name = tensor<string, []>("key_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_11_pad_0 = const()[name = tensor<string, []>("key_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_11_dilations_0 = const()[name = tensor<string, []>("key_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_11_groups_0 = const()[name = tensor<string, []>("key_11_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51125376)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> key_11_cast_fp16 = conv(dilations = key_11_dilations_0, groups = key_11_groups_0, pad = key_11_pad_0, pad_type = key_11_pad_type_0, strides = key_11_strides_0, weight = layers_2_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_11_cast_fp16")]; |
|
tensor<string, []> value_11_pad_type_0 = const()[name = tensor<string, []>("value_11_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_11_strides_0 = const()[name = tensor<string, []>("value_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_11_pad_0 = const()[name = tensor<string, []>("value_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_11_dilations_0 = const()[name = tensor<string, []>("value_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_11_groups_0 = const()[name = tensor<string, []>("value_11_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51420352)))]; |
|
tensor<fp16, [384]> layers_2_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51715328)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> value_11_cast_fp16 = conv(bias = layers_2_encoder_attn_v_proj_bias_to_fp16, dilations = value_11_dilations_0, groups = value_11_groups_0, pad = value_11_pad_0, pad_type = value_11_pad_type_0, strides = value_11_strides_0, weight = layers_2_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_11_cast_fp16")]; |
|
tensor<int32, [4]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_11_cast_fp16 = reshape(shape = var_644, x = query_11_cast_fp16)[name = tensor<string, []>("mh_q_11_cast_fp16")]; |
|
tensor<fp16, []> var_646_to_fp16 = const()[name = tensor<string, []>("op_646_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_647_cast_fp16 = mul(x = mh_q_11_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("op_647_cast_fp16")]; |
|
tensor<int32, [4]> var_648 = const()[name = tensor<string, []>("op_648"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_649_cast_fp16 = reshape(shape = var_648, x = key_11_cast_fp16)[name = tensor<string, []>("op_649_cast_fp16")]; |
|
tensor<bool, []> mh_w_17_transpose_x_0 = const()[name = tensor<string, []>("mh_w_17_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_17_transpose_y_0 = const()[name = tensor<string, []>("mh_w_17_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 1500]> mh_w_17_cast_fp16 = matmul(transpose_x = mh_w_17_transpose_x_0, transpose_y = mh_w_17_transpose_y_0, x = var_647_cast_fp16, y = var_649_cast_fp16)[name = tensor<string, []>("mh_w_17_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1500]> obj_41_cast_fp16 = softmax(axis = var_496, x = mh_w_17_cast_fp16)[name = tensor<string, []>("obj_41_cast_fp16")]; |
|
tensor<int32, [4]> var_653 = const()[name = tensor<string, []>("op_653"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_654_cast_fp16 = reshape(shape = var_653, x = value_11_cast_fp16)[name = tensor<string, []>("op_654_cast_fp16")]; |
|
tensor<bool, []> attn_11_transpose_x_0 = const()[name = tensor<string, []>("attn_11_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_11_transpose_y_0 = const()[name = tensor<string, []>("attn_11_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_11_cast_fp16 = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_654_cast_fp16, y = obj_41_cast_fp16)[name = tensor<string, []>("attn_11_cast_fp16")]; |
|
tensor<int32, [4]> var_657 = const()[name = tensor<string, []>("op_657"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_23_cast_fp16 = reshape(shape = var_657, x = attn_11_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")]; |
|
tensor<string, []> obj_39_pad_type_0 = const()[name = tensor<string, []>("obj_39_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_39_strides_0 = const()[name = tensor<string, []>("obj_39_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_39_pad_0 = const()[name = tensor<string, []>("obj_39_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_39_dilations_0 = const()[name = tensor<string, []>("obj_39_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_39_groups_0 = const()[name = tensor<string, []>("obj_39_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51716160)))]; |
|
tensor<fp16, [384]> layers_2_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(52011136)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_39_cast_fp16 = conv(bias = layers_2_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_2_encoder_attn_o_proj_weight_to_fp16, x = input_23_cast_fp16)[name = tensor<string, []>("obj_39_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_17_cast_fp16 = add(x = inputs_15_cast_fp16, y = obj_39_cast_fp16)[name = tensor<string, []>("inputs_17_cast_fp16")]; |
|
tensor<int32, [1]> out_17_axes_0 = const()[name = tensor<string, []>("out_17_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_678_to_fp16 = const()[name = tensor<string, []>("op_678_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_17_cast_fp16 = layer_norm(axes = out_17_axes_0, epsilon = var_678_to_fp16, x = inputs_17_cast_fp16)[name = tensor<string, []>("out_17_cast_fp16")]; |
|
tensor<fp16, [384]> input_25_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_25_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(52011968)))]; |
|
tensor<fp16, [384]> input_25_beta_0_to_fp16 = const()[name = tensor<string, []>("input_25_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(52012800)))]; |
|
tensor<fp16, []> input_25_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_25_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_25_cast_fp16 = batch_norm(beta = input_25_beta_0_to_fp16, epsilon = input_25_epsilon_0_to_fp16, gamma = input_25_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_17_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")]; |
|
tensor<string, []> input_27_pad_type_0 = const()[name = tensor<string, []>("input_27_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_27_strides_0 = const()[name = tensor<string, []>("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_27_pad_0 = const()[name = tensor<string, []>("input_27_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_27_dilations_0 = const()[name = tensor<string, []>("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_27_groups_0 = const()[name = tensor<string, []>("input_27_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_2_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(52013632)))]; |
|
tensor<fp16, [1536]> layers_2_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53193344)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_27_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = layers_2_fc1_weight_to_fp16, x = input_25_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")]; |
|
tensor<string, []> input_29_mode_0 = const()[name = tensor<string, []>("input_29_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_29_cast_fp16 = gelu(mode = input_29_mode_0, x = input_27_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")]; |
|
tensor<string, []> hidden_states_7_pad_type_0 = const()[name = tensor<string, []>("hidden_states_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_7_strides_0 = const()[name = tensor<string, []>("hidden_states_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_7_pad_0 = const()[name = tensor<string, []>("hidden_states_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_7_dilations_0 = const()[name = tensor<string, []>("hidden_states_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_7_groups_0 = const()[name = tensor<string, []>("hidden_states_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_2_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53196480)))]; |
|
tensor<fp16, [384]> layers_2_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54376192)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_7_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = hidden_states_7_dilations_0, groups = hidden_states_7_groups_0, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = hidden_states_7_strides_0, weight = layers_2_fc2_weight_to_fp16, x = input_29_cast_fp16)[name = tensor<string, []>("hidden_states_7_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_19_cast_fp16 = add(x = inputs_17_cast_fp16, y = hidden_states_7_cast_fp16)[name = tensor<string, []>("inputs_19_cast_fp16")]; |
|
tensor<int32, []> var_714 = const()[name = tensor<string, []>("op_714"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_19_axes_0 = const()[name = tensor<string, []>("out_19_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_740_to_fp16 = const()[name = tensor<string, []>("op_740_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_19_cast_fp16 = layer_norm(axes = out_19_axes_0, epsilon = var_740_to_fp16, x = inputs_19_cast_fp16)[name = tensor<string, []>("out_19_cast_fp16")]; |
|
tensor<fp16, [384]> obj_43_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_43_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54377024)))]; |
|
tensor<fp16, [384]> obj_43_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_43_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54377856)))]; |
|
tensor<fp16, []> obj_43_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_43_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_43_cast_fp16 = batch_norm(beta = obj_43_beta_0_to_fp16, epsilon = obj_43_epsilon_0_to_fp16, gamma = obj_43_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_19_cast_fp16)[name = tensor<string, []>("obj_43_cast_fp16")]; |
|
tensor<string, []> query_13_pad_type_0 = const()[name = tensor<string, []>("query_13_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_13_strides_0 = const()[name = tensor<string, []>("query_13_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_13_pad_0 = const()[name = tensor<string, []>("query_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_13_dilations_0 = const()[name = tensor<string, []>("query_13_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_13_groups_0 = const()[name = tensor<string, []>("query_13_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54378688)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54673664)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_13_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = query_13_dilations_0, groups = query_13_groups_0, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = query_13_strides_0, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor<string, []>("query_13_cast_fp16")]; |
|
tensor<string, []> current_key_pad_type_0 = const()[name = tensor<string, []>("current_key_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_strides_0 = const()[name = tensor<string, []>("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_pad_0 = const()[name = tensor<string, []>("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_dilations_0 = const()[name = tensor<string, []>("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_groups_0 = const()[name = tensor<string, []>("current_key_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54674496)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor<string, []>("current_key_cast_fp16")]; |
|
tensor<string, []> current_value_pad_type_0 = const()[name = tensor<string, []>("current_value_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_strides_0 = const()[name = tensor<string, []>("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_pad_0 = const()[name = tensor<string, []>("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_dilations_0 = const()[name = tensor<string, []>("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_groups_0 = const()[name = tensor<string, []>("current_value_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54969472)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55264448)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor<string, []>("current_value_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_778_cast_fp16 = mul(x = current_key_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_778_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_780_cast_fp16 = mul(x = var_47_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_780_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_13_cast_fp16 = add(x = var_778_cast_fp16, y = var_780_cast_fp16)[name = tensor<string, []>("key_13_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_782_cast_fp16 = mul(x = current_value_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_782_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_784_cast_fp16 = mul(x = var_54_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_784_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_13_cast_fp16 = add(x = var_782_cast_fp16, y = var_784_cast_fp16)[name = tensor<string, []>("value_13_cast_fp16")]; |
|
tensor<int32, [4]> var_787 = const()[name = tensor<string, []>("op_787"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_13_cast_fp16 = reshape(shape = var_787, x = query_13_cast_fp16)[name = tensor<string, []>("mh_q_13_cast_fp16")]; |
|
tensor<fp16, []> var_789_to_fp16 = const()[name = tensor<string, []>("op_789_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_790_cast_fp16 = mul(x = mh_q_13_cast_fp16, y = var_789_to_fp16)[name = tensor<string, []>("op_790_cast_fp16")]; |
|
tensor<int32, [4]> var_791 = const()[name = tensor<string, []>("op_791"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_792_cast_fp16 = reshape(shape = var_791, x = key_13_cast_fp16)[name = tensor<string, []>("op_792_cast_fp16")]; |
|
tensor<bool, []> mh_w_19_transpose_x_0 = const()[name = tensor<string, []>("mh_w_19_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_19_transpose_y_0 = const()[name = tensor<string, []>("mh_w_19_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_19_cast_fp16 = matmul(transpose_x = mh_w_19_transpose_x_0, transpose_y = mh_w_19_transpose_y_0, x = var_790_cast_fp16, y = var_792_cast_fp16)[name = tensor<string, []>("mh_w_19_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_21_cast_fp16 = add(x = mh_w_19_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_21_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_800_cast_fp16 = softmax(axis = var_714, x = mh_w_21_cast_fp16)[name = tensor<string, []>("op_800_cast_fp16")]; |
|
tensor<int32, [4]> var_801 = const()[name = tensor<string, []>("op_801"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_802_cast_fp16 = reshape(shape = var_801, x = value_13_cast_fp16)[name = tensor<string, []>("op_802_cast_fp16")]; |
|
tensor<bool, []> attn_13_transpose_x_0 = const()[name = tensor<string, []>("attn_13_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_13_transpose_y_0 = const()[name = tensor<string, []>("attn_13_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_802_cast_fp16, y = var_800_cast_fp16)[name = tensor<string, []>("attn_13_cast_fp16")]; |
|
tensor<int32, [4]> var_805 = const()[name = tensor<string, []>("op_805"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_31_cast_fp16 = reshape(shape = var_805, x = attn_13_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")]; |
|
tensor<string, []> obj_49_pad_type_0 = const()[name = tensor<string, []>("obj_49_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_49_strides_0 = const()[name = tensor<string, []>("obj_49_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_49_pad_0 = const()[name = tensor<string, []>("obj_49_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_49_dilations_0 = const()[name = tensor<string, []>("obj_49_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_49_groups_0 = const()[name = tensor<string, []>("obj_49_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55265280)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55560256)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_49_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = obj_49_dilations_0, groups = obj_49_groups_0, pad = obj_49_pad_0, pad_type = obj_49_pad_type_0, strides = obj_49_strides_0, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_31_cast_fp16)[name = tensor<string, []>("obj_49_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_21_cast_fp16 = add(x = inputs_19_cast_fp16, y = obj_49_cast_fp16)[name = tensor<string, []>("inputs_21_cast_fp16")]; |
|
tensor<int32, [1]> out_21_axes_0 = const()[name = tensor<string, []>("out_21_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_827_to_fp16 = const()[name = tensor<string, []>("op_827_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_21_cast_fp16 = layer_norm(axes = out_21_axes_0, epsilon = var_827_to_fp16, x = inputs_21_cast_fp16)[name = tensor<string, []>("out_21_cast_fp16")]; |
|
tensor<fp16, [384]> obj_51_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_51_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55561088)))]; |
|
tensor<fp16, [384]> obj_51_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_51_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55561920)))]; |
|
tensor<fp16, []> obj_51_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_51_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_51_cast_fp16 = batch_norm(beta = obj_51_beta_0_to_fp16, epsilon = obj_51_epsilon_0_to_fp16, gamma = obj_51_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_21_cast_fp16)[name = tensor<string, []>("obj_51_cast_fp16")]; |
|
tensor<string, []> query_pad_type_0 = const()[name = tensor<string, []>("query_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_strides_0 = const()[name = tensor<string, []>("query_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_pad_0 = const()[name = tensor<string, []>("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_dilations_0 = const()[name = tensor<string, []>("query_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_groups_0 = const()[name = tensor<string, []>("query_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55562752)))]; |
|
tensor<fp16, [384]> layers_3_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55857728)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_cast_fp16 = conv(bias = layers_3_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_3_encoder_attn_q_proj_weight_to_fp16, x = obj_51_cast_fp16)[name = tensor<string, []>("query_cast_fp16")]; |
|
tensor<string, []> key_pad_type_0 = const()[name = tensor<string, []>("key_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_strides_0 = const()[name = tensor<string, []>("key_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_pad_0 = const()[name = tensor<string, []>("key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_dilations_0 = const()[name = tensor<string, []>("key_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_groups_0 = const()[name = tensor<string, []>("key_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55858560)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> key_cast_fp16 = conv(dilations = key_dilations_0, groups = key_groups_0, pad = key_pad_0, pad_type = key_pad_type_0, strides = key_strides_0, weight = layers_3_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_cast_fp16")]; |
|
tensor<string, []> value_pad_type_0 = const()[name = tensor<string, []>("value_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_strides_0 = const()[name = tensor<string, []>("value_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_pad_0 = const()[name = tensor<string, []>("value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_dilations_0 = const()[name = tensor<string, []>("value_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_groups_0 = const()[name = tensor<string, []>("value_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56153536)))]; |
|
tensor<fp16, [384]> layers_3_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56448512)))]; |
|
tensor<fp16, [1, 384, 1, 1500]> value_cast_fp16 = conv(bias = layers_3_encoder_attn_v_proj_bias_to_fp16, dilations = value_dilations_0, groups = value_groups_0, pad = value_pad_0, pad_type = value_pad_type_0, strides = value_strides_0, weight = layers_3_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_cast_fp16")]; |
|
tensor<int32, [4]> var_862 = const()[name = tensor<string, []>("op_862"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_862, x = query_cast_fp16)[name = tensor<string, []>("mh_q_cast_fp16")]; |
|
tensor<fp16, []> var_864_to_fp16 = const()[name = tensor<string, []>("op_864_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_865_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_864_to_fp16)[name = tensor<string, []>("op_865_cast_fp16")]; |
|
tensor<int32, [4]> var_866 = const()[name = tensor<string, []>("op_866"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_867_cast_fp16 = reshape(shape = var_866, x = key_cast_fp16)[name = tensor<string, []>("op_867_cast_fp16")]; |
|
tensor<bool, []> mh_w_transpose_x_0 = const()[name = tensor<string, []>("mh_w_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_transpose_y_0 = const()[name = tensor<string, []>("mh_w_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 6, 1, 1500]> mh_w_cast_fp16 = matmul(transpose_x = mh_w_transpose_x_0, transpose_y = mh_w_transpose_y_0, x = var_865_cast_fp16, y = var_867_cast_fp16)[name = tensor<string, []>("mh_w_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1500]> obj_55_cast_fp16 = softmax(axis = var_714, x = mh_w_cast_fp16)[name = tensor<string, []>("obj_55_cast_fp16")]; |
|
tensor<int32, [4]> var_871 = const()[name = tensor<string, []>("op_871"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1500]> var_872_cast_fp16 = reshape(shape = var_871, x = value_cast_fp16)[name = tensor<string, []>("op_872_cast_fp16")]; |
|
tensor<bool, []> attn_transpose_x_0 = const()[name = tensor<string, []>("attn_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_transpose_y_0 = const()[name = tensor<string, []>("attn_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_872_cast_fp16, y = obj_55_cast_fp16)[name = tensor<string, []>("attn_cast_fp16")]; |
|
tensor<int32, [4]> var_875 = const()[name = tensor<string, []>("op_875"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_33_cast_fp16 = reshape(shape = var_875, x = attn_cast_fp16)[name = tensor<string, []>("input_33_cast_fp16")]; |
|
tensor<string, []> obj_53_pad_type_0 = const()[name = tensor<string, []>("obj_53_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_53_strides_0 = const()[name = tensor<string, []>("obj_53_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_53_pad_0 = const()[name = tensor<string, []>("obj_53_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_53_dilations_0 = const()[name = tensor<string, []>("obj_53_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_53_groups_0 = const()[name = tensor<string, []>("obj_53_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56449344)))]; |
|
tensor<fp16, [384]> layers_3_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56744320)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_53_cast_fp16 = conv(bias = layers_3_encoder_attn_o_proj_bias_to_fp16, dilations = obj_53_dilations_0, groups = obj_53_groups_0, pad = obj_53_pad_0, pad_type = obj_53_pad_type_0, strides = obj_53_strides_0, weight = layers_3_encoder_attn_o_proj_weight_to_fp16, x = input_33_cast_fp16)[name = tensor<string, []>("obj_53_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_23_cast_fp16 = add(x = inputs_21_cast_fp16, y = obj_53_cast_fp16)[name = tensor<string, []>("inputs_23_cast_fp16")]; |
|
tensor<int32, [1]> out_23_axes_0 = const()[name = tensor<string, []>("out_23_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_896_to_fp16 = const()[name = tensor<string, []>("op_896_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_23_cast_fp16 = layer_norm(axes = out_23_axes_0, epsilon = var_896_to_fp16, x = inputs_23_cast_fp16)[name = tensor<string, []>("out_23_cast_fp16")]; |
|
tensor<fp16, [384]> input_35_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_35_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56745152)))]; |
|
tensor<fp16, [384]> input_35_beta_0_to_fp16 = const()[name = tensor<string, []>("input_35_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56745984)))]; |
|
tensor<fp16, []> input_35_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_35_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_35_cast_fp16 = batch_norm(beta = input_35_beta_0_to_fp16, epsilon = input_35_epsilon_0_to_fp16, gamma = input_35_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_23_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")]; |
|
tensor<string, []> input_37_pad_type_0 = const()[name = tensor<string, []>("input_37_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_37_strides_0 = const()[name = tensor<string, []>("input_37_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_37_pad_0 = const()[name = tensor<string, []>("input_37_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_37_dilations_0 = const()[name = tensor<string, []>("input_37_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_37_groups_0 = const()[name = tensor<string, []>("input_37_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_3_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56746816)))]; |
|
tensor<fp16, [1536]> layers_3_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(57926528)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_37_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = input_37_dilations_0, groups = input_37_groups_0, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = input_37_strides_0, weight = layers_3_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")]; |
|
tensor<string, []> input_mode_0 = const()[name = tensor<string, []>("input_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_37_cast_fp16)[name = tensor<string, []>("input_cast_fp16")]; |
|
tensor<string, []> hidden_states_9_pad_type_0 = const()[name = tensor<string, []>("hidden_states_9_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_9_strides_0 = const()[name = tensor<string, []>("hidden_states_9_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_9_pad_0 = const()[name = tensor<string, []>("hidden_states_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_9_dilations_0 = const()[name = tensor<string, []>("hidden_states_9_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_9_groups_0 = const()[name = tensor<string, []>("hidden_states_9_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_3_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(57929664)))]; |
|
tensor<fp16, [384]> layers_3_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(59109376)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_9_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = hidden_states_9_dilations_0, groups = hidden_states_9_groups_0, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = hidden_states_9_strides_0, weight = layers_3_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("hidden_states_9_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_cast_fp16 = add(x = inputs_23_cast_fp16, y = hidden_states_9_cast_fp16)[name = tensor<string, []>("inputs_cast_fp16")]; |
|
tensor<int32, [1]> out_axes_0 = const()[name = tensor<string, []>("out_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_939_to_fp16 = const()[name = tensor<string, []>("op_939_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_939_to_fp16, x = inputs_cast_fp16)[name = tensor<string, []>("out_cast_fp16")]; |
|
tensor<fp16, [384]> hidden_states_gamma_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(59110208)))]; |
|
tensor<fp16, [384]> hidden_states_beta_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(59111040)))]; |
|
tensor<fp16, []> hidden_states_epsilon_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_cast_fp16)[name = tensor<string, []>("hidden_states_cast_fp16")]; |
|
tensor<int32, [1]> var_950_axes_0 = const()[name = tensor<string, []>("op_950_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 384, 1]> var_950_cast_fp16 = squeeze(axes = var_950_axes_0, x = hidden_states_cast_fp16)[name = tensor<string, []>("op_950_cast_fp16")]; |
|
tensor<int32, [3]> var_953_perm_0 = const()[name = tensor<string, []>("op_953_perm_0"), val = tensor<int32, [3]>([0, 2, 1])]; |
|
tensor<fp16, [51864]> linear_0_bias_0_to_fp16 = const()[name = tensor<string, []>("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(59111872)))]; |
|
tensor<fp16, [1, 1, 384]> var_953_cast_fp16 = transpose(perm = var_953_perm_0, x = var_950_cast_fp16)[name = tensor<string, []>("transpose_0")]; |
|
tensor<fp16, [1, 1, 51864]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_953_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")]; |
|
tensor<int32, []> var_957 = const()[name = tensor<string, []>("op_957"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> obj_59_interleave_0 = const()[name = tensor<string, []>("obj_59_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 1536, 1, 1]> key_cache_updates = concat(axis = var_957, interleave = obj_59_interleave_0, values = (current_key_1_cast_fp16, current_key_3_cast_fp16, current_key_5_cast_fp16, current_key_cast_fp16))[name = tensor<string, []>("obj_59_cast_fp16")]; |
|
tensor<int32, []> var_960 = const()[name = tensor<string, []>("op_960"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> obj_61_interleave_0 = const()[name = tensor<string, []>("obj_61_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 1536, 1, 1]> value_cache_updates = concat(axis = var_960, interleave = obj_61_interleave_0, values = (current_value_1_cast_fp16, current_value_3_cast_fp16, current_value_5_cast_fp16, current_value_cast_fp16))[name = tensor<string, []>("obj_61_cast_fp16")]; |
|
tensor<int32, [4]> var_971_begin_0 = const()[name = tensor<string, []>("op_971_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_971_end_0 = const()[name = tensor<string, []>("op_971_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_971_end_mask_0 = const()[name = tensor<string, []>("op_971_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_971_cast_fp16 = slice_by_index(begin = var_971_begin_0, end = var_971_end_0, end_mask = var_971_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_971_cast_fp16")]; |
|
tensor<int32, [4]> var_974_begin_0 = const()[name = tensor<string, []>("op_974_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_974_end_0 = const()[name = tensor<string, []>("op_974_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_974_end_mask_0 = const()[name = tensor<string, []>("op_974_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_974_squeeze_mask_0 = const()[name = tensor<string, []>("op_974_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_974_cast_fp16 = slice_by_index(begin = var_974_begin_0, end = var_974_end_0, end_mask = var_974_end_mask_0, squeeze_mask = var_974_squeeze_mask_0, x = var_971_cast_fp16)[name = tensor<string, []>("op_974_cast_fp16")]; |
|
tensor<int32, [4]> var_989_begin_0 = const()[name = tensor<string, []>("op_989_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_989_end_0 = const()[name = tensor<string, []>("op_989_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_989_end_mask_0 = const()[name = tensor<string, []>("op_989_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_989_cast_fp16 = slice_by_index(begin = var_989_begin_0, end = var_989_end_0, end_mask = var_989_end_mask_0, x = obj_41_cast_fp16)[name = tensor<string, []>("op_989_cast_fp16")]; |
|
tensor<int32, [4]> var_992_begin_0 = const()[name = tensor<string, []>("op_992_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_992_end_0 = const()[name = tensor<string, []>("op_992_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_992_end_mask_0 = const()[name = tensor<string, []>("op_992_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_992_squeeze_mask_0 = const()[name = tensor<string, []>("op_992_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_992_cast_fp16 = slice_by_index(begin = var_992_begin_0, end = var_992_end_0, end_mask = var_992_end_mask_0, squeeze_mask = var_992_squeeze_mask_0, x = var_989_cast_fp16)[name = tensor<string, []>("op_992_cast_fp16")]; |
|
tensor<int32, [4]> var_1007_begin_0 = const()[name = tensor<string, []>("op_1007_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])]; |
|
tensor<int32, [4]> var_1007_end_0 = const()[name = tensor<string, []>("op_1007_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1500])]; |
|
tensor<bool, [4]> var_1007_end_mask_0 = const()[name = tensor<string, []>("op_1007_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1007_cast_fp16 = slice_by_index(begin = var_1007_begin_0, end = var_1007_end_0, end_mask = var_1007_end_mask_0, x = obj_41_cast_fp16)[name = tensor<string, []>("op_1007_cast_fp16")]; |
|
tensor<int32, [4]> var_1010_begin_0 = const()[name = tensor<string, []>("op_1010_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1010_end_0 = const()[name = tensor<string, []>("op_1010_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1010_end_mask_0 = const()[name = tensor<string, []>("op_1010_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1010_squeeze_mask_0 = const()[name = tensor<string, []>("op_1010_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1010_cast_fp16 = slice_by_index(begin = var_1010_begin_0, end = var_1010_end_0, end_mask = var_1010_end_mask_0, squeeze_mask = var_1010_squeeze_mask_0, x = var_1007_cast_fp16)[name = tensor<string, []>("op_1010_cast_fp16")]; |
|
tensor<int32, [4]> var_1025_begin_0 = const()[name = tensor<string, []>("op_1025_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1025_end_0 = const()[name = tensor<string, []>("op_1025_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1025_end_mask_0 = const()[name = tensor<string, []>("op_1025_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1025_cast_fp16 = slice_by_index(begin = var_1025_begin_0, end = var_1025_end_0, end_mask = var_1025_end_mask_0, x = obj_55_cast_fp16)[name = tensor<string, []>("op_1025_cast_fp16")]; |
|
tensor<int32, [4]> var_1028_begin_0 = const()[name = tensor<string, []>("op_1028_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1028_end_0 = const()[name = tensor<string, []>("op_1028_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1028_end_mask_0 = const()[name = tensor<string, []>("op_1028_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1028_squeeze_mask_0 = const()[name = tensor<string, []>("op_1028_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1028_cast_fp16 = slice_by_index(begin = var_1028_begin_0, end = var_1028_end_0, end_mask = var_1028_end_mask_0, squeeze_mask = var_1028_squeeze_mask_0, x = var_1025_cast_fp16)[name = tensor<string, []>("op_1028_cast_fp16")]; |
|
tensor<int32, [4]> var_1043_begin_0 = const()[name = tensor<string, []>("op_1043_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])]; |
|
tensor<int32, [4]> var_1043_end_0 = const()[name = tensor<string, []>("op_1043_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1500])]; |
|
tensor<bool, [4]> var_1043_end_mask_0 = const()[name = tensor<string, []>("op_1043_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1043_cast_fp16 = slice_by_index(begin = var_1043_begin_0, end = var_1043_end_0, end_mask = var_1043_end_mask_0, x = obj_55_cast_fp16)[name = tensor<string, []>("op_1043_cast_fp16")]; |
|
tensor<int32, [4]> var_1046_begin_0 = const()[name = tensor<string, []>("op_1046_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1046_end_0 = const()[name = tensor<string, []>("op_1046_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1046_end_mask_0 = const()[name = tensor<string, []>("op_1046_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1046_squeeze_mask_0 = const()[name = tensor<string, []>("op_1046_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1046_cast_fp16 = slice_by_index(begin = var_1046_begin_0, end = var_1046_end_0, end_mask = var_1046_end_mask_0, squeeze_mask = var_1046_squeeze_mask_0, x = var_1043_cast_fp16)[name = tensor<string, []>("op_1046_cast_fp16")]; |
|
tensor<int32, [4]> var_1061_begin_0 = const()[name = tensor<string, []>("op_1061_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])]; |
|
tensor<int32, [4]> var_1061_end_0 = const()[name = tensor<string, []>("op_1061_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1500])]; |
|
tensor<bool, [4]> var_1061_end_mask_0 = const()[name = tensor<string, []>("op_1061_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1061_cast_fp16 = slice_by_index(begin = var_1061_begin_0, end = var_1061_end_0, end_mask = var_1061_end_mask_0, x = obj_55_cast_fp16)[name = tensor<string, []>("op_1061_cast_fp16")]; |
|
tensor<int32, [4]> var_1064_begin_0 = const()[name = tensor<string, []>("op_1064_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1064_end_0 = const()[name = tensor<string, []>("op_1064_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1064_end_mask_0 = const()[name = tensor<string, []>("op_1064_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1064_squeeze_mask_0 = const()[name = tensor<string, []>("op_1064_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1064_cast_fp16 = slice_by_index(begin = var_1064_begin_0, end = var_1064_end_0, end_mask = var_1064_end_mask_0, squeeze_mask = var_1064_squeeze_mask_0, x = var_1061_cast_fp16)[name = tensor<string, []>("op_1064_cast_fp16")]; |
|
tensor<int32, [4]> var_1079_begin_0 = const()[name = tensor<string, []>("op_1079_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])]; |
|
tensor<int32, [4]> var_1079_end_0 = const()[name = tensor<string, []>("op_1079_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1500])]; |
|
tensor<bool, [4]> var_1079_end_mask_0 = const()[name = tensor<string, []>("op_1079_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1079_cast_fp16 = slice_by_index(begin = var_1079_begin_0, end = var_1079_end_0, end_mask = var_1079_end_mask_0, x = obj_55_cast_fp16)[name = tensor<string, []>("op_1079_cast_fp16")]; |
|
tensor<int32, [4]> var_1082_begin_0 = const()[name = tensor<string, []>("op_1082_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1082_end_0 = const()[name = tensor<string, []>("op_1082_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1082_end_mask_0 = const()[name = tensor<string, []>("op_1082_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1082_squeeze_mask_0 = const()[name = tensor<string, []>("op_1082_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1082_cast_fp16 = slice_by_index(begin = var_1082_begin_0, end = var_1082_end_0, end_mask = var_1082_end_mask_0, squeeze_mask = var_1082_squeeze_mask_0, x = var_1079_cast_fp16)[name = tensor<string, []>("op_1082_cast_fp16")]; |
|
tensor<int32, [4]> var_1097_begin_0 = const()[name = tensor<string, []>("op_1097_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])]; |
|
tensor<int32, [4]> var_1097_end_0 = const()[name = tensor<string, []>("op_1097_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1500])]; |
|
tensor<bool, [4]> var_1097_end_mask_0 = const()[name = tensor<string, []>("op_1097_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_1097_cast_fp16 = slice_by_index(begin = var_1097_begin_0, end = var_1097_end_0, end_mask = var_1097_end_mask_0, x = obj_55_cast_fp16)[name = tensor<string, []>("op_1097_cast_fp16")]; |
|
tensor<int32, [4]> var_1100_begin_0 = const()[name = tensor<string, []>("op_1100_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1100_end_0 = const()[name = tensor<string, []>("op_1100_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_1100_end_mask_0 = const()[name = tensor<string, []>("op_1100_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1100_squeeze_mask_0 = const()[name = tensor<string, []>("op_1100_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_1100_cast_fp16 = slice_by_index(begin = var_1100_begin_0, end = var_1100_end_0, end_mask = var_1100_end_mask_0, squeeze_mask = var_1100_squeeze_mask_0, x = var_1097_cast_fp16)[name = tensor<string, []>("op_1100_cast_fp16")]; |
|
tensor<int32, []> var_1107 = const()[name = tensor<string, []>("op_1107"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> var_1108_interleave_0 = const()[name = tensor<string, []>("op_1108_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 8, 1500]> var_1108_cast_fp16 = concat(axis = var_1107, interleave = var_1108_interleave_0, values = (var_974_cast_fp16, var_992_cast_fp16, var_1010_cast_fp16, var_1028_cast_fp16, var_1046_cast_fp16, var_1064_cast_fp16, var_1082_cast_fp16, var_1100_cast_fp16))[name = tensor<string, []>("op_1108_cast_fp16")]; |
|
tensor<bool, []> var_1111 = const()[name = tensor<string, []>("op_1111"), val = tensor<bool, []>(false)]; |
|
tensor<int32, [1]> obj_axes_0 = const()[name = tensor<string, []>("obj_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1500]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_1111, x = var_1108_cast_fp16)[name = tensor<string, []>("obj_cast_fp16")]; |
|
} -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights); |
|
} |