#include "llm/shl_llm.h"
struct shl_llm_model *load_shl_model(void *base)
{
    struct shl_llm_model *model = shl_mem_alloc(sizeof(struct shl_llm_model));
    struct csinn_tensor *t0 = csinn_alloc_tensor(NULL);
    t0->data = base + 0x0;
    t0->dtype = CSINN_DTYPE_FLOAT16;
    t0->dim_count = 2;
    t0->dim[0] = 32000;
    t0->dim[1] = 4096;
    t0->name = "token_embd.weight";
    model->tok_embeddings = t0;
    struct csinn_tensor *t1 = csinn_alloc_tensor(NULL);
    t1->data = base + 0xfa00000;
    t1->dtype = CSINN_DTYPE_FLOAT32;
    t1->dim_count = 1;
    t1->dim[0] = 4096;
    t1->name = "output_norm.weight";
    model->output_norm = t1;
    struct csinn_tensor *t2 = csinn_alloc_tensor(NULL);
    t2->data = base + 0xfa04000;
    t2->dtype = CSINN_DTYPE_FLOAT16;
    t2->dim_count = 2;
    t2->dim[0] = 32000;
    t2->dim[1] = 4096;
    t2->name = "output.weight";
    model->output = t2;
    struct csinn_tensor *t3 = csinn_alloc_tensor(NULL);
    t3->data = base + 0x1f404000;
    t3->dtype = CSINN_DTYPE_FLOAT16;
    t3->dim_count = 2;
    t3->dim[0] = 4096;
    t3->dim[1] = 4096;
    t3->name = "blk.0.attn_q.weight";
    model->layers[0].wq = t3;
    struct csinn_tensor *t4 = csinn_alloc_tensor(NULL);
    t4->data = base + 0x21404000;
    t4->dtype = CSINN_DTYPE_FLOAT16;
    t4->dim_count = 2;
    t4->dim[0] = 4096;
    t4->dim[1] = 4096;
    t4->name = "blk.0.attn_k.weight";
    model->layers[0].wk = t4;
    struct csinn_tensor *t5 = csinn_alloc_tensor(NULL);
    t5->data = base + 0x23404000;
    t5->dtype = CSINN_DTYPE_FLOAT16;
    t5->dim_count = 2;
    t5->dim[0] = 4096;
    t5->dim[1] = 4096;
    t5->name = "blk.0.attn_v.weight";
    model->layers[0].wv = t5;
    struct csinn_tensor *t6 = csinn_alloc_tensor(NULL);
    t6->data = base + 0x25404000;
    t6->dtype = CSINN_DTYPE_FLOAT16;
    t6->dim_count = 2;
    t6->dim[0] = 4096;
    t6->dim[1] = 4096;
    t6->name = "blk.0.attn_output.weight";
    model->layers[0].wo = t6;
    struct csinn_tensor *t7 = csinn_alloc_tensor(NULL);
    t7->data = base + 0x27404000;
    t7->dtype = CSINN_DTYPE_FLOAT16;
    t7->dim_count = 2;
    t7->dim[0] = 11008;
    t7->dim[1] = 4096;
    t7->name = "blk.0.ffn_gate.weight";
    model->layers[0].w1 = t7;
    struct csinn_tensor *t8 = csinn_alloc_tensor(NULL);
    t8->data = base + 0x2ca04000;
    t8->dtype = CSINN_DTYPE_FLOAT16;
    t8->dim_count = 2;
    t8->dim[0] = 4096;
    t8->dim[1] = 11008;
    t8->name = "blk.0.ffn_down.weight";
    model->layers[0].w2 = t8;
    struct csinn_tensor *t9 = csinn_alloc_tensor(NULL);
    t9->data = base + 0x32004000;
    t9->dtype = CSINN_DTYPE_FLOAT16;
    t9->dim_count = 2;
    t9->dim[0] = 11008;
    t9->dim[1] = 4096;
    t9->name = "blk.0.ffn_up.weight";
    model->layers[0].w3 = t9;
    struct csinn_tensor *t10 = csinn_alloc_tensor(NULL);
    t10->data = base + 0x37604000;
    t10->dtype = CSINN_DTYPE_FLOAT32;
    t10->dim_count = 1;
    t10->dim[0] = 4096;
    t10->name = "blk.0.attn_norm.weight";
    model->layers[0].attn_norm = t10;
    struct csinn_tensor *t11 = csinn_alloc_tensor(NULL);
    t11->data = base + 0x37608000;
    t11->dtype = CSINN_DTYPE_FLOAT32;
    t11->dim_count = 1;
    t11->dim[0] = 4096;
    t11->name = "blk.0.ffn_norm.weight";
    model->layers[0].ffn_norm = t11;
    struct csinn_tensor *t12 = csinn_alloc_tensor(NULL);
    t12->data = base + 0x3760c000;
    t12->dtype = CSINN_DTYPE_FLOAT16;
    t12->dim_count = 2;
    t12->dim[0] = 4096;
    t12->dim[1] = 4096;
    t12->name = "blk.1.attn_q.weight";
    model->layers[1].wq = t12;
    struct csinn_tensor *t13 = csinn_alloc_tensor(NULL);
    t13->data = base + 0x3960c000;
    t13->dtype = CSINN_DTYPE_FLOAT16;
    t13->dim_count = 2;
    t13->dim[0] = 4096;
    t13->dim[1] = 4096;
    t13->name = "blk.1.attn_k.weight";
    model->layers[1].wk = t13;
    struct csinn_tensor *t14 = csinn_alloc_tensor(NULL);
    t14->data = base + 0x3b60c000;
    t14->dtype = CSINN_DTYPE_FLOAT16;
    t14->dim_count = 2;
    t14->dim[0] = 4096;
    t14->dim[1] = 4096;
    t14->name = "blk.1.attn_v.weight";
    model->layers[1].wv = t14;
    struct csinn_tensor *t15 = csinn_alloc_tensor(NULL);
    t15->data = base + 0x3d60c000;
    t15->dtype = CSINN_DTYPE_FLOAT16;
    t15->dim_count = 2;
    t15->dim[0] = 4096;
    t15->dim[1] = 4096;
    t15->name = "blk.1.attn_output.weight";
    model->layers[1].wo = t15;
    struct csinn_tensor *t16 = csinn_alloc_tensor(NULL);
    t16->data = base + 0x3f60c000;
    t16->dtype = CSINN_DTYPE_FLOAT16;
    t16->dim_count = 2;
    t16->dim[0] = 11008;
    t16->dim[1] = 4096;
    t16->name = "blk.1.ffn_gate.weight";
    model->layers[1].w1 = t16;
    struct csinn_tensor *t17 = csinn_alloc_tensor(NULL);
    t17->data = base + 0x44c0c000;
    t17->dtype = CSINN_DTYPE_FLOAT16;
    t17->dim_count = 2;
    t17->dim[0] = 4096;
    t17->dim[1] = 11008;
    t17->name = "blk.1.ffn_down.weight";
    model->layers[1].w2 = t17;
    struct csinn_tensor *t18 = csinn_alloc_tensor(NULL);
    t18->data = base + 0x4a20c000;
    t18->dtype = CSINN_DTYPE_FLOAT16;
    t18->dim_count = 2;
    t18->dim[0] = 11008;
    t18->dim[1] = 4096;
    t18->name = "blk.1.ffn_up.weight";
    model->layers[1].w3 = t18;
    struct csinn_tensor *t19 = csinn_alloc_tensor(NULL);
    t19->data = base + 0x4f80c000;
    t19->dtype = CSINN_DTYPE_FLOAT32;
    t19->dim_count = 1;
    t19->dim[0] = 4096;
    t19->name = "blk.1.attn_norm.weight";
    model->layers[1].attn_norm = t19;
    struct csinn_tensor *t20 = csinn_alloc_tensor(NULL);
    t20->data = base + 0x4f810000;
    t20->dtype = CSINN_DTYPE_FLOAT32;
    t20->dim_count = 1;
    t20->dim[0] = 4096;
    t20->name = "blk.1.ffn_norm.weight";
    model->layers[1].ffn_norm = t20;
    struct csinn_tensor *t21 = csinn_alloc_tensor(NULL);
    t21->data = base + 0x4f814000;
    t21->dtype = CSINN_DTYPE_FLOAT16;
    t21->dim_count = 2;
    t21->dim[0] = 4096;
    t21->dim[1] = 4096;
    t21->name = "blk.2.attn_q.weight";
    model->layers[2].wq = t21;
    struct csinn_tensor *t22 = csinn_alloc_tensor(NULL);
    t22->data = base + 0x51814000;
    t22->dtype = CSINN_DTYPE_FLOAT16;
    t22->dim_count = 2;
    t22->dim[0] = 4096;
    t22->dim[1] = 4096;
    t22->name = "blk.2.attn_k.weight";
    model->layers[2].wk = t22;
    struct csinn_tensor *t23 = csinn_alloc_tensor(NULL);
    t23->data = base + 0x53814000;
    t23->dtype = CSINN_DTYPE_FLOAT16;
    t23->dim_count = 2;
    t23->dim[0] = 4096;
    t23->dim[1] = 4096;
    t23->name = "blk.2.attn_v.weight";
    model->layers[2].wv = t23;
    struct csinn_tensor *t24 = csinn_alloc_tensor(NULL);
    t24->data = base + 0x55814000;
    t24->dtype = CSINN_DTYPE_FLOAT16;
    t24->dim_count = 2;
    t24->dim[0] = 4096;
    t24->dim[1] = 4096;
    t24->name = "blk.2.attn_output.weight";
    model->layers[2].wo = t24;
    struct csinn_tensor *t25 = csinn_alloc_tensor(NULL);
    t25->data = base + 0x57814000;
    t25->dtype = CSINN_DTYPE_FLOAT16;
    t25->dim_count = 2;
    t25->dim[0] = 11008;
    t25->dim[1] = 4096;
    t25->name = "blk.2.ffn_gate.weight";
    model->layers[2].w1 = t25;
    struct csinn_tensor *t26 = csinn_alloc_tensor(NULL);
    t26->data = base + 0x5ce14000;
    t26->dtype = CSINN_DTYPE_FLOAT16;
    t26->dim_count = 2;
    t26->dim[0] = 4096;
    t26->dim[1] = 11008;
    t26->name = "blk.2.ffn_down.weight";
    model->layers[2].w2 = t26;
    struct csinn_tensor *t27 = csinn_alloc_tensor(NULL);
    t27->data = base + 0x62414000;
    t27->dtype = CSINN_DTYPE_FLOAT16;
    t27->dim_count = 2;
    t27->dim[0] = 11008;
    t27->dim[1] = 4096;
    t27->name = "blk.2.ffn_up.weight";
    model->layers[2].w3 = t27;
    struct csinn_tensor *t28 = csinn_alloc_tensor(NULL);
    t28->data = base + 0x67a14000;
    t28->dtype = CSINN_DTYPE_FLOAT32;
    t28->dim_count = 1;
    t28->dim[0] = 4096;
    t28->name = "blk.2.attn_norm.weight";
    model->layers[2].attn_norm = t28;
    struct csinn_tensor *t29 = csinn_alloc_tensor(NULL);
    t29->data = base + 0x67a18000;
    t29->dtype = CSINN_DTYPE_FLOAT32;
    t29->dim_count = 1;
    t29->dim[0] = 4096;
    t29->name = "blk.2.ffn_norm.weight";
    model->layers[2].ffn_norm = t29;
    struct csinn_tensor *t30 = csinn_alloc_tensor(NULL);
    t30->data = base + 0x67a1c000;
    t30->dtype = CSINN_DTYPE_FLOAT16;
    t30->dim_count = 2;
    t30->dim[0] = 4096;
    t30->dim[1] = 4096;
    t30->name = "blk.3.attn_q.weight";
    model->layers[3].wq = t30;
    struct csinn_tensor *t31 = csinn_alloc_tensor(NULL);
    t31->data = base + 0x69a1c000;
    t31->dtype = CSINN_DTYPE_FLOAT16;
    t31->dim_count = 2;
    t31->dim[0] = 4096;
    t31->dim[1] = 4096;
    t31->name = "blk.3.attn_k.weight";
    model->layers[3].wk = t31;
    struct csinn_tensor *t32 = csinn_alloc_tensor(NULL);
    t32->data = base + 0x6ba1c000;
    t32->dtype = CSINN_DTYPE_FLOAT16;
    t32->dim_count = 2;
    t32->dim[0] = 4096;
    t32->dim[1] = 4096;
    t32->name = "blk.3.attn_v.weight";
    model->layers[3].wv = t32;
    struct csinn_tensor *t33 = csinn_alloc_tensor(NULL);
    t33->data = base + 0x6da1c000;
    t33->dtype = CSINN_DTYPE_FLOAT16;
    t33->dim_count = 2;
    t33->dim[0] = 4096;
    t33->dim[1] = 4096;
    t33->name = "blk.3.attn_output.weight";
    model->layers[3].wo = t33;
    struct csinn_tensor *t34 = csinn_alloc_tensor(NULL);
    t34->data = base + 0x6fa1c000;
    t34->dtype = CSINN_DTYPE_FLOAT16;
    t34->dim_count = 2;
    t34->dim[0] = 11008;
    t34->dim[1] = 4096;
    t34->name = "blk.3.ffn_gate.weight";
    model->layers[3].w1 = t34;
    struct csinn_tensor *t35 = csinn_alloc_tensor(NULL);
    t35->data = base + 0x7501c000;
    t35->dtype = CSINN_DTYPE_FLOAT16;
    t35->dim_count = 2;
    t35->dim[0] = 4096;
    t35->dim[1] = 11008;
    t35->name = "blk.3.ffn_down.weight";
    model->layers[3].w2 = t35;
    struct csinn_tensor *t36 = csinn_alloc_tensor(NULL);
    t36->data = base + 0x7a61c000;
    t36->dtype = CSINN_DTYPE_FLOAT16;
    t36->dim_count = 2;
    t36->dim[0] = 11008;
    t36->dim[1] = 4096;
    t36->name = "blk.3.ffn_up.weight";
    model->layers[3].w3 = t36;
    struct csinn_tensor *t37 = csinn_alloc_tensor(NULL);
    t37->data = base + 0x7fc1c000;
    t37->dtype = CSINN_DTYPE_FLOAT32;
    t37->dim_count = 1;
    t37->dim[0] = 4096;
    t37->name = "blk.3.attn_norm.weight";
    model->layers[3].attn_norm = t37;
    struct csinn_tensor *t38 = csinn_alloc_tensor(NULL);
    t38->data = base + 0x7fc20000;
    t38->dtype = CSINN_DTYPE_FLOAT32;
    t38->dim_count = 1;
    t38->dim[0] = 4096;
    t38->name = "blk.3.ffn_norm.weight";
    model->layers[3].ffn_norm = t38;
    struct csinn_tensor *t39 = csinn_alloc_tensor(NULL);
    t39->data = base + 0x7fc24000;
    t39->dtype = CSINN_DTYPE_FLOAT16;
    t39->dim_count = 2;
    t39->dim[0] = 4096;
    t39->dim[1] = 4096;
    t39->name = "blk.4.attn_q.weight";
    model->layers[4].wq = t39;
    struct csinn_tensor *t40 = csinn_alloc_tensor(NULL);
    t40->data = base + 0x81c24000;
    t40->dtype = CSINN_DTYPE_FLOAT16;
    t40->dim_count = 2;
    t40->dim[0] = 4096;
    t40->dim[1] = 4096;
    t40->name = "blk.4.attn_k.weight";
    model->layers[4].wk = t40;
    struct csinn_tensor *t41 = csinn_alloc_tensor(NULL);
    t41->data = base + 0x83c24000;
    t41->dtype = CSINN_DTYPE_FLOAT16;
    t41->dim_count = 2;
    t41->dim[0] = 4096;
    t41->dim[1] = 4096;
    t41->name = "blk.4.attn_v.weight";
    model->layers[4].wv = t41;
    struct csinn_tensor *t42 = csinn_alloc_tensor(NULL);
    t42->data = base + 0x85c24000;
    t42->dtype = CSINN_DTYPE_FLOAT16;
    t42->dim_count = 2;
    t42->dim[0] = 4096;
    t42->dim[1] = 4096;
    t42->name = "blk.4.attn_output.weight";
    model->layers[4].wo = t42;
    struct csinn_tensor *t43 = csinn_alloc_tensor(NULL);
    t43->data = base + 0x87c24000;
    t43->dtype = CSINN_DTYPE_FLOAT16;
    t43->dim_count = 2;
    t43->dim[0] = 11008;
    t43->dim[1] = 4096;
    t43->name = "blk.4.ffn_gate.weight";
    model->layers[4].w1 = t43;
    struct csinn_tensor *t44 = csinn_alloc_tensor(NULL);
    t44->data = base + 0x8d224000;
    t44->dtype = CSINN_DTYPE_FLOAT16;
    t44->dim_count = 2;
    t44->dim[0] = 4096;
    t44->dim[1] = 11008;
    t44->name = "blk.4.ffn_down.weight";
    model->layers[4].w2 = t44;
    struct csinn_tensor *t45 = csinn_alloc_tensor(NULL);
    t45->data = base + 0x92824000;
    t45->dtype = CSINN_DTYPE_FLOAT16;
    t45->dim_count = 2;
    t45->dim[0] = 11008;
    t45->dim[1] = 4096;
    t45->name = "blk.4.ffn_up.weight";
    model->layers[4].w3 = t45;
    struct csinn_tensor *t46 = csinn_alloc_tensor(NULL);
    t46->data = base + 0x97e24000;
    t46->dtype = CSINN_DTYPE_FLOAT32;
    t46->dim_count = 1;
    t46->dim[0] = 4096;
    t46->name = "blk.4.attn_norm.weight";
    model->layers[4].attn_norm = t46;
    struct csinn_tensor *t47 = csinn_alloc_tensor(NULL);
    t47->data = base + 0x97e28000;
    t47->dtype = CSINN_DTYPE_FLOAT32;
    t47->dim_count = 1;
    t47->dim[0] = 4096;
    t47->name = "blk.4.ffn_norm.weight";
    model->layers[4].ffn_norm = t47;
    struct csinn_tensor *t48 = csinn_alloc_tensor(NULL);
    t48->data = base + 0x97e2c000;
    t48->dtype = CSINN_DTYPE_FLOAT16;
    t48->dim_count = 2;
    t48->dim[0] = 4096;
    t48->dim[1] = 4096;
    t48->name = "blk.5.attn_q.weight";
    model->layers[5].wq = t48;
    struct csinn_tensor *t49 = csinn_alloc_tensor(NULL);
    t49->data = base + 0x99e2c000;
    t49->dtype = CSINN_DTYPE_FLOAT16;
    t49->dim_count = 2;
    t49->dim[0] = 4096;
    t49->dim[1] = 4096;
    t49->name = "blk.5.attn_k.weight";
    model->layers[5].wk = t49;
    struct csinn_tensor *t50 = csinn_alloc_tensor(NULL);
    t50->data = base + 0x9be2c000;
    t50->dtype = CSINN_DTYPE_FLOAT16;
    t50->dim_count = 2;
    t50->dim[0] = 4096;
    t50->dim[1] = 4096;
    t50->name = "blk.5.attn_v.weight";
    model->layers[5].wv = t50;
    struct csinn_tensor *t51 = csinn_alloc_tensor(NULL);
    t51->data = base + 0x9de2c000;
    t51->dtype = CSINN_DTYPE_FLOAT16;
    t51->dim_count = 2;
    t51->dim[0] = 4096;
    t51->dim[1] = 4096;
    t51->name = "blk.5.attn_output.weight";
    model->layers[5].wo = t51;
    struct csinn_tensor *t52 = csinn_alloc_tensor(NULL);
    t52->data = base + 0x9fe2c000;
    t52->dtype = CSINN_DTYPE_FLOAT16;
    t52->dim_count = 2;
    t52->dim[0] = 11008;
    t52->dim[1] = 4096;
    t52->name = "blk.5.ffn_gate.weight";
    model->layers[5].w1 = t52;
    struct csinn_tensor *t53 = csinn_alloc_tensor(NULL);
    t53->data = base + 0xa542c000;
    t53->dtype = CSINN_DTYPE_FLOAT16;
    t53->dim_count = 2;
    t53->dim[0] = 4096;
    t53->dim[1] = 11008;
    t53->name = "blk.5.ffn_down.weight";
    model->layers[5].w2 = t53;
    struct csinn_tensor *t54 = csinn_alloc_tensor(NULL);
    t54->data = base + 0xaaa2c000;
    t54->dtype = CSINN_DTYPE_FLOAT16;
    t54->dim_count = 2;
    t54->dim[0] = 11008;
    t54->dim[1] = 4096;
    t54->name = "blk.5.ffn_up.weight";
    model->layers[5].w3 = t54;
    struct csinn_tensor *t55 = csinn_alloc_tensor(NULL);
    t55->data = base + 0xb002c000;
    t55->dtype = CSINN_DTYPE_FLOAT32;
    t55->dim_count = 1;
    t55->dim[0] = 4096;
    t55->name = "blk.5.attn_norm.weight";
    model->layers[5].attn_norm = t55;
    struct csinn_tensor *t56 = csinn_alloc_tensor(NULL);
    t56->data = base + 0xb0030000;
    t56->dtype = CSINN_DTYPE_FLOAT32;
    t56->dim_count = 1;
    t56->dim[0] = 4096;
    t56->name = "blk.5.ffn_norm.weight";
    model->layers[5].ffn_norm = t56;
    struct csinn_tensor *t57 = csinn_alloc_tensor(NULL);
    t57->data = base + 0xb0034000;
    t57->dtype = CSINN_DTYPE_FLOAT16;
    t57->dim_count = 2;
    t57->dim[0] = 4096;
    t57->dim[1] = 4096;
    t57->name = "blk.6.attn_q.weight";
    model->layers[6].wq = t57;
    struct csinn_tensor *t58 = csinn_alloc_tensor(NULL);
    t58->data = base + 0xb2034000;
    t58->dtype = CSINN_DTYPE_FLOAT16;
    t58->dim_count = 2;
    t58->dim[0] = 4096;
    t58->dim[1] = 4096;
    t58->name = "blk.6.attn_k.weight";
    model->layers[6].wk = t58;
    struct csinn_tensor *t59 = csinn_alloc_tensor(NULL);
    t59->data = base + 0xb4034000;
    t59->dtype = CSINN_DTYPE_FLOAT16;
    t59->dim_count = 2;
    t59->dim[0] = 4096;
    t59->dim[1] = 4096;
    t59->name = "blk.6.attn_v.weight";
    model->layers[6].wv = t59;
    struct csinn_tensor *t60 = csinn_alloc_tensor(NULL);
    t60->data = base + 0xb6034000;
    t60->dtype = CSINN_DTYPE_FLOAT16;
    t60->dim_count = 2;
    t60->dim[0] = 4096;
    t60->dim[1] = 4096;
    t60->name = "blk.6.attn_output.weight";
    model->layers[6].wo = t60;
    struct csinn_tensor *t61 = csinn_alloc_tensor(NULL);
    t61->data = base + 0xb8034000;
    t61->dtype = CSINN_DTYPE_FLOAT16;
    t61->dim_count = 2;
    t61->dim[0] = 11008;
    t61->dim[1] = 4096;
    t61->name = "blk.6.ffn_gate.weight";
    model->layers[6].w1 = t61;
    struct csinn_tensor *t62 = csinn_alloc_tensor(NULL);
    t62->data = base + 0xbd634000;
    t62->dtype = CSINN_DTYPE_FLOAT16;
    t62->dim_count = 2;
    t62->dim[0] = 4096;
    t62->dim[1] = 11008;
    t62->name = "blk.6.ffn_down.weight";
    model->layers[6].w2 = t62;
    struct csinn_tensor *t63 = csinn_alloc_tensor(NULL);
    t63->data = base + 0xc2c34000;
    t63->dtype = CSINN_DTYPE_FLOAT16;
    t63->dim_count = 2;
    t63->dim[0] = 11008;
    t63->dim[1] = 4096;
    t63->name = "blk.6.ffn_up.weight";
    model->layers[6].w3 = t63;
    struct csinn_tensor *t64 = csinn_alloc_tensor(NULL);
    t64->data = base + 0xc8234000;
    t64->dtype = CSINN_DTYPE_FLOAT32;
    t64->dim_count = 1;
    t64->dim[0] = 4096;
    t64->name = "blk.6.attn_norm.weight";
    model->layers[6].attn_norm = t64;
    struct csinn_tensor *t65 = csinn_alloc_tensor(NULL);
    t65->data = base + 0xc8238000;
    t65->dtype = CSINN_DTYPE_FLOAT32;
    t65->dim_count = 1;
    t65->dim[0] = 4096;
    t65->name = "blk.6.ffn_norm.weight";
    model->layers[6].ffn_norm = t65;
    struct csinn_tensor *t66 = csinn_alloc_tensor(NULL);
    t66->data = base + 0xc823c000;
    t66->dtype = CSINN_DTYPE_FLOAT16;
    t66->dim_count = 2;
    t66->dim[0] = 4096;
    t66->dim[1] = 4096;
    t66->name = "blk.7.attn_q.weight";
    model->layers[7].wq = t66;
    struct csinn_tensor *t67 = csinn_alloc_tensor(NULL);
    t67->data = base + 0xca23c000;
    t67->dtype = CSINN_DTYPE_FLOAT16;
    t67->dim_count = 2;
    t67->dim[0] = 4096;
    t67->dim[1] = 4096;
    t67->name = "blk.7.attn_k.weight";
    model->layers[7].wk = t67;
    struct csinn_tensor *t68 = csinn_alloc_tensor(NULL);
    t68->data = base + 0xcc23c000;
    t68->dtype = CSINN_DTYPE_FLOAT16;
    t68->dim_count = 2;
    t68->dim[0] = 4096;
    t68->dim[1] = 4096;
    t68->name = "blk.7.attn_v.weight";
    model->layers[7].wv = t68;
    struct csinn_tensor *t69 = csinn_alloc_tensor(NULL);
    t69->data = base + 0xce23c000;
    t69->dtype = CSINN_DTYPE_FLOAT16;
    t69->dim_count = 2;
    t69->dim[0] = 4096;
    t69->dim[1] = 4096;
    t69->name = "blk.7.attn_output.weight";
    model->layers[7].wo = t69;
    struct csinn_tensor *t70 = csinn_alloc_tensor(NULL);
    t70->data = base + 0xd023c000;
    t70->dtype = CSINN_DTYPE_FLOAT16;
    t70->dim_count = 2;
    t70->dim[0] = 11008;
    t70->dim[1] = 4096;
    t70->name = "blk.7.ffn_gate.weight";
    model->layers[7].w1 = t70;
    struct csinn_tensor *t71 = csinn_alloc_tensor(NULL);
    t71->data = base + 0xd583c000;
    t71->dtype = CSINN_DTYPE_FLOAT16;
    t71->dim_count = 2;
    t71->dim[0] = 4096;
    t71->dim[1] = 11008;
    t71->name = "blk.7.ffn_down.weight";
    model->layers[7].w2 = t71;
    struct csinn_tensor *t72 = csinn_alloc_tensor(NULL);
    t72->data = base + 0xdae3c000;
    t72->dtype = CSINN_DTYPE_FLOAT16;
    t72->dim_count = 2;
    t72->dim[0] = 11008;
    t72->dim[1] = 4096;
    t72->name = "blk.7.ffn_up.weight";
    model->layers[7].w3 = t72;
    struct csinn_tensor *t73 = csinn_alloc_tensor(NULL);
    t73->data = base + 0xe043c000;
    t73->dtype = CSINN_DTYPE_FLOAT32;
    t73->dim_count = 1;
    t73->dim[0] = 4096;
    t73->name = "blk.7.attn_norm.weight";
    model->layers[7].attn_norm = t73;
    struct csinn_tensor *t74 = csinn_alloc_tensor(NULL);
    t74->data = base + 0xe0440000;
    t74->dtype = CSINN_DTYPE_FLOAT32;
    t74->dim_count = 1;
    t74->dim[0] = 4096;
    t74->name = "blk.7.ffn_norm.weight";
    model->layers[7].ffn_norm = t74;
    struct csinn_tensor *t75 = csinn_alloc_tensor(NULL);
    t75->data = base + 0xe0444000;
    t75->dtype = CSINN_DTYPE_FLOAT16;
    t75->dim_count = 2;
    t75->dim[0] = 4096;
    t75->dim[1] = 4096;
    t75->name = "blk.8.attn_q.weight";
    model->layers[8].wq = t75;
    struct csinn_tensor *t76 = csinn_alloc_tensor(NULL);
    t76->data = base + 0xe2444000;
    t76->dtype = CSINN_DTYPE_FLOAT16;
    t76->dim_count = 2;
    t76->dim[0] = 4096;
    t76->dim[1] = 4096;
    t76->name = "blk.8.attn_k.weight";
    model->layers[8].wk = t76;
    struct csinn_tensor *t77 = csinn_alloc_tensor(NULL);
    t77->data = base + 0xe4444000;
    t77->dtype = CSINN_DTYPE_FLOAT16;
    t77->dim_count = 2;
    t77->dim[0] = 4096;
    t77->dim[1] = 4096;
    t77->name = "blk.8.attn_v.weight";
    model->layers[8].wv = t77;
    struct csinn_tensor *t78 = csinn_alloc_tensor(NULL);
    t78->data = base + 0xe6444000;
    t78->dtype = CSINN_DTYPE_FLOAT16;
    t78->dim_count = 2;
    t78->dim[0] = 4096;
    t78->dim[1] = 4096;
    t78->name = "blk.8.attn_output.weight";
    model->layers[8].wo = t78;
    struct csinn_tensor *t79 = csinn_alloc_tensor(NULL);
    t79->data = base + 0xe8444000;
    t79->dtype = CSINN_DTYPE_FLOAT16;
    t79->dim_count = 2;
    t79->dim[0] = 11008;
    t79->dim[1] = 4096;
    t79->name = "blk.8.ffn_gate.weight";
    model->layers[8].w1 = t79;
    struct csinn_tensor *t80 = csinn_alloc_tensor(NULL);
    t80->data = base + 0xeda44000;
    t80->dtype = CSINN_DTYPE_FLOAT16;
    t80->dim_count = 2;
    t80->dim[0] = 4096;
    t80->dim[1] = 11008;
    t80->name = "blk.8.ffn_down.weight";
    model->layers[8].w2 = t80;
    struct csinn_tensor *t81 = csinn_alloc_tensor(NULL);
    t81->data = base + 0xf3044000;
    t81->dtype = CSINN_DTYPE_FLOAT16;
    t81->dim_count = 2;
    t81->dim[0] = 11008;
    t81->dim[1] = 4096;
    t81->name = "blk.8.ffn_up.weight";
    model->layers[8].w3 = t81;
    struct csinn_tensor *t82 = csinn_alloc_tensor(NULL);
    t82->data = base + 0xf8644000;
    t82->dtype = CSINN_DTYPE_FLOAT32;
    t82->dim_count = 1;
    t82->dim[0] = 4096;
    t82->name = "blk.8.attn_norm.weight";
    model->layers[8].attn_norm = t82;
    struct csinn_tensor *t83 = csinn_alloc_tensor(NULL);
    t83->data = base + 0xf8648000;
    t83->dtype = CSINN_DTYPE_FLOAT32;
    t83->dim_count = 1;
    t83->dim[0] = 4096;
    t83->name = "blk.8.ffn_norm.weight";
    model->layers[8].ffn_norm = t83;
    struct csinn_tensor *t84 = csinn_alloc_tensor(NULL);
    t84->data = base + 0xf864c000;
    t84->dtype = CSINN_DTYPE_FLOAT16;
    t84->dim_count = 2;
    t84->dim[0] = 4096;
    t84->dim[1] = 4096;
    t84->name = "blk.9.attn_q.weight";
    model->layers[9].wq = t84;
    struct csinn_tensor *t85 = csinn_alloc_tensor(NULL);
    t85->data = base + 0xfa64c000;
    t85->dtype = CSINN_DTYPE_FLOAT16;
    t85->dim_count = 2;
    t85->dim[0] = 4096;
    t85->dim[1] = 4096;
    t85->name = "blk.9.attn_k.weight";
    model->layers[9].wk = t85;
    struct csinn_tensor *t86 = csinn_alloc_tensor(NULL);
    t86->data = base + 0xfc64c000;
    t86->dtype = CSINN_DTYPE_FLOAT16;
    t86->dim_count = 2;
    t86->dim[0] = 4096;
    t86->dim[1] = 4096;
    t86->name = "blk.9.attn_v.weight";
    model->layers[9].wv = t86;
    struct csinn_tensor *t87 = csinn_alloc_tensor(NULL);
    t87->data = base + 0xfe64c000;
    t87->dtype = CSINN_DTYPE_FLOAT16;
    t87->dim_count = 2;
    t87->dim[0] = 4096;
    t87->dim[1] = 4096;
    t87->name = "blk.9.attn_output.weight";
    model->layers[9].wo = t87;
    struct csinn_tensor *t88 = csinn_alloc_tensor(NULL);
    t88->data = base + 0x10064c000;
    t88->dtype = CSINN_DTYPE_FLOAT16;
    t88->dim_count = 2;
    t88->dim[0] = 11008;
    t88->dim[1] = 4096;
    t88->name = "blk.9.ffn_gate.weight";
    model->layers[9].w1 = t88;
    struct csinn_tensor *t89 = csinn_alloc_tensor(NULL);
    t89->data = base + 0x105c4c000;
    t89->dtype = CSINN_DTYPE_FLOAT16;
    t89->dim_count = 2;
    t89->dim[0] = 4096;
    t89->dim[1] = 11008;
    t89->name = "blk.9.ffn_down.weight";
    model->layers[9].w2 = t89;
    struct csinn_tensor *t90 = csinn_alloc_tensor(NULL);
    t90->data = base + 0x10b24c000;
    t90->dtype = CSINN_DTYPE_FLOAT16;
    t90->dim_count = 2;
    t90->dim[0] = 11008;
    t90->dim[1] = 4096;
    t90->name = "blk.9.ffn_up.weight";
    model->layers[9].w3 = t90;
    struct csinn_tensor *t91 = csinn_alloc_tensor(NULL);
    t91->data = base + 0x11084c000;
    t91->dtype = CSINN_DTYPE_FLOAT32;
    t91->dim_count = 1;
    t91->dim[0] = 4096;
    t91->name = "blk.9.attn_norm.weight";
    model->layers[9].attn_norm = t91;
    struct csinn_tensor *t92 = csinn_alloc_tensor(NULL);
    t92->data = base + 0x110850000;
    t92->dtype = CSINN_DTYPE_FLOAT32;
    t92->dim_count = 1;
    t92->dim[0] = 4096;
    t92->name = "blk.9.ffn_norm.weight";
    model->layers[9].ffn_norm = t92;
    struct csinn_tensor *t93 = csinn_alloc_tensor(NULL);
    t93->data = base + 0x110854000;
    t93->dtype = CSINN_DTYPE_FLOAT16;
    t93->dim_count = 2;
    t93->dim[0] = 4096;
    t93->dim[1] = 4096;
    t93->name = "blk.10.attn_q.weight";
    model->layers[10].wq = t93;
    struct csinn_tensor *t94 = csinn_alloc_tensor(NULL);
    t94->data = base + 0x112854000;
    t94->dtype = CSINN_DTYPE_FLOAT16;
    t94->dim_count = 2;
    t94->dim[0] = 4096;
    t94->dim[1] = 4096;
    t94->name = "blk.10.attn_k.weight";
    model->layers[10].wk = t94;
    struct csinn_tensor *t95 = csinn_alloc_tensor(NULL);
    t95->data = base + 0x114854000;
    t95->dtype = CSINN_DTYPE_FLOAT16;
    t95->dim_count = 2;
    t95->dim[0] = 4096;
    t95->dim[1] = 4096;
    t95->name = "blk.10.attn_v.weight";
    model->layers[10].wv = t95;
    struct csinn_tensor *t96 = csinn_alloc_tensor(NULL);
    t96->data = base + 0x116854000;
    t96->dtype = CSINN_DTYPE_FLOAT16;
    t96->dim_count = 2;
    t96->dim[0] = 4096;
    t96->dim[1] = 4096;
    t96->name = "blk.10.attn_output.weight";
    model->layers[10].wo = t96;
    struct csinn_tensor *t97 = csinn_alloc_tensor(NULL);
    t97->data = base + 0x118854000;
    t97->dtype = CSINN_DTYPE_FLOAT16;
    t97->dim_count = 2;
    t97->dim[0] = 11008;
    t97->dim[1] = 4096;
    t97->name = "blk.10.ffn_gate.weight";
    model->layers[10].w1 = t97;
    struct csinn_tensor *t98 = csinn_alloc_tensor(NULL);
    t98->data = base + 0x11de54000;
    t98->dtype = CSINN_DTYPE_FLOAT16;
    t98->dim_count = 2;
    t98->dim[0] = 4096;
    t98->dim[1] = 11008;
    t98->name = "blk.10.ffn_down.weight";
    model->layers[10].w2 = t98;
    struct csinn_tensor *t99 = csinn_alloc_tensor(NULL);
    t99->data = base + 0x123454000;
    t99->dtype = CSINN_DTYPE_FLOAT16;
    t99->dim_count = 2;
    t99->dim[0] = 11008;
    t99->dim[1] = 4096;
    t99->name = "blk.10.ffn_up.weight";
    model->layers[10].w3 = t99;
    struct csinn_tensor *t100 = csinn_alloc_tensor(NULL);
    t100->data = base + 0x128a54000;
    t100->dtype = CSINN_DTYPE_FLOAT32;
    t100->dim_count = 1;
    t100->dim[0] = 4096;
    t100->name = "blk.10.attn_norm.weight";
    model->layers[10].attn_norm = t100;
    struct csinn_tensor *t101 = csinn_alloc_tensor(NULL);
    t101->data = base + 0x128a58000;
    t101->dtype = CSINN_DTYPE_FLOAT32;
    t101->dim_count = 1;
    t101->dim[0] = 4096;
    t101->name = "blk.10.ffn_norm.weight";
    model->layers[10].ffn_norm = t101;
    struct csinn_tensor *t102 = csinn_alloc_tensor(NULL);
    t102->data = base + 0x128a5c000;
    t102->dtype = CSINN_DTYPE_FLOAT16;
    t102->dim_count = 2;
    t102->dim[0] = 4096;
    t102->dim[1] = 4096;
    t102->name = "blk.11.attn_q.weight";
    model->layers[11].wq = t102;
    struct csinn_tensor *t103 = csinn_alloc_tensor(NULL);
    t103->data = base + 0x12aa5c000;
    t103->dtype = CSINN_DTYPE_FLOAT16;
    t103->dim_count = 2;
    t103->dim[0] = 4096;
    t103->dim[1] = 4096;
    t103->name = "blk.11.attn_k.weight";
    model->layers[11].wk = t103;
    struct csinn_tensor *t104 = csinn_alloc_tensor(NULL);
    t104->data = base + 0x12ca5c000;
    t104->dtype = CSINN_DTYPE_FLOAT16;
    t104->dim_count = 2;
    t104->dim[0] = 4096;
    t104->dim[1] = 4096;
    t104->name = "blk.11.attn_v.weight";
    model->layers[11].wv = t104;
    struct csinn_tensor *t105 = csinn_alloc_tensor(NULL);
    t105->data = base + 0x12ea5c000;
    t105->dtype = CSINN_DTYPE_FLOAT16;
    t105->dim_count = 2;
    t105->dim[0] = 4096;
    t105->dim[1] = 4096;
    t105->name = "blk.11.attn_output.weight";
    model->layers[11].wo = t105;
    struct csinn_tensor *t106 = csinn_alloc_tensor(NULL);
    t106->data = base + 0x130a5c000;
    t106->dtype = CSINN_DTYPE_FLOAT16;
    t106->dim_count = 2;
    t106->dim[0] = 11008;
    t106->dim[1] = 4096;
    t106->name = "blk.11.ffn_gate.weight";
    model->layers[11].w1 = t106;
    struct csinn_tensor *t107 = csinn_alloc_tensor(NULL);
    t107->data = base + 0x13605c000;
    t107->dtype = CSINN_DTYPE_FLOAT16;
    t107->dim_count = 2;
    t107->dim[0] = 4096;
    t107->dim[1] = 11008;
    t107->name = "blk.11.ffn_down.weight";
    model->layers[11].w2 = t107;
    struct csinn_tensor *t108 = csinn_alloc_tensor(NULL);
    t108->data = base + 0x13b65c000;
    t108->dtype = CSINN_DTYPE_FLOAT16;
    t108->dim_count = 2;
    t108->dim[0] = 11008;
    t108->dim[1] = 4096;
    t108->name = "blk.11.ffn_up.weight";
    model->layers[11].w3 = t108;
    struct csinn_tensor *t109 = csinn_alloc_tensor(NULL);
    t109->data = base + 0x140c5c000;
    t109->dtype = CSINN_DTYPE_FLOAT32;
    t109->dim_count = 1;
    t109->dim[0] = 4096;
    t109->name = "blk.11.attn_norm.weight";
    model->layers[11].attn_norm = t109;
    struct csinn_tensor *t110 = csinn_alloc_tensor(NULL);
    t110->data = base + 0x140c60000;
    t110->dtype = CSINN_DTYPE_FLOAT32;
    t110->dim_count = 1;
    t110->dim[0] = 4096;
    t110->name = "blk.11.ffn_norm.weight";
    model->layers[11].ffn_norm = t110;
    struct csinn_tensor *t111 = csinn_alloc_tensor(NULL);
    t111->data = base + 0x140c64000;
    t111->dtype = CSINN_DTYPE_FLOAT16;
    t111->dim_count = 2;
    t111->dim[0] = 4096;
    t111->dim[1] = 4096;
    t111->name = "blk.12.attn_q.weight";
    model->layers[12].wq = t111;
    struct csinn_tensor *t112 = csinn_alloc_tensor(NULL);
    t112->data = base + 0x142c64000;
    t112->dtype = CSINN_DTYPE_FLOAT16;
    t112->dim_count = 2;
    t112->dim[0] = 4096;
    t112->dim[1] = 4096;
    t112->name = "blk.12.attn_k.weight";
    model->layers[12].wk = t112;
    struct csinn_tensor *t113 = csinn_alloc_tensor(NULL);
    t113->data = base + 0x144c64000;
    t113->dtype = CSINN_DTYPE_FLOAT16;
    t113->dim_count = 2;
    t113->dim[0] = 4096;
    t113->dim[1] = 4096;
    t113->name = "blk.12.attn_v.weight";
    model->layers[12].wv = t113;
    struct csinn_tensor *t114 = csinn_alloc_tensor(NULL);
    t114->data = base + 0x146c64000;
    t114->dtype = CSINN_DTYPE_FLOAT16;
    t114->dim_count = 2;
    t114->dim[0] = 4096;
    t114->dim[1] = 4096;
    t114->name = "blk.12.attn_output.weight";
    model->layers[12].wo = t114;
    struct csinn_tensor *t115 = csinn_alloc_tensor(NULL);
    t115->data = base + 0x148c64000;
    t115->dtype = CSINN_DTYPE_FLOAT16;
    t115->dim_count = 2;
    t115->dim[0] = 11008;
    t115->dim[1] = 4096;
    t115->name = "blk.12.ffn_gate.weight";
    model->layers[12].w1 = t115;
    struct csinn_tensor *t116 = csinn_alloc_tensor(NULL);
    t116->data = base + 0x14e264000;
    t116->dtype = CSINN_DTYPE_FLOAT16;
    t116->dim_count = 2;
    t116->dim[0] = 4096;
    t116->dim[1] = 11008;
    t116->name = "blk.12.ffn_down.weight";
    model->layers[12].w2 = t116;
    struct csinn_tensor *t117 = csinn_alloc_tensor(NULL);
    t117->data = base + 0x153864000;
    t117->dtype = CSINN_DTYPE_FLOAT16;
    t117->dim_count = 2;
    t117->dim[0] = 11008;
    t117->dim[1] = 4096;
    t117->name = "blk.12.ffn_up.weight";
    model->layers[12].w3 = t117;
    struct csinn_tensor *t118 = csinn_alloc_tensor(NULL);
    t118->data = base + 0x158e64000;
    t118->dtype = CSINN_DTYPE_FLOAT32;
    t118->dim_count = 1;
    t118->dim[0] = 4096;
    t118->name = "blk.12.attn_norm.weight";
    model->layers[12].attn_norm = t118;
    struct csinn_tensor *t119 = csinn_alloc_tensor(NULL);
    t119->data = base + 0x158e68000;
    t119->dtype = CSINN_DTYPE_FLOAT32;
    t119->dim_count = 1;
    t119->dim[0] = 4096;
    t119->name = "blk.12.ffn_norm.weight";
    model->layers[12].ffn_norm = t119;
    struct csinn_tensor *t120 = csinn_alloc_tensor(NULL);
    t120->data = base + 0x158e6c000;
    t120->dtype = CSINN_DTYPE_FLOAT16;
    t120->dim_count = 2;
    t120->dim[0] = 4096;
    t120->dim[1] = 4096;
    t120->name = "blk.13.attn_q.weight";
    model->layers[13].wq = t120;
    struct csinn_tensor *t121 = csinn_alloc_tensor(NULL);
    t121->data = base + 0x15ae6c000;
    t121->dtype = CSINN_DTYPE_FLOAT16;
    t121->dim_count = 2;
    t121->dim[0] = 4096;
    t121->dim[1] = 4096;
    t121->name = "blk.13.attn_k.weight";
    model->layers[13].wk = t121;
    struct csinn_tensor *t122 = csinn_alloc_tensor(NULL);
    t122->data = base + 0x15ce6c000;
    t122->dtype = CSINN_DTYPE_FLOAT16;
    t122->dim_count = 2;
    t122->dim[0] = 4096;
    t122->dim[1] = 4096;
    t122->name = "blk.13.attn_v.weight";
    model->layers[13].wv = t122;
    struct csinn_tensor *t123 = csinn_alloc_tensor(NULL);
    t123->data = base + 0x15ee6c000;
    t123->dtype = CSINN_DTYPE_FLOAT16;
    t123->dim_count = 2;
    t123->dim[0] = 4096;
    t123->dim[1] = 4096;
    t123->name = "blk.13.attn_output.weight";
    model->layers[13].wo = t123;
    struct csinn_tensor *t124 = csinn_alloc_tensor(NULL);
    t124->data = base + 0x160e6c000;
    t124->dtype = CSINN_DTYPE_FLOAT16;
    t124->dim_count = 2;
    t124->dim[0] = 11008;
    t124->dim[1] = 4096;
    t124->name = "blk.13.ffn_gate.weight";
    model->layers[13].w1 = t124;
    struct csinn_tensor *t125 = csinn_alloc_tensor(NULL);
    t125->data = base + 0x16646c000;
    t125->dtype = CSINN_DTYPE_FLOAT16;
    t125->dim_count = 2;
    t125->dim[0] = 4096;
    t125->dim[1] = 11008;
    t125->name = "blk.13.ffn_down.weight";
    model->layers[13].w2 = t125;
    struct csinn_tensor *t126 = csinn_alloc_tensor(NULL);
    t126->data = base + 0x16ba6c000;
    t126->dtype = CSINN_DTYPE_FLOAT16;
    t126->dim_count = 2;
    t126->dim[0] = 11008;
    t126->dim[1] = 4096;
    t126->name = "blk.13.ffn_up.weight";
    model->layers[13].w3 = t126;
    struct csinn_tensor *t127 = csinn_alloc_tensor(NULL);
    t127->data = base + 0x17106c000;
    t127->dtype = CSINN_DTYPE_FLOAT32;
    t127->dim_count = 1;
    t127->dim[0] = 4096;
    t127->name = "blk.13.attn_norm.weight";
    model->layers[13].attn_norm = t127;
    struct csinn_tensor *t128 = csinn_alloc_tensor(NULL);
    t128->data = base + 0x171070000;
    t128->dtype = CSINN_DTYPE_FLOAT32;
    t128->dim_count = 1;
    t128->dim[0] = 4096;
    t128->name = "blk.13.ffn_norm.weight";
    model->layers[13].ffn_norm = t128;
    struct csinn_tensor *t129 = csinn_alloc_tensor(NULL);
    t129->data = base + 0x171074000;
    t129->dtype = CSINN_DTYPE_FLOAT16;
    t129->dim_count = 2;
    t129->dim[0] = 4096;
    t129->dim[1] = 4096;
    t129->name = "blk.14.attn_q.weight";
    model->layers[14].wq = t129;
    struct csinn_tensor *t130 = csinn_alloc_tensor(NULL);
    t130->data = base + 0x173074000;
    t130->dtype = CSINN_DTYPE_FLOAT16;
    t130->dim_count = 2;
    t130->dim[0] = 4096;
    t130->dim[1] = 4096;
    t130->name = "blk.14.attn_k.weight";
    model->layers[14].wk = t130;
    struct csinn_tensor *t131 = csinn_alloc_tensor(NULL);
    t131->data = base + 0x175074000;
    t131->dtype = CSINN_DTYPE_FLOAT16;
    t131->dim_count = 2;
    t131->dim[0] = 4096;
    t131->dim[1] = 4096;
    t131->name = "blk.14.attn_v.weight";
    model->layers[14].wv = t131;
    struct csinn_tensor *t132 = csinn_alloc_tensor(NULL);
    t132->data = base + 0x177074000;
    t132->dtype = CSINN_DTYPE_FLOAT16;
    t132->dim_count = 2;
    t132->dim[0] = 4096;
    t132->dim[1] = 4096;
    t132->name = "blk.14.attn_output.weight";
    model->layers[14].wo = t132;
    struct csinn_tensor *t133 = csinn_alloc_tensor(NULL);
    t133->data = base + 0x179074000;
    t133->dtype = CSINN_DTYPE_FLOAT16;
    t133->dim_count = 2;
    t133->dim[0] = 11008;
    t133->dim[1] = 4096;
    t133->name = "blk.14.ffn_gate.weight";
    model->layers[14].w1 = t133;
    struct csinn_tensor *t134 = csinn_alloc_tensor(NULL);
    t134->data = base + 0x17e674000;
    t134->dtype = CSINN_DTYPE_FLOAT16;
    t134->dim_count = 2;
    t134->dim[0] = 4096;
    t134->dim[1] = 11008;
    t134->name = "blk.14.ffn_down.weight";
    model->layers[14].w2 = t134;
    struct csinn_tensor *t135 = csinn_alloc_tensor(NULL);
    t135->data = base + 0x183c74000;
    t135->dtype = CSINN_DTYPE_FLOAT16;
    t135->dim_count = 2;
    t135->dim[0] = 11008;
    t135->dim[1] = 4096;
    t135->name = "blk.14.ffn_up.weight";
    model->layers[14].w3 = t135;
    struct csinn_tensor *t136 = csinn_alloc_tensor(NULL);
    t136->data = base + 0x189274000;
    t136->dtype = CSINN_DTYPE_FLOAT32;
    t136->dim_count = 1;
    t136->dim[0] = 4096;
    t136->name = "blk.14.attn_norm.weight";
    model->layers[14].attn_norm = t136;
    struct csinn_tensor *t137 = csinn_alloc_tensor(NULL);
    t137->data = base + 0x189278000;
    t137->dtype = CSINN_DTYPE_FLOAT32;
    t137->dim_count = 1;
    t137->dim[0] = 4096;
    t137->name = "blk.14.ffn_norm.weight";
    model->layers[14].ffn_norm = t137;
    struct csinn_tensor *t138 = csinn_alloc_tensor(NULL);
    t138->data = base + 0x18927c000;
    t138->dtype = CSINN_DTYPE_FLOAT16;
    t138->dim_count = 2;
    t138->dim[0] = 4096;
    t138->dim[1] = 4096;
    t138->name = "blk.15.attn_q.weight";
    model->layers[15].wq = t138;
    struct csinn_tensor *t139 = csinn_alloc_tensor(NULL);
    t139->data = base + 0x18b27c000;
    t139->dtype = CSINN_DTYPE_FLOAT16;
    t139->dim_count = 2;
    t139->dim[0] = 4096;
    t139->dim[1] = 4096;
    t139->name = "blk.15.attn_k.weight";
    model->layers[15].wk = t139;
    struct csinn_tensor *t140 = csinn_alloc_tensor(NULL);
    t140->data = base + 0x18d27c000;
    t140->dtype = CSINN_DTYPE_FLOAT16;
    t140->dim_count = 2;
    t140->dim[0] = 4096;
    t140->dim[1] = 4096;
    t140->name = "blk.15.attn_v.weight";
    model->layers[15].wv = t140;
    struct csinn_tensor *t141 = csinn_alloc_tensor(NULL);
    t141->data = base + 0x18f27c000;
    t141->dtype = CSINN_DTYPE_FLOAT16;
    t141->dim_count = 2;
    t141->dim[0] = 4096;
    t141->dim[1] = 4096;
    t141->name = "blk.15.attn_output.weight";
    model->layers[15].wo = t141;
    struct csinn_tensor *t142 = csinn_alloc_tensor(NULL);
    t142->data = base + 0x19127c000;
    t142->dtype = CSINN_DTYPE_FLOAT16;
    t142->dim_count = 2;
    t142->dim[0] = 11008;
    t142->dim[1] = 4096;
    t142->name = "blk.15.ffn_gate.weight";
    model->layers[15].w1 = t142;
    struct csinn_tensor *t143 = csinn_alloc_tensor(NULL);
    t143->data = base + 0x19687c000;
    t143->dtype = CSINN_DTYPE_FLOAT16;
    t143->dim_count = 2;
    t143->dim[0] = 4096;
    t143->dim[1] = 11008;
    t143->name = "blk.15.ffn_down.weight";
    model->layers[15].w2 = t143;
    struct csinn_tensor *t144 = csinn_alloc_tensor(NULL);
    t144->data = base + 0x19be7c000;
    t144->dtype = CSINN_DTYPE_FLOAT16;
    t144->dim_count = 2;
    t144->dim[0] = 11008;
    t144->dim[1] = 4096;
    t144->name = "blk.15.ffn_up.weight";
    model->layers[15].w3 = t144;
    struct csinn_tensor *t145 = csinn_alloc_tensor(NULL);
    t145->data = base + 0x1a147c000;
    t145->dtype = CSINN_DTYPE_FLOAT32;
    t145->dim_count = 1;
    t145->dim[0] = 4096;
    t145->name = "blk.15.attn_norm.weight";
    model->layers[15].attn_norm = t145;
    struct csinn_tensor *t146 = csinn_alloc_tensor(NULL);
    t146->data = base + 0x1a1480000;
    t146->dtype = CSINN_DTYPE_FLOAT32;
    t146->dim_count = 1;
    t146->dim[0] = 4096;
    t146->name = "blk.15.ffn_norm.weight";
    model->layers[15].ffn_norm = t146;
    struct csinn_tensor *t147 = csinn_alloc_tensor(NULL);
    t147->data = base + 0x1a1484000;
    t147->dtype = CSINN_DTYPE_FLOAT16;
    t147->dim_count = 2;
    t147->dim[0] = 4096;
    t147->dim[1] = 4096;
    t147->name = "blk.16.attn_q.weight";
    model->layers[16].wq = t147;
    struct csinn_tensor *t148 = csinn_alloc_tensor(NULL);
    t148->data = base + 0x1a3484000;
    t148->dtype = CSINN_DTYPE_FLOAT16;
    t148->dim_count = 2;
    t148->dim[0] = 4096;
    t148->dim[1] = 4096;
    t148->name = "blk.16.attn_k.weight";
    model->layers[16].wk = t148;
    struct csinn_tensor *t149 = csinn_alloc_tensor(NULL);
    t149->data = base + 0x1a5484000;
    t149->dtype = CSINN_DTYPE_FLOAT16;
    t149->dim_count = 2;
    t149->dim[0] = 4096;
    t149->dim[1] = 4096;
    t149->name = "blk.16.attn_v.weight";
    model->layers[16].wv = t149;
    struct csinn_tensor *t150 = csinn_alloc_tensor(NULL);
    t150->data = base + 0x1a7484000;
    t150->dtype = CSINN_DTYPE_FLOAT16;
    t150->dim_count = 2;
    t150->dim[0] = 4096;
    t150->dim[1] = 4096;
    t150->name = "blk.16.attn_output.weight";
    model->layers[16].wo = t150;
    struct csinn_tensor *t151 = csinn_alloc_tensor(NULL);
    t151->data = base + 0x1a9484000;
    t151->dtype = CSINN_DTYPE_FLOAT16;
    t151->dim_count = 2;
    t151->dim[0] = 11008;
    t151->dim[1] = 4096;
    t151->name = "blk.16.ffn_gate.weight";
    model->layers[16].w1 = t151;
    struct csinn_tensor *t152 = csinn_alloc_tensor(NULL);
    t152->data = base + 0x1aea84000;
    t152->dtype = CSINN_DTYPE_FLOAT16;
    t152->dim_count = 2;
    t152->dim[0] = 4096;
    t152->dim[1] = 11008;
    t152->name = "blk.16.ffn_down.weight";
    model->layers[16].w2 = t152;
    struct csinn_tensor *t153 = csinn_alloc_tensor(NULL);
    t153->data = base + 0x1b4084000;
    t153->dtype = CSINN_DTYPE_FLOAT16;
    t153->dim_count = 2;
    t153->dim[0] = 11008;
    t153->dim[1] = 4096;
    t153->name = "blk.16.ffn_up.weight";
    model->layers[16].w3 = t153;
    struct csinn_tensor *t154 = csinn_alloc_tensor(NULL);
    t154->data = base + 0x1b9684000;
    t154->dtype = CSINN_DTYPE_FLOAT32;
    t154->dim_count = 1;
    t154->dim[0] = 4096;
    t154->name = "blk.16.attn_norm.weight";
    model->layers[16].attn_norm = t154;
    struct csinn_tensor *t155 = csinn_alloc_tensor(NULL);
    t155->data = base + 0x1b9688000;
    t155->dtype = CSINN_DTYPE_FLOAT32;
    t155->dim_count = 1;
    t155->dim[0] = 4096;
    t155->name = "blk.16.ffn_norm.weight";
    model->layers[16].ffn_norm = t155;
    struct csinn_tensor *t156 = csinn_alloc_tensor(NULL);
    t156->data = base + 0x1b968c000;
    t156->dtype = CSINN_DTYPE_FLOAT16;
    t156->dim_count = 2;
    t156->dim[0] = 4096;
    t156->dim[1] = 4096;
    t156->name = "blk.17.attn_q.weight";
    model->layers[17].wq = t156;
    struct csinn_tensor *t157 = csinn_alloc_tensor(NULL);
    t157->data = base + 0x1bb68c000;
    t157->dtype = CSINN_DTYPE_FLOAT16;
    t157->dim_count = 2;
    t157->dim[0] = 4096;
    t157->dim[1] = 4096;
    t157->name = "blk.17.attn_k.weight";
    model->layers[17].wk = t157;
    struct csinn_tensor *t158 = csinn_alloc_tensor(NULL);
    t158->data = base + 0x1bd68c000;
    t158->dtype = CSINN_DTYPE_FLOAT16;
    t158->dim_count = 2;
    t158->dim[0] = 4096;
    t158->dim[1] = 4096;
    t158->name = "blk.17.attn_v.weight";
    model->layers[17].wv = t158;
    struct csinn_tensor *t159 = csinn_alloc_tensor(NULL);
    t159->data = base + 0x1bf68c000;
    t159->dtype = CSINN_DTYPE_FLOAT16;
    t159->dim_count = 2;
    t159->dim[0] = 4096;
    t159->dim[1] = 4096;
    t159->name = "blk.17.attn_output.weight";
    model->layers[17].wo = t159;
    struct csinn_tensor *t160 = csinn_alloc_tensor(NULL);
    t160->data = base + 0x1c168c000;
    t160->dtype = CSINN_DTYPE_FLOAT16;
    t160->dim_count = 2;
    t160->dim[0] = 11008;
    t160->dim[1] = 4096;
    t160->name = "blk.17.ffn_gate.weight";
    model->layers[17].w1 = t160;
    struct csinn_tensor *t161 = csinn_alloc_tensor(NULL);
    t161->data = base + 0x1c6c8c000;
    t161->dtype = CSINN_DTYPE_FLOAT16;
    t161->dim_count = 2;
    t161->dim[0] = 4096;
    t161->dim[1] = 11008;
    t161->name = "blk.17.ffn_down.weight";
    model->layers[17].w2 = t161;
    struct csinn_tensor *t162 = csinn_alloc_tensor(NULL);
    t162->data = base + 0x1cc28c000;
    t162->dtype = CSINN_DTYPE_FLOAT16;
    t162->dim_count = 2;
    t162->dim[0] = 11008;
    t162->dim[1] = 4096;
    t162->name = "blk.17.ffn_up.weight";
    model->layers[17].w3 = t162;
    struct csinn_tensor *t163 = csinn_alloc_tensor(NULL);
    t163->data = base + 0x1d188c000;
    t163->dtype = CSINN_DTYPE_FLOAT32;
    t163->dim_count = 1;
    t163->dim[0] = 4096;
    t163->name = "blk.17.attn_norm.weight";
    model->layers[17].attn_norm = t163;
    struct csinn_tensor *t164 = csinn_alloc_tensor(NULL);
    t164->data = base + 0x1d1890000;
    t164->dtype = CSINN_DTYPE_FLOAT32;
    t164->dim_count = 1;
    t164->dim[0] = 4096;
    t164->name = "blk.17.ffn_norm.weight";
    model->layers[17].ffn_norm = t164;
    struct csinn_tensor *t165 = csinn_alloc_tensor(NULL);
    t165->data = base + 0x1d1894000;
    t165->dtype = CSINN_DTYPE_FLOAT16;
    t165->dim_count = 2;
    t165->dim[0] = 4096;
    t165->dim[1] = 4096;
    t165->name = "blk.18.attn_q.weight";
    model->layers[18].wq = t165;
    struct csinn_tensor *t166 = csinn_alloc_tensor(NULL);
    t166->data = base + 0x1d3894000;
    t166->dtype = CSINN_DTYPE_FLOAT16;
    t166->dim_count = 2;
    t166->dim[0] = 4096;
    t166->dim[1] = 4096;
    t166->name = "blk.18.attn_k.weight";
    model->layers[18].wk = t166;
    struct csinn_tensor *t167 = csinn_alloc_tensor(NULL);
    t167->data = base + 0x1d5894000;
    t167->dtype = CSINN_DTYPE_FLOAT16;
    t167->dim_count = 2;
    t167->dim[0] = 4096;
    t167->dim[1] = 4096;
    t167->name = "blk.18.attn_v.weight";
    model->layers[18].wv = t167;
    struct csinn_tensor *t168 = csinn_alloc_tensor(NULL);
    t168->data = base + 0x1d7894000;
    t168->dtype = CSINN_DTYPE_FLOAT16;
    t168->dim_count = 2;
    t168->dim[0] = 4096;
    t168->dim[1] = 4096;
    t168->name = "blk.18.attn_output.weight";
    model->layers[18].wo = t168;
    struct csinn_tensor *t169 = csinn_alloc_tensor(NULL);
    t169->data = base + 0x1d9894000;
    t169->dtype = CSINN_DTYPE_FLOAT16;
    t169->dim_count = 2;
    t169->dim[0] = 11008;
    t169->dim[1] = 4096;
    t169->name = "blk.18.ffn_gate.weight";
    model->layers[18].w1 = t169;
    struct csinn_tensor *t170 = csinn_alloc_tensor(NULL);
    t170->data = base + 0x1dee94000;
    t170->dtype = CSINN_DTYPE_FLOAT16;
    t170->dim_count = 2;
    t170->dim[0] = 4096;
    t170->dim[1] = 11008;
    t170->name = "blk.18.ffn_down.weight";
    model->layers[18].w2 = t170;
    struct csinn_tensor *t171 = csinn_alloc_tensor(NULL);
    t171->data = base + 0x1e4494000;
    t171->dtype = CSINN_DTYPE_FLOAT16;
    t171->dim_count = 2;
    t171->dim[0] = 11008;
    t171->dim[1] = 4096;
    t171->name = "blk.18.ffn_up.weight";
    model->layers[18].w3 = t171;
    struct csinn_tensor *t172 = csinn_alloc_tensor(NULL);
    t172->data = base + 0x1e9a94000;
    t172->dtype = CSINN_DTYPE_FLOAT32;
    t172->dim_count = 1;
    t172->dim[0] = 4096;
    t172->name = "blk.18.attn_norm.weight";
    model->layers[18].attn_norm = t172;
    struct csinn_tensor *t173 = csinn_alloc_tensor(NULL);
    t173->data = base + 0x1e9a98000;
    t173->dtype = CSINN_DTYPE_FLOAT32;
    t173->dim_count = 1;
    t173->dim[0] = 4096;
    t173->name = "blk.18.ffn_norm.weight";
    model->layers[18].ffn_norm = t173;
    struct csinn_tensor *t174 = csinn_alloc_tensor(NULL);
    t174->data = base + 0x1e9a9c000;
    t174->dtype = CSINN_DTYPE_FLOAT16;
    t174->dim_count = 2;
    t174->dim[0] = 4096;
    t174->dim[1] = 4096;
    t174->name = "blk.19.attn_q.weight";
    model->layers[19].wq = t174;
    struct csinn_tensor *t175 = csinn_alloc_tensor(NULL);
    t175->data = base + 0x1eba9c000;
    t175->dtype = CSINN_DTYPE_FLOAT16;
    t175->dim_count = 2;
    t175->dim[0] = 4096;
    t175->dim[1] = 4096;
    t175->name = "blk.19.attn_k.weight";
    model->layers[19].wk = t175;
    struct csinn_tensor *t176 = csinn_alloc_tensor(NULL);
    t176->data = base + 0x1eda9c000;
    t176->dtype = CSINN_DTYPE_FLOAT16;
    t176->dim_count = 2;
    t176->dim[0] = 4096;
    t176->dim[1] = 4096;
    t176->name = "blk.19.attn_v.weight";
    model->layers[19].wv = t176;
    struct csinn_tensor *t177 = csinn_alloc_tensor(NULL);
    t177->data = base + 0x1efa9c000;
    t177->dtype = CSINN_DTYPE_FLOAT16;
    t177->dim_count = 2;
    t177->dim[0] = 4096;
    t177->dim[1] = 4096;
    t177->name = "blk.19.attn_output.weight";
    model->layers[19].wo = t177;
    struct csinn_tensor *t178 = csinn_alloc_tensor(NULL);
    t178->data = base + 0x1f1a9c000;
    t178->dtype = CSINN_DTYPE_FLOAT16;
    t178->dim_count = 2;
    t178->dim[0] = 11008;
    t178->dim[1] = 4096;
    t178->name = "blk.19.ffn_gate.weight";
    model->layers[19].w1 = t178;
    struct csinn_tensor *t179 = csinn_alloc_tensor(NULL);
    t179->data = base + 0x1f709c000;
    t179->dtype = CSINN_DTYPE_FLOAT16;
    t179->dim_count = 2;
    t179->dim[0] = 4096;
    t179->dim[1] = 11008;
    t179->name = "blk.19.ffn_down.weight";
    model->layers[19].w2 = t179;
    struct csinn_tensor *t180 = csinn_alloc_tensor(NULL);
    t180->data = base + 0x1fc69c000;
    t180->dtype = CSINN_DTYPE_FLOAT16;
    t180->dim_count = 2;
    t180->dim[0] = 11008;
    t180->dim[1] = 4096;
    t180->name = "blk.19.ffn_up.weight";
    model->layers[19].w3 = t180;
    struct csinn_tensor *t181 = csinn_alloc_tensor(NULL);
    t181->data = base + 0x201c9c000;
    t181->dtype = CSINN_DTYPE_FLOAT32;
    t181->dim_count = 1;
    t181->dim[0] = 4096;
    t181->name = "blk.19.attn_norm.weight";
    model->layers[19].attn_norm = t181;
    struct csinn_tensor *t182 = csinn_alloc_tensor(NULL);
    t182->data = base + 0x201ca0000;
    t182->dtype = CSINN_DTYPE_FLOAT32;
    t182->dim_count = 1;
    t182->dim[0] = 4096;
    t182->name = "blk.19.ffn_norm.weight";
    model->layers[19].ffn_norm = t182;
    struct csinn_tensor *t183 = csinn_alloc_tensor(NULL);
    t183->data = base + 0x201ca4000;
    t183->dtype = CSINN_DTYPE_FLOAT16;
    t183->dim_count = 2;
    t183->dim[0] = 4096;
    t183->dim[1] = 4096;
    t183->name = "blk.20.attn_q.weight";
    model->layers[20].wq = t183;
    struct csinn_tensor *t184 = csinn_alloc_tensor(NULL);
    t184->data = base + 0x203ca4000;
    t184->dtype = CSINN_DTYPE_FLOAT16;
    t184->dim_count = 2;
    t184->dim[0] = 4096;
    t184->dim[1] = 4096;
    t184->name = "blk.20.attn_k.weight";
    model->layers[20].wk = t184;
    struct csinn_tensor *t185 = csinn_alloc_tensor(NULL);
    t185->data = base + 0x205ca4000;
    t185->dtype = CSINN_DTYPE_FLOAT16;
    t185->dim_count = 2;
    t185->dim[0] = 4096;
    t185->dim[1] = 4096;
    t185->name = "blk.20.attn_v.weight";
    model->layers[20].wv = t185;
    struct csinn_tensor *t186 = csinn_alloc_tensor(NULL);
    t186->data = base + 0x207ca4000;
    t186->dtype = CSINN_DTYPE_FLOAT16;
    t186->dim_count = 2;
    t186->dim[0] = 4096;
    t186->dim[1] = 4096;
    t186->name = "blk.20.attn_output.weight";
    model->layers[20].wo = t186;
    struct csinn_tensor *t187 = csinn_alloc_tensor(NULL);
    t187->data = base + 0x209ca4000;
    t187->dtype = CSINN_DTYPE_FLOAT16;
    t187->dim_count = 2;
    t187->dim[0] = 11008;
    t187->dim[1] = 4096;
    t187->name = "blk.20.ffn_gate.weight";
    model->layers[20].w1 = t187;
    struct csinn_tensor *t188 = csinn_alloc_tensor(NULL);
    t188->data = base + 0x20f2a4000;
    t188->dtype = CSINN_DTYPE_FLOAT16;
    t188->dim_count = 2;
    t188->dim[0] = 4096;
    t188->dim[1] = 11008;
    t188->name = "blk.20.ffn_down.weight";
    model->layers[20].w2 = t188;
    struct csinn_tensor *t189 = csinn_alloc_tensor(NULL);
    t189->data = base + 0x2148a4000;
    t189->dtype = CSINN_DTYPE_FLOAT16;
    t189->dim_count = 2;
    t189->dim[0] = 11008;
    t189->dim[1] = 4096;
    t189->name = "blk.20.ffn_up.weight";
    model->layers[20].w3 = t189;
    struct csinn_tensor *t190 = csinn_alloc_tensor(NULL);
    t190->data = base + 0x219ea4000;
    t190->dtype = CSINN_DTYPE_FLOAT32;
    t190->dim_count = 1;
    t190->dim[0] = 4096;
    t190->name = "blk.20.attn_norm.weight";
    model->layers[20].attn_norm = t190;
    struct csinn_tensor *t191 = csinn_alloc_tensor(NULL);
    t191->data = base + 0x219ea8000;
    t191->dtype = CSINN_DTYPE_FLOAT32;
    t191->dim_count = 1;
    t191->dim[0] = 4096;
    t191->name = "blk.20.ffn_norm.weight";
    model->layers[20].ffn_norm = t191;
    struct csinn_tensor *t192 = csinn_alloc_tensor(NULL);
    t192->data = base + 0x219eac000;
    t192->dtype = CSINN_DTYPE_FLOAT16;
    t192->dim_count = 2;
    t192->dim[0] = 4096;
    t192->dim[1] = 4096;
    t192->name = "blk.21.attn_q.weight";
    model->layers[21].wq = t192;
    struct csinn_tensor *t193 = csinn_alloc_tensor(NULL);
    t193->data = base + 0x21beac000;
    t193->dtype = CSINN_DTYPE_FLOAT16;
    t193->dim_count = 2;
    t193->dim[0] = 4096;
    t193->dim[1] = 4096;
    t193->name = "blk.21.attn_k.weight";
    model->layers[21].wk = t193;
    struct csinn_tensor *t194 = csinn_alloc_tensor(NULL);
    t194->data = base + 0x21deac000;
    t194->dtype = CSINN_DTYPE_FLOAT16;
    t194->dim_count = 2;
    t194->dim[0] = 4096;
    t194->dim[1] = 4096;
    t194->name = "blk.21.attn_v.weight";
    model->layers[21].wv = t194;
    struct csinn_tensor *t195 = csinn_alloc_tensor(NULL);
    t195->data = base + 0x21feac000;
    t195->dtype = CSINN_DTYPE_FLOAT16;
    t195->dim_count = 2;
    t195->dim[0] = 4096;
    t195->dim[1] = 4096;
    t195->name = "blk.21.attn_output.weight";
    model->layers[21].wo = t195;
    struct csinn_tensor *t196 = csinn_alloc_tensor(NULL);
    t196->data = base + 0x221eac000;
    t196->dtype = CSINN_DTYPE_FLOAT16;
    t196->dim_count = 2;
    t196->dim[0] = 11008;
    t196->dim[1] = 4096;
    t196->name = "blk.21.ffn_gate.weight";
    model->layers[21].w1 = t196;
    struct csinn_tensor *t197 = csinn_alloc_tensor(NULL);
    t197->data = base + 0x2274ac000;
    t197->dtype = CSINN_DTYPE_FLOAT16;
    t197->dim_count = 2;
    t197->dim[0] = 4096;
    t197->dim[1] = 11008;
    t197->name = "blk.21.ffn_down.weight";
    model->layers[21].w2 = t197;
    struct csinn_tensor *t198 = csinn_alloc_tensor(NULL);
    t198->data = base + 0x22caac000;
    t198->dtype = CSINN_DTYPE_FLOAT16;
    t198->dim_count = 2;
    t198->dim[0] = 11008;
    t198->dim[1] = 4096;
    t198->name = "blk.21.ffn_up.weight";
    model->layers[21].w3 = t198;
    struct csinn_tensor *t199 = csinn_alloc_tensor(NULL);
    t199->data = base + 0x2320ac000;
    t199->dtype = CSINN_DTYPE_FLOAT32;
    t199->dim_count = 1;
    t199->dim[0] = 4096;
    t199->name = "blk.21.attn_norm.weight";
    model->layers[21].attn_norm = t199;
    struct csinn_tensor *t200 = csinn_alloc_tensor(NULL);
    t200->data = base + 0x2320b0000;
    t200->dtype = CSINN_DTYPE_FLOAT32;
    t200->dim_count = 1;
    t200->dim[0] = 4096;
    t200->name = "blk.21.ffn_norm.weight";
    model->layers[21].ffn_norm = t200;
    struct csinn_tensor *t201 = csinn_alloc_tensor(NULL);
    t201->data = base + 0x2320b4000;
    t201->dtype = CSINN_DTYPE_FLOAT16;
    t201->dim_count = 2;
    t201->dim[0] = 4096;
    t201->dim[1] = 4096;
    t201->name = "blk.22.attn_q.weight";
    model->layers[22].wq = t201;
    struct csinn_tensor *t202 = csinn_alloc_tensor(NULL);
    t202->data = base + 0x2340b4000;
    t202->dtype = CSINN_DTYPE_FLOAT16;
    t202->dim_count = 2;
    t202->dim[0] = 4096;
    t202->dim[1] = 4096;
    t202->name = "blk.22.attn_k.weight";
    model->layers[22].wk = t202;
    struct csinn_tensor *t203 = csinn_alloc_tensor(NULL);
    t203->data = base + 0x2360b4000;
    t203->dtype = CSINN_DTYPE_FLOAT16;
    t203->dim_count = 2;
    t203->dim[0] = 4096;
    t203->dim[1] = 4096;
    t203->name = "blk.22.attn_v.weight";
    model->layers[22].wv = t203;
    struct csinn_tensor *t204 = csinn_alloc_tensor(NULL);
    t204->data = base + 0x2380b4000;
    t204->dtype = CSINN_DTYPE_FLOAT16;
    t204->dim_count = 2;
    t204->dim[0] = 4096;
    t204->dim[1] = 4096;
    t204->name = "blk.22.attn_output.weight";
    model->layers[22].wo = t204;
    struct csinn_tensor *t205 = csinn_alloc_tensor(NULL);
    t205->data = base + 0x23a0b4000;
    t205->dtype = CSINN_DTYPE_FLOAT16;
    t205->dim_count = 2;
    t205->dim[0] = 11008;
    t205->dim[1] = 4096;
    t205->name = "blk.22.ffn_gate.weight";
    model->layers[22].w1 = t205;
    struct csinn_tensor *t206 = csinn_alloc_tensor(NULL);
    t206->data = base + 0x23f6b4000;
    t206->dtype = CSINN_DTYPE_FLOAT16;
    t206->dim_count = 2;
    t206->dim[0] = 4096;
    t206->dim[1] = 11008;
    t206->name = "blk.22.ffn_down.weight";
    model->layers[22].w2 = t206;
    struct csinn_tensor *t207 = csinn_alloc_tensor(NULL);
    t207->data = base + 0x244cb4000;
    t207->dtype = CSINN_DTYPE_FLOAT16;
    t207->dim_count = 2;
    t207->dim[0] = 11008;
    t207->dim[1] = 4096;
    t207->name = "blk.22.ffn_up.weight";
    model->layers[22].w3 = t207;
    struct csinn_tensor *t208 = csinn_alloc_tensor(NULL);
    t208->data = base + 0x24a2b4000;
    t208->dtype = CSINN_DTYPE_FLOAT32;
    t208->dim_count = 1;
    t208->dim[0] = 4096;
    t208->name = "blk.22.attn_norm.weight";
    model->layers[22].attn_norm = t208;
    struct csinn_tensor *t209 = csinn_alloc_tensor(NULL);
    t209->data = base + 0x24a2b8000;
    t209->dtype = CSINN_DTYPE_FLOAT32;
    t209->dim_count = 1;
    t209->dim[0] = 4096;
    t209->name = "blk.22.ffn_norm.weight";
    model->layers[22].ffn_norm = t209;
    struct csinn_tensor *t210 = csinn_alloc_tensor(NULL);
    t210->data = base + 0x24a2bc000;
    t210->dtype = CSINN_DTYPE_FLOAT16;
    t210->dim_count = 2;
    t210->dim[0] = 4096;
    t210->dim[1] = 4096;
    t210->name = "blk.23.attn_q.weight";
    model->layers[23].wq = t210;
    struct csinn_tensor *t211 = csinn_alloc_tensor(NULL);
    t211->data = base + 0x24c2bc000;
    t211->dtype = CSINN_DTYPE_FLOAT16;
    t211->dim_count = 2;
    t211->dim[0] = 4096;
    t211->dim[1] = 4096;
    t211->name = "blk.23.attn_k.weight";
    model->layers[23].wk = t211;
    struct csinn_tensor *t212 = csinn_alloc_tensor(NULL);
    t212->data = base + 0x24e2bc000;
    t212->dtype = CSINN_DTYPE_FLOAT16;
    t212->dim_count = 2;
    t212->dim[0] = 4096;
    t212->dim[1] = 4096;
    t212->name = "blk.23.attn_v.weight";
    model->layers[23].wv = t212;
    struct csinn_tensor *t213 = csinn_alloc_tensor(NULL);
    t213->data = base + 0x2502bc000;
    t213->dtype = CSINN_DTYPE_FLOAT16;
    t213->dim_count = 2;
    t213->dim[0] = 4096;
    t213->dim[1] = 4096;
    t213->name = "blk.23.attn_output.weight";
    model->layers[23].wo = t213;
    struct csinn_tensor *t214 = csinn_alloc_tensor(NULL);
    t214->data = base + 0x2522bc000;
    t214->dtype = CSINN_DTYPE_FLOAT16;
    t214->dim_count = 2;
    t214->dim[0] = 11008;
    t214->dim[1] = 4096;
    t214->name = "blk.23.ffn_gate.weight";
    model->layers[23].w1 = t214;
    struct csinn_tensor *t215 = csinn_alloc_tensor(NULL);
    t215->data = base + 0x2578bc000;
    t215->dtype = CSINN_DTYPE_FLOAT16;
    t215->dim_count = 2;
    t215->dim[0] = 4096;
    t215->dim[1] = 11008;
    t215->name = "blk.23.ffn_down.weight";
    model->layers[23].w2 = t215;
    struct csinn_tensor *t216 = csinn_alloc_tensor(NULL);
    t216->data = base + 0x25cebc000;
    t216->dtype = CSINN_DTYPE_FLOAT16;
    t216->dim_count = 2;
    t216->dim[0] = 11008;
    t216->dim[1] = 4096;
    t216->name = "blk.23.ffn_up.weight";
    model->layers[23].w3 = t216;
    struct csinn_tensor *t217 = csinn_alloc_tensor(NULL);
    t217->data = base + 0x2624bc000;
    t217->dtype = CSINN_DTYPE_FLOAT32;
    t217->dim_count = 1;
    t217->dim[0] = 4096;
    t217->name = "blk.23.attn_norm.weight";
    model->layers[23].attn_norm = t217;
    struct csinn_tensor *t218 = csinn_alloc_tensor(NULL);
    t218->data = base + 0x2624c0000;
    t218->dtype = CSINN_DTYPE_FLOAT32;
    t218->dim_count = 1;
    t218->dim[0] = 4096;
    t218->name = "blk.23.ffn_norm.weight";
    model->layers[23].ffn_norm = t218;
    struct csinn_tensor *t219 = csinn_alloc_tensor(NULL);
    t219->data = base + 0x2624c4000;
    t219->dtype = CSINN_DTYPE_FLOAT16;
    t219->dim_count = 2;
    t219->dim[0] = 4096;
    t219->dim[1] = 4096;
    t219->name = "blk.24.attn_q.weight";
    model->layers[24].wq = t219;
    struct csinn_tensor *t220 = csinn_alloc_tensor(NULL);
    t220->data = base + 0x2644c4000;
    t220->dtype = CSINN_DTYPE_FLOAT16;
    t220->dim_count = 2;
    t220->dim[0] = 4096;
    t220->dim[1] = 4096;
    t220->name = "blk.24.attn_k.weight";
    model->layers[24].wk = t220;
    struct csinn_tensor *t221 = csinn_alloc_tensor(NULL);
    t221->data = base + 0x2664c4000;
    t221->dtype = CSINN_DTYPE_FLOAT16;
    t221->dim_count = 2;
    t221->dim[0] = 4096;
    t221->dim[1] = 4096;
    t221->name = "blk.24.attn_v.weight";
    model->layers[24].wv = t221;
    struct csinn_tensor *t222 = csinn_alloc_tensor(NULL);
    t222->data = base + 0x2684c4000;
    t222->dtype = CSINN_DTYPE_FLOAT16;
    t222->dim_count = 2;
    t222->dim[0] = 4096;
    t222->dim[1] = 4096;
    t222->name = "blk.24.attn_output.weight";
    model->layers[24].wo = t222;
    struct csinn_tensor *t223 = csinn_alloc_tensor(NULL);
    t223->data = base + 0x26a4c4000;
    t223->dtype = CSINN_DTYPE_FLOAT16;
    t223->dim_count = 2;
    t223->dim[0] = 11008;
    t223->dim[1] = 4096;
    t223->name = "blk.24.ffn_gate.weight";
    model->layers[24].w1 = t223;
    struct csinn_tensor *t224 = csinn_alloc_tensor(NULL);
    t224->data = base + 0x26fac4000;
    t224->dtype = CSINN_DTYPE_FLOAT16;
    t224->dim_count = 2;
    t224->dim[0] = 4096;
    t224->dim[1] = 11008;
    t224->name = "blk.24.ffn_down.weight";
    model->layers[24].w2 = t224;
    struct csinn_tensor *t225 = csinn_alloc_tensor(NULL);
    t225->data = base + 0x2750c4000;
    t225->dtype = CSINN_DTYPE_FLOAT16;
    t225->dim_count = 2;
    t225->dim[0] = 11008;
    t225->dim[1] = 4096;
    t225->name = "blk.24.ffn_up.weight";
    model->layers[24].w3 = t225;
    struct csinn_tensor *t226 = csinn_alloc_tensor(NULL);
    t226->data = base + 0x27a6c4000;
    t226->dtype = CSINN_DTYPE_FLOAT32;
    t226->dim_count = 1;
    t226->dim[0] = 4096;
    t226->name = "blk.24.attn_norm.weight";
    model->layers[24].attn_norm = t226;
    struct csinn_tensor *t227 = csinn_alloc_tensor(NULL);
    t227->data = base + 0x27a6c8000;
    t227->dtype = CSINN_DTYPE_FLOAT32;
    t227->dim_count = 1;
    t227->dim[0] = 4096;
    t227->name = "blk.24.ffn_norm.weight";
    model->layers[24].ffn_norm = t227;
    /* Transformer block 25: four FP16 4096x4096 attention projections
     * (wq/wk/wv/wo), FP16 FFN gate/up (11008x4096) and down (4096x11008)
     * projections, and two FP32 4096-element norm vectors.  All ->data
     * pointers are fixed byte offsets into the caller-supplied weight blob;
     * the tensors do not own their storage. */
    struct csinn_tensor *t228 = csinn_alloc_tensor(NULL);
    t228->data = base + 0x27a6cc000;
    t228->dtype = CSINN_DTYPE_FLOAT16;
    t228->dim_count = 2;
    t228->dim[0] = 4096;
    t228->dim[1] = 4096;
    t228->name = "blk.25.attn_q.weight";
    model->layers[25].wq = t228;
    struct csinn_tensor *t229 = csinn_alloc_tensor(NULL);
    t229->data = base + 0x27c6cc000;
    t229->dtype = CSINN_DTYPE_FLOAT16;
    t229->dim_count = 2;
    t229->dim[0] = 4096;
    t229->dim[1] = 4096;
    t229->name = "blk.25.attn_k.weight";
    model->layers[25].wk = t229;
    struct csinn_tensor *t230 = csinn_alloc_tensor(NULL);
    t230->data = base + 0x27e6cc000;
    t230->dtype = CSINN_DTYPE_FLOAT16;
    t230->dim_count = 2;
    t230->dim[0] = 4096;
    t230->dim[1] = 4096;
    t230->name = "blk.25.attn_v.weight";
    model->layers[25].wv = t230;
    struct csinn_tensor *t231 = csinn_alloc_tensor(NULL);
    t231->data = base + 0x2806cc000;
    t231->dtype = CSINN_DTYPE_FLOAT16;
    t231->dim_count = 2;
    t231->dim[0] = 4096;
    t231->dim[1] = 4096;
    t231->name = "blk.25.attn_output.weight";
    model->layers[25].wo = t231;
    /* FFN: w1 = gate, w2 = down, w3 = up (SwiGLU-style naming — TODO confirm
     * against the inference code that consumes model->layers[].w1/w2/w3). */
    struct csinn_tensor *t232 = csinn_alloc_tensor(NULL);
    t232->data = base + 0x2826cc000;
    t232->dtype = CSINN_DTYPE_FLOAT16;
    t232->dim_count = 2;
    t232->dim[0] = 11008;
    t232->dim[1] = 4096;
    t232->name = "blk.25.ffn_gate.weight";
    model->layers[25].w1 = t232;
    struct csinn_tensor *t233 = csinn_alloc_tensor(NULL);
    t233->data = base + 0x287ccc000;
    t233->dtype = CSINN_DTYPE_FLOAT16;
    t233->dim_count = 2;
    t233->dim[0] = 4096;
    t233->dim[1] = 11008;
    t233->name = "blk.25.ffn_down.weight";
    model->layers[25].w2 = t233;
    struct csinn_tensor *t234 = csinn_alloc_tensor(NULL);
    t234->data = base + 0x28d2cc000;
    t234->dtype = CSINN_DTYPE_FLOAT16;
    t234->dim_count = 2;
    t234->dim[0] = 11008;
    t234->dim[1] = 4096;
    t234->name = "blk.25.ffn_up.weight";
    model->layers[25].w3 = t234;
    struct csinn_tensor *t235 = csinn_alloc_tensor(NULL);
    t235->data = base + 0x2928cc000;
    t235->dtype = CSINN_DTYPE_FLOAT32;
    t235->dim_count = 1;
    t235->dim[0] = 4096;
    t235->name = "blk.25.attn_norm.weight";
    model->layers[25].attn_norm = t235;
    struct csinn_tensor *t236 = csinn_alloc_tensor(NULL);
    t236->data = base + 0x2928d0000;
    t236->dtype = CSINN_DTYPE_FLOAT32;
    t236->dim_count = 1;
    t236->dim[0] = 4096;
    t236->name = "blk.25.ffn_norm.weight";
    model->layers[25].ffn_norm = t236;
    /* Transformer block 26 — same layout as block 25: FP16 4096x4096
     * attention projections, FP16 gate/up (11008x4096) and down (4096x11008)
     * FFN projections, FP32 4096-element norm vectors; data offsets chain
     * contiguously from the previous tensor. */
    struct csinn_tensor *t237 = csinn_alloc_tensor(NULL);
    t237->data = base + 0x2928d4000;
    t237->dtype = CSINN_DTYPE_FLOAT16;
    t237->dim_count = 2;
    t237->dim[0] = 4096;
    t237->dim[1] = 4096;
    t237->name = "blk.26.attn_q.weight";
    model->layers[26].wq = t237;
    struct csinn_tensor *t238 = csinn_alloc_tensor(NULL);
    t238->data = base + 0x2948d4000;
    t238->dtype = CSINN_DTYPE_FLOAT16;
    t238->dim_count = 2;
    t238->dim[0] = 4096;
    t238->dim[1] = 4096;
    t238->name = "blk.26.attn_k.weight";
    model->layers[26].wk = t238;
    struct csinn_tensor *t239 = csinn_alloc_tensor(NULL);
    t239->data = base + 0x2968d4000;
    t239->dtype = CSINN_DTYPE_FLOAT16;
    t239->dim_count = 2;
    t239->dim[0] = 4096;
    t239->dim[1] = 4096;
    t239->name = "blk.26.attn_v.weight";
    model->layers[26].wv = t239;
    struct csinn_tensor *t240 = csinn_alloc_tensor(NULL);
    t240->data = base + 0x2988d4000;
    t240->dtype = CSINN_DTYPE_FLOAT16;
    t240->dim_count = 2;
    t240->dim[0] = 4096;
    t240->dim[1] = 4096;
    t240->name = "blk.26.attn_output.weight";
    model->layers[26].wo = t240;
    struct csinn_tensor *t241 = csinn_alloc_tensor(NULL);
    t241->data = base + 0x29a8d4000;
    t241->dtype = CSINN_DTYPE_FLOAT16;
    t241->dim_count = 2;
    t241->dim[0] = 11008;
    t241->dim[1] = 4096;
    t241->name = "blk.26.ffn_gate.weight";
    model->layers[26].w1 = t241;
    struct csinn_tensor *t242 = csinn_alloc_tensor(NULL);
    t242->data = base + 0x29fed4000;
    t242->dtype = CSINN_DTYPE_FLOAT16;
    t242->dim_count = 2;
    t242->dim[0] = 4096;
    t242->dim[1] = 11008;
    t242->name = "blk.26.ffn_down.weight";
    model->layers[26].w2 = t242;
    struct csinn_tensor *t243 = csinn_alloc_tensor(NULL);
    t243->data = base + 0x2a54d4000;
    t243->dtype = CSINN_DTYPE_FLOAT16;
    t243->dim_count = 2;
    t243->dim[0] = 11008;
    t243->dim[1] = 4096;
    t243->name = "blk.26.ffn_up.weight";
    model->layers[26].w3 = t243;
    struct csinn_tensor *t244 = csinn_alloc_tensor(NULL);
    t244->data = base + 0x2aaad4000;
    t244->dtype = CSINN_DTYPE_FLOAT32;
    t244->dim_count = 1;
    t244->dim[0] = 4096;
    t244->name = "blk.26.attn_norm.weight";
    model->layers[26].attn_norm = t244;
    struct csinn_tensor *t245 = csinn_alloc_tensor(NULL);
    t245->data = base + 0x2aaad8000;
    t245->dtype = CSINN_DTYPE_FLOAT32;
    t245->dim_count = 1;
    t245->dim[0] = 4096;
    t245->name = "blk.26.ffn_norm.weight";
    model->layers[26].ffn_norm = t245;
    /* Transformer block 27 — same per-block layout: FP16 attention
     * projections (4096x4096), FP16 FFN gate/up (11008x4096) and down
     * (4096x11008), FP32 norm vectors (4096). */
    struct csinn_tensor *t246 = csinn_alloc_tensor(NULL);
    t246->data = base + 0x2aaadc000;
    t246->dtype = CSINN_DTYPE_FLOAT16;
    t246->dim_count = 2;
    t246->dim[0] = 4096;
    t246->dim[1] = 4096;
    t246->name = "blk.27.attn_q.weight";
    model->layers[27].wq = t246;
    struct csinn_tensor *t247 = csinn_alloc_tensor(NULL);
    t247->data = base + 0x2acadc000;
    t247->dtype = CSINN_DTYPE_FLOAT16;
    t247->dim_count = 2;
    t247->dim[0] = 4096;
    t247->dim[1] = 4096;
    t247->name = "blk.27.attn_k.weight";
    model->layers[27].wk = t247;
    struct csinn_tensor *t248 = csinn_alloc_tensor(NULL);
    t248->data = base + 0x2aeadc000;
    t248->dtype = CSINN_DTYPE_FLOAT16;
    t248->dim_count = 2;
    t248->dim[0] = 4096;
    t248->dim[1] = 4096;
    t248->name = "blk.27.attn_v.weight";
    model->layers[27].wv = t248;
    struct csinn_tensor *t249 = csinn_alloc_tensor(NULL);
    t249->data = base + 0x2b0adc000;
    t249->dtype = CSINN_DTYPE_FLOAT16;
    t249->dim_count = 2;
    t249->dim[0] = 4096;
    t249->dim[1] = 4096;
    t249->name = "blk.27.attn_output.weight";
    model->layers[27].wo = t249;
    struct csinn_tensor *t250 = csinn_alloc_tensor(NULL);
    t250->data = base + 0x2b2adc000;
    t250->dtype = CSINN_DTYPE_FLOAT16;
    t250->dim_count = 2;
    t250->dim[0] = 11008;
    t250->dim[1] = 4096;
    t250->name = "blk.27.ffn_gate.weight";
    model->layers[27].w1 = t250;
    struct csinn_tensor *t251 = csinn_alloc_tensor(NULL);
    t251->data = base + 0x2b80dc000;
    t251->dtype = CSINN_DTYPE_FLOAT16;
    t251->dim_count = 2;
    t251->dim[0] = 4096;
    t251->dim[1] = 11008;
    t251->name = "blk.27.ffn_down.weight";
    model->layers[27].w2 = t251;
    struct csinn_tensor *t252 = csinn_alloc_tensor(NULL);
    t252->data = base + 0x2bd6dc000;
    t252->dtype = CSINN_DTYPE_FLOAT16;
    t252->dim_count = 2;
    t252->dim[0] = 11008;
    t252->dim[1] = 4096;
    t252->name = "blk.27.ffn_up.weight";
    model->layers[27].w3 = t252;
    struct csinn_tensor *t253 = csinn_alloc_tensor(NULL);
    t253->data = base + 0x2c2cdc000;
    t253->dtype = CSINN_DTYPE_FLOAT32;
    t253->dim_count = 1;
    t253->dim[0] = 4096;
    t253->name = "blk.27.attn_norm.weight";
    model->layers[27].attn_norm = t253;
    struct csinn_tensor *t254 = csinn_alloc_tensor(NULL);
    t254->data = base + 0x2c2ce0000;
    t254->dtype = CSINN_DTYPE_FLOAT32;
    t254->dim_count = 1;
    t254->dim[0] = 4096;
    t254->name = "blk.27.ffn_norm.weight";
    model->layers[27].ffn_norm = t254;
    /* Transformer block 28 — same per-block layout: FP16 attention
     * projections (4096x4096), FP16 FFN gate/up (11008x4096) and down
     * (4096x11008), FP32 norm vectors (4096). */
    struct csinn_tensor *t255 = csinn_alloc_tensor(NULL);
    t255->data = base + 0x2c2ce4000;
    t255->dtype = CSINN_DTYPE_FLOAT16;
    t255->dim_count = 2;
    t255->dim[0] = 4096;
    t255->dim[1] = 4096;
    t255->name = "blk.28.attn_q.weight";
    model->layers[28].wq = t255;
    struct csinn_tensor *t256 = csinn_alloc_tensor(NULL);
    t256->data = base + 0x2c4ce4000;
    t256->dtype = CSINN_DTYPE_FLOAT16;
    t256->dim_count = 2;
    t256->dim[0] = 4096;
    t256->dim[1] = 4096;
    t256->name = "blk.28.attn_k.weight";
    model->layers[28].wk = t256;
    struct csinn_tensor *t257 = csinn_alloc_tensor(NULL);
    t257->data = base + 0x2c6ce4000;
    t257->dtype = CSINN_DTYPE_FLOAT16;
    t257->dim_count = 2;
    t257->dim[0] = 4096;
    t257->dim[1] = 4096;
    t257->name = "blk.28.attn_v.weight";
    model->layers[28].wv = t257;
    struct csinn_tensor *t258 = csinn_alloc_tensor(NULL);
    t258->data = base + 0x2c8ce4000;
    t258->dtype = CSINN_DTYPE_FLOAT16;
    t258->dim_count = 2;
    t258->dim[0] = 4096;
    t258->dim[1] = 4096;
    t258->name = "blk.28.attn_output.weight";
    model->layers[28].wo = t258;
    struct csinn_tensor *t259 = csinn_alloc_tensor(NULL);
    t259->data = base + 0x2cace4000;
    t259->dtype = CSINN_DTYPE_FLOAT16;
    t259->dim_count = 2;
    t259->dim[0] = 11008;
    t259->dim[1] = 4096;
    t259->name = "blk.28.ffn_gate.weight";
    model->layers[28].w1 = t259;
    struct csinn_tensor *t260 = csinn_alloc_tensor(NULL);
    t260->data = base + 0x2d02e4000;
    t260->dtype = CSINN_DTYPE_FLOAT16;
    t260->dim_count = 2;
    t260->dim[0] = 4096;
    t260->dim[1] = 11008;
    t260->name = "blk.28.ffn_down.weight";
    model->layers[28].w2 = t260;
    struct csinn_tensor *t261 = csinn_alloc_tensor(NULL);
    t261->data = base + 0x2d58e4000;
    t261->dtype = CSINN_DTYPE_FLOAT16;
    t261->dim_count = 2;
    t261->dim[0] = 11008;
    t261->dim[1] = 4096;
    t261->name = "blk.28.ffn_up.weight";
    model->layers[28].w3 = t261;
    struct csinn_tensor *t262 = csinn_alloc_tensor(NULL);
    t262->data = base + 0x2daee4000;
    t262->dtype = CSINN_DTYPE_FLOAT32;
    t262->dim_count = 1;
    t262->dim[0] = 4096;
    t262->name = "blk.28.attn_norm.weight";
    model->layers[28].attn_norm = t262;
    struct csinn_tensor *t263 = csinn_alloc_tensor(NULL);
    t263->data = base + 0x2daee8000;
    t263->dtype = CSINN_DTYPE_FLOAT32;
    t263->dim_count = 1;
    t263->dim[0] = 4096;
    t263->name = "blk.28.ffn_norm.weight";
    model->layers[28].ffn_norm = t263;
    /* Transformer block 29 — same per-block layout: FP16 attention
     * projections (4096x4096), FP16 FFN gate/up (11008x4096) and down
     * (4096x11008), FP32 norm vectors (4096). */
    struct csinn_tensor *t264 = csinn_alloc_tensor(NULL);
    t264->data = base + 0x2daeec000;
    t264->dtype = CSINN_DTYPE_FLOAT16;
    t264->dim_count = 2;
    t264->dim[0] = 4096;
    t264->dim[1] = 4096;
    t264->name = "blk.29.attn_q.weight";
    model->layers[29].wq = t264;
    struct csinn_tensor *t265 = csinn_alloc_tensor(NULL);
    t265->data = base + 0x2dceec000;
    t265->dtype = CSINN_DTYPE_FLOAT16;
    t265->dim_count = 2;
    t265->dim[0] = 4096;
    t265->dim[1] = 4096;
    t265->name = "blk.29.attn_k.weight";
    model->layers[29].wk = t265;
    struct csinn_tensor *t266 = csinn_alloc_tensor(NULL);
    t266->data = base + 0x2deeec000;
    t266->dtype = CSINN_DTYPE_FLOAT16;
    t266->dim_count = 2;
    t266->dim[0] = 4096;
    t266->dim[1] = 4096;
    t266->name = "blk.29.attn_v.weight";
    model->layers[29].wv = t266;
    struct csinn_tensor *t267 = csinn_alloc_tensor(NULL);
    t267->data = base + 0x2e0eec000;
    t267->dtype = CSINN_DTYPE_FLOAT16;
    t267->dim_count = 2;
    t267->dim[0] = 4096;
    t267->dim[1] = 4096;
    t267->name = "blk.29.attn_output.weight";
    model->layers[29].wo = t267;
    struct csinn_tensor *t268 = csinn_alloc_tensor(NULL);
    t268->data = base + 0x2e2eec000;
    t268->dtype = CSINN_DTYPE_FLOAT16;
    t268->dim_count = 2;
    t268->dim[0] = 11008;
    t268->dim[1] = 4096;
    t268->name = "blk.29.ffn_gate.weight";
    model->layers[29].w1 = t268;
    struct csinn_tensor *t269 = csinn_alloc_tensor(NULL);
    t269->data = base + 0x2e84ec000;
    t269->dtype = CSINN_DTYPE_FLOAT16;
    t269->dim_count = 2;
    t269->dim[0] = 4096;
    t269->dim[1] = 11008;
    t269->name = "blk.29.ffn_down.weight";
    model->layers[29].w2 = t269;
    struct csinn_tensor *t270 = csinn_alloc_tensor(NULL);
    t270->data = base + 0x2edaec000;
    t270->dtype = CSINN_DTYPE_FLOAT16;
    t270->dim_count = 2;
    t270->dim[0] = 11008;
    t270->dim[1] = 4096;
    t270->name = "blk.29.ffn_up.weight";
    model->layers[29].w3 = t270;
    struct csinn_tensor *t271 = csinn_alloc_tensor(NULL);
    t271->data = base + 0x2f30ec000;
    t271->dtype = CSINN_DTYPE_FLOAT32;
    t271->dim_count = 1;
    t271->dim[0] = 4096;
    t271->name = "blk.29.attn_norm.weight";
    model->layers[29].attn_norm = t271;
    struct csinn_tensor *t272 = csinn_alloc_tensor(NULL);
    t272->data = base + 0x2f30f0000;
    t272->dtype = CSINN_DTYPE_FLOAT32;
    t272->dim_count = 1;
    t272->dim[0] = 4096;
    t272->name = "blk.29.ffn_norm.weight";
    model->layers[29].ffn_norm = t272;
    /* Transformer block 30 — same per-block layout: FP16 attention
     * projections (4096x4096), FP16 FFN gate/up (11008x4096) and down
     * (4096x11008), FP32 norm vectors (4096). */
    struct csinn_tensor *t273 = csinn_alloc_tensor(NULL);
    t273->data = base + 0x2f30f4000;
    t273->dtype = CSINN_DTYPE_FLOAT16;
    t273->dim_count = 2;
    t273->dim[0] = 4096;
    t273->dim[1] = 4096;
    t273->name = "blk.30.attn_q.weight";
    model->layers[30].wq = t273;
    struct csinn_tensor *t274 = csinn_alloc_tensor(NULL);
    t274->data = base + 0x2f50f4000;
    t274->dtype = CSINN_DTYPE_FLOAT16;
    t274->dim_count = 2;
    t274->dim[0] = 4096;
    t274->dim[1] = 4096;
    t274->name = "blk.30.attn_k.weight";
    model->layers[30].wk = t274;
    struct csinn_tensor *t275 = csinn_alloc_tensor(NULL);
    t275->data = base + 0x2f70f4000;
    t275->dtype = CSINN_DTYPE_FLOAT16;
    t275->dim_count = 2;
    t275->dim[0] = 4096;
    t275->dim[1] = 4096;
    t275->name = "blk.30.attn_v.weight";
    model->layers[30].wv = t275;
    struct csinn_tensor *t276 = csinn_alloc_tensor(NULL);
    t276->data = base + 0x2f90f4000;
    t276->dtype = CSINN_DTYPE_FLOAT16;
    t276->dim_count = 2;
    t276->dim[0] = 4096;
    t276->dim[1] = 4096;
    t276->name = "blk.30.attn_output.weight";
    model->layers[30].wo = t276;
    struct csinn_tensor *t277 = csinn_alloc_tensor(NULL);
    t277->data = base + 0x2fb0f4000;
    t277->dtype = CSINN_DTYPE_FLOAT16;
    t277->dim_count = 2;
    t277->dim[0] = 11008;
    t277->dim[1] = 4096;
    t277->name = "blk.30.ffn_gate.weight";
    model->layers[30].w1 = t277;
    struct csinn_tensor *t278 = csinn_alloc_tensor(NULL);
    t278->data = base + 0x3006f4000;
    t278->dtype = CSINN_DTYPE_FLOAT16;
    t278->dim_count = 2;
    t278->dim[0] = 4096;
    t278->dim[1] = 11008;
    t278->name = "blk.30.ffn_down.weight";
    model->layers[30].w2 = t278;
    struct csinn_tensor *t279 = csinn_alloc_tensor(NULL);
    t279->data = base + 0x305cf4000;
    t279->dtype = CSINN_DTYPE_FLOAT16;
    t279->dim_count = 2;
    t279->dim[0] = 11008;
    t279->dim[1] = 4096;
    t279->name = "blk.30.ffn_up.weight";
    model->layers[30].w3 = t279;
    struct csinn_tensor *t280 = csinn_alloc_tensor(NULL);
    t280->data = base + 0x30b2f4000;
    t280->dtype = CSINN_DTYPE_FLOAT32;
    t280->dim_count = 1;
    t280->dim[0] = 4096;
    t280->name = "blk.30.attn_norm.weight";
    model->layers[30].attn_norm = t280;
    struct csinn_tensor *t281 = csinn_alloc_tensor(NULL);
    t281->data = base + 0x30b2f8000;
    t281->dtype = CSINN_DTYPE_FLOAT32;
    t281->dim_count = 1;
    t281->dim[0] = 4096;
    t281->name = "blk.30.ffn_norm.weight";
    model->layers[30].ffn_norm = t281;
    /* Transformer block 31 (final layer) — same per-block layout: FP16
     * attention projections (4096x4096), FP16 FFN gate/up (11008x4096) and
     * down (4096x11008), FP32 norm vectors (4096). */
    struct csinn_tensor *t282 = csinn_alloc_tensor(NULL);
    t282->data = base + 0x30b2fc000;
    t282->dtype = CSINN_DTYPE_FLOAT16;
    t282->dim_count = 2;
    t282->dim[0] = 4096;
    t282->dim[1] = 4096;
    t282->name = "blk.31.attn_q.weight";
    model->layers[31].wq = t282;
    struct csinn_tensor *t283 = csinn_alloc_tensor(NULL);
    t283->data = base + 0x30d2fc000;
    t283->dtype = CSINN_DTYPE_FLOAT16;
    t283->dim_count = 2;
    t283->dim[0] = 4096;
    t283->dim[1] = 4096;
    t283->name = "blk.31.attn_k.weight";
    model->layers[31].wk = t283;
    struct csinn_tensor *t284 = csinn_alloc_tensor(NULL);
    t284->data = base + 0x30f2fc000;
    t284->dtype = CSINN_DTYPE_FLOAT16;
    t284->dim_count = 2;
    t284->dim[0] = 4096;
    t284->dim[1] = 4096;
    t284->name = "blk.31.attn_v.weight";
    model->layers[31].wv = t284;
    struct csinn_tensor *t285 = csinn_alloc_tensor(NULL);
    t285->data = base + 0x3112fc000;
    t285->dtype = CSINN_DTYPE_FLOAT16;
    t285->dim_count = 2;
    t285->dim[0] = 4096;
    t285->dim[1] = 4096;
    t285->name = "blk.31.attn_output.weight";
    model->layers[31].wo = t285;
    struct csinn_tensor *t286 = csinn_alloc_tensor(NULL);
    t286->data = base + 0x3132fc000;
    t286->dtype = CSINN_DTYPE_FLOAT16;
    t286->dim_count = 2;
    t286->dim[0] = 11008;
    t286->dim[1] = 4096;
    t286->name = "blk.31.ffn_gate.weight";
    model->layers[31].w1 = t286;
    struct csinn_tensor *t287 = csinn_alloc_tensor(NULL);
    t287->data = base + 0x3188fc000;
    t287->dtype = CSINN_DTYPE_FLOAT16;
    t287->dim_count = 2;
    t287->dim[0] = 4096;
    t287->dim[1] = 11008;
    t287->name = "blk.31.ffn_down.weight";
    model->layers[31].w2 = t287;
    struct csinn_tensor *t288 = csinn_alloc_tensor(NULL);
    t288->data = base + 0x31defc000;
    t288->dtype = CSINN_DTYPE_FLOAT16;
    t288->dim_count = 2;
    t288->dim[0] = 11008;
    t288->dim[1] = 4096;
    t288->name = "blk.31.ffn_up.weight";
    model->layers[31].w3 = t288;
    struct csinn_tensor *t289 = csinn_alloc_tensor(NULL);
    t289->data = base + 0x3234fc000;
    t289->dtype = CSINN_DTYPE_FLOAT32;
    t289->dim_count = 1;
    t289->dim[0] = 4096;
    t289->name = "blk.31.attn_norm.weight";
    model->layers[31].attn_norm = t289;
    struct csinn_tensor *t290 = csinn_alloc_tensor(NULL);
    t290->data = base + 0x323500000;
    t290->dtype = CSINN_DTYPE_FLOAT32;
    t290->dim_count = 1;
    t290->dim[0] = 4096;
    t290->name = "blk.31.ffn_norm.weight";
    model->layers[31].ffn_norm = t290;
    /* Caller owns the returned model; tensor ->data pointers alias `base`
     * and remain valid only while the underlying weight blob is mapped.
     * NOTE(review): allocation results (shl_mem_alloc/csinn_alloc_tensor)
     * are not checked anywhere in this generated loader — confirm the
     * allocators abort on failure rather than returning NULL. */
    return model;
}