#include "model.h"

// Construct the fixed 4-layer MLP: 225 -> 100 -> 40 -> 10 -> 10.
// Each layer is a heap-allocated Linear; sizes are hard-wired here.
// NOTE(review): 225 is presumably a 15x15 image flattened — confirm with caller.
nn::nn(){
    // Layer dimensions (input, three hidden layers, output).
    input_num = 225;
    h1_num    = 100;
    h2_num    = 40;
    h3_num    = 10;
    output_num = 10;
    // Wire up the layers so each consumes the previous layer's width.
    l1 = new Linear(input_num, h1_num);
    l2 = new Linear(h1_num,   h2_num);
    l3 = new Linear(h2_num,   h3_num);
    l4 = new Linear(h3_num,   output_num);
    std::cout<<"网络初始化成功！"<<std::endl;
}
void nn::forward(float* input,int input_num){
    // int input_num=Func::arraylenth(input);
    if(input_num!=this->input_num){
        std::cout<<"输入数据维度为："<<input_num<<",输入数据维度不正确"<<std::endl;
        return;
    }
    this->input=input;
    this->res=this->l1->forward(input,this->input_num);
    this->res1=Func::relu(res,this->h1_num);
    this->res2=this->l2->forward(res1,this->h1_num);
    this->res3=Func::relu(res2,this->h2_num);
    this->res4=this->l3->forward(res3,this->h2_num);
    this->res5=Func::relu(res4,this->h3_num);
    // this->res6=Func::softmax(res5,this->h3_num);
    this->res7=this->l4->forward(res4,this->h3_num);
    this->res8=Func::relu(res7,this->output_num);
}
// Backward pass: propagate the loss gradient layer by layer and update
// each Linear's parameters with SGD.
// target:        expected output vector (length output_num).
// learning_rate: SGD step size passed through to update_para.
// Per layer the pattern is: gra_relu (back through the activation),
// update_para (layer input, activation gradient), gra_weight (gradient
// w.r.t. the layer's input, to feed the next layer down).
void nn::backward(float* target,float learning_rate){
    // dL/d(res8): gradient of the loss at the network output.
    float* gra_los=gradient::gra_loss(this->res8,target,this->output_num);
    // Back through the output ReLU (res8 = relu(res7)).
    float* gra_l4_relu=gradient::gra_relu(this->res7,this->output_num,gra_los,this->output_num);
    // Bug fix: update_para takes the layer's *input* activation — for l4
    // that is res5 (l3's ReLU output), not res7 (l4's own pre-activation).
    // The original only avoided reading out of bounds because h3_num and
    // output_num are both 10.
    this->l4->update_para(this->res5,gra_l4_relu,learning_rate,this->h3_num,this->output_num);
    float* gra_l4=gradient::gra_weight(this->h3_num,this->l4->weight,gra_l4_relu,this->output_num);
    // float* gra_l3_sf=gradient::gra_softmax(gra_l4,this->h3_num);
    // Back through l3's ReLU (res5 = relu(res4)).
    float* gra_l3_relu=gradient::gra_relu(this->res4,this->h3_num,gra_l4,this->h3_num);
    this->l3->update_para(this->res3,gra_l3_relu,learning_rate,this->h2_num,this->h3_num);
    float* gra_l3=gradient::gra_weight(this->h2_num,this->l3->weight,gra_l3_relu,this->h3_num);
    // Back through l2's ReLU (res3 = relu(res2)).
    float* gra_l2_relu=gradient::gra_relu(this->res2,this->h2_num,gra_l3,this->h2_num);
    this->l2->update_para(this->res1,gra_l2_relu,learning_rate,this->h1_num,this->h2_num);
    // Bug fix: gra_weight's last argument is the gradient length, i.e.
    // l2's *output* dimension h2_num — the original passed h1_num,
    // breaking the pattern of the gra_weight calls above.
    float* gra_l2=gradient::gra_weight(this->h1_num,this->l2->weight,gra_l2_relu,this->h2_num);
    // Back through l1's ReLU (res1 = relu(res)).
    float* gra_l1_relu=gradient::gra_relu(this->res,this->h1_num,gra_l2,this->h1_num);
    this->l1->update_para(this->input,gra_l1_relu,learning_rate,this->input_num,this->h1_num);
    // float* gra_l1=gradient::gra_weight(this->input_num,this->l1->weight,gra_l1_relu);
    // NOTE(review): the gra_* buffers appear to be heap-allocated inside
    // gradient::* — if so they leak on every training step; confirm
    // ownership and delete[] them here.
}