#include "runtime/c_api.h"
#include "mem/sram.h"
#include "mem/dram.h"
#include "device/conv_path.h"
#include "type.h"
#include <string.h>

// Total element count of a tensor: dim1 * dim2 * dim3 * dim4.
// NOTE(review): assumes the dim fields describe element counts (not bytes)
// and that all four are meaningful for every layout — confirm against
// the nne_tensor definition in runtime/c_api.h.
inline u64 tensor_size(const nne_tensor& t) noexcept {
    // Widen to u64 BEFORE multiplying: if the dim fields are 32-bit, the
    // original product could overflow in 32-bit arithmetic and only then
    // be converted to u64, yielding a wrong size for large tensors.
    return static_cast<u64>(t.dim1) * t.dim2 * t.dim3 * t.dim4;
}

// Convolution entry point — NOT YET IMPLEMENTED.
//
// The commented-out code below is a work-in-progress draft kept for
// reference. NOTE(review): the draft is not compilable as-is — it mixes
// two names for the conv object (`conv_obj` vs `conv_path_ptr`), has an
// incomplete assignment (`scale_bias_start[i*2 + 1] = ;`), and references
// many variables (ifm_width, ofm_num, ...) that are never declared. It
// must be reworked before being re-enabled.
nne_status nne_conv(nne_conv_param conv_param, nne_tensor input_tensor, nne_tensor weight_tensor, nne_tensor bias_tensor, nne_tensor output_tensor)
{    
//    if(tensor_size(input_tensor) + tensor_size(weight_tensor) 
//            + bias_tensor.dim1 * 8 > 8*1024*1024)
//        CHECK_MSG(false, "too large tensor size");
//    auto dram_obj = std::make_shared<Dram>(1024);
//    auto sram_obj = std::make_shared<Sram>(8*1024*1024);
//    auto conv_obj = std::make_shared<ConvPath>(sram_obj, dram_obj);
//    s8* sram_array_ptr = sram_obj->getArrayPointer();
//    conv_obj->ifm_start_L = 0;
//    conv_obj->ifm_start_m = 0;
//    memcpy(sram_array_ptr, input_tensor.tensor_addr, tensor_size(input_tensor));
//    conv_obj->knl_start_L = tensor_size(input_tensor);
//    conv_obj->knl_start_m = 0;
//    memcpy(sram_array_ptr + conv_obj->knl_start_L,
//            weight_tensor.tensor_addr, tensor_size(weight_tensor));
//    conv_obj->ofm_bias_start_L = conv_obj->knl_start_L + tensor_size(weight_tensor);
//    conv_obj->ofm_bias_start_m = 0;
//    u32* scale_bias_start = reinterpret_cast<u32*>(
//            sram_array_ptr + conv_obj->ofm_bias_start_L);
//    for(int i=0; i<bias_tensor.dim1;i++)
//    {
//        scale_bias_start[i*2] = reinterpret_cast<u32*>(bias_tensor.tensor_addr)[i];
//        
//        scale_bias_start[i*2 + 1] = ;
//    }
//
//
//    s32* bias_array_ptr = reinterpret_cast<s32*>(sram_array_ptr + 4*1024*1024);
//    for(int i=0;i<2*ofm_num;i = i+2){
//        bias_array_ptr[i] = (s32)(rand()%2048 -1024);
//        *reinterpret_cast<u32*>(&bias_array_ptr[i+1]) = (u64)1<<24;
//    }
//    s8* output_t = new s8[8*1024*1024];
//    //conv_path_ptr->irq_status = irq_status;
//    //conv_path_ptr->irq_mask = irq_mask;
//    //conv_path_ptr->debug_reg0 = debug_reg0;
//    //conv_path_ptr->cvnn_start = cvnn_start;
//    conv_path_ptr->kernel_stride = kernel_stride;
//    //conv_path_ptr->fc_detail = fc_detail;
//    conv_path_ptr->ifm_width = ifm_width;
//    conv_path_ptr->ifm_height = ifm_height;
//    conv_path_ptr->ofm_width = ofm_width;
//    conv_path_ptr->ofm_height = ofm_height;
//    conv_path_ptr->ifm_num = ifm_num;
//    conv_path_ptr->ofm_num = ofm_num;
//    //conv_path_ptr->fc_input_size = fc_input_size;
//    //conv_path_ptr->fc_output_size = fc_output_size;
//    conv_path_ptr->padding_idc = padding_idc;
//    conv_path_ptr->padding_word = padding_word;
//    conv_path_ptr->padding_size = padding_size;
//    conv_path_ptr->op_type = op_type;
//    conv_path_ptr->conv_detail = conv_detail;
//    //conv_path_ptr->ifm_start_L = ifm_start_addr_l;
//    //conv_path_ptr->ifm_start_m = ifm_start_addr_m;
//    conv_path_ptr->ifm_offset = ifm_offset;
//    ////conv_path_ptr->valid_ifm_offset = valid_ifm_offset;
//    //conv_path_ptr->out_scale = out_scale;
//    //conv_path_ptr->knl_start_L = knl_start_L;
//    //conv_path_ptr->knl_start_m = knl_start_m;
//    conv_path_ptr->knl_offset = kernel_offset;
//    //conv_path_ptr->valid_knl_offset = valid_knl_offset;
//    conv_path_ptr->knl_size = kernel_size;
//    //conv_path_ptr->ofm_bias_start_L = ofm_bias_start_L;
//    //conv_path_ptr->ofm_bias_start_m = ofm_bias_start_m;
//    //conv_path_ptr->valid_bias_offset = valid_bias_offset;
//    //conv_path_ptr->ofm_start_L = ofm_start_L;
//    //conv_path_ptr->ofm_start_m = ofm_start_m;
//    conv_path_ptr->ofm_offset = ofm_offset;
//    conv_path_ptr->out_shift = out_shift;
//    conv_path_ptr->lut_bypass = lut_bypass;
//    conv_path_ptr->ifm_sub_width = ifm_sub_width;
//    conv_path_ptr->ofm_sub_width_flag = ofm_sub_width_flag;
//    conv_path_ptr->ofm_sub_width = ofm_sub_width;
//    //conv_path_ptr->axi_port_base_addr_L[0] = axi_port_base_addr_L[AXI_PORT_NUM];
//    //conv_path_ptr->axi_port_base_addr_m[AXI_PORT_NUM] = axi_port_base_addr_m[AXI_PORT_NUM];
//    conv_path_ptr->ofm_bias_start_L = 4*1024*1024;
//    conv_path_ptr->ofm_bias_start_m = 0;
//    conv_path_ptr->ifm_start_L = 0;
//    conv_path_ptr->ifm_start_m = 0;
//    conv_path_ptr->knl_start_L = 2*1024*1024;
//    conv_path_ptr->knl_start_m = 2*1024*1024;
//    conv_path_ptr->ofm_start_L = 0;
//    conv_path_ptr->ofm_start_m = 0;
//    conv_path_ptr->cvnn_start = true;
//    conv_path_ptr->irq_status = false;
//    conv_path_ptr->Run();
//    dram_ptr->read_mem(output_t,0,ofm_width * ofm_height * ofm_num);
//
//    u32 ifm_w = ifm_width&0xfff;
//
//    dump_to2file("ifm_h1_ref.data","ifm_h2_ref.data",
//            reinterpret_cast<u8*>(sram_array_ptr),ifm_w * ifm_height * ifm_num);
//    dump_to2file("knl_h1_ref.data","knl_h2_ref.data",
//            reinterpret_cast<u8*>(sram_array_ptr + 2*1024*1024),
//            kernel_size * ifm_num * ofm_num
//            );
//    dump_to2file("bias_h1_ref.data","bias_h2_ref.data",
//            reinterpret_cast<u8*>(sram_array_ptr + 4*1024*1024),
//            ofm_num * 8);
//    dump_to1file("ofm_ref.data",reinterpret_cast<u8*>(output_t),ofm_width * ofm_height * ofm_num);
//#ifdef DEBUG
//    dump_to1file("ofm_debug_mac_ref.data", reinterpret_cast<u8*>(conv_path_ptr->debug_arr), conv_path_ptr->debug_arr_size * 4);
//    dump_to1file("ofm_debug_acc_ref.data", reinterpret_cast<u8*>(conv_path_ptr->debug_arr2), ofm_width * ofm_height * ofm_num * 4);
//    dump_to1file("ofm_debug_beforeRelu_ref.data", reinterpret_cast<u8*>(conv_path_ptr->debug_arr3), ofm_width * ofm_height * ofm_num);
//#endif
//    delete conv_path_ptr;
//    delete dram_ptr;
//    delete sram_ptr;
//    delete [] output_t;
//    return;
//
    // Silence unused-parameter warnings until the implementation lands.
    (void)conv_param;
    (void)input_tensor;
    (void)weight_tensor;
    (void)bias_tensor;
    (void)output_tensor;
    // BUG FIX: the function previously flowed off the end of a non-void
    // function without returning — undefined behavior in C++. Return a
    // value-initialized status for now.
    // TODO: replace with the project's proper "not implemented" / error
    // nne_status code once the enum values are confirmed.
    return nne_status{};
}


// Depthwise convolution entry point — NOT YET IMPLEMENTED.
nne_status nne_dwconv(nne_conv_param dwconv_param, nne_tensor input_tensor, nne_tensor weight_tensor, nne_tensor bias_tensor, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)dwconv_param;
    (void)input_tensor;
    (void)weight_tensor;
    (void)bias_tensor;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}


// Max-pooling entry point — NOT YET IMPLEMENTED.
nne_status nne_maxpool(nne_pool_param maxpool_param, nne_tensor input_tensor, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)maxpool_param;
    (void)input_tensor;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}


// Average-pooling entry point — NOT YET IMPLEMENTED.
nne_status nne_avepool(nne_pool_param avepool_param, nne_tensor input_tensor, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)avepool_param;
    (void)input_tensor;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}

// Fully-connected layer entry point — NOT YET IMPLEMENTED.
// NOTE(review): unlike the conv entry points this takes an
// nne_activation_param rather than a layer-specific param struct —
// confirm against the declaration in runtime/c_api.h.
nne_status nne_fc(nne_activation_param activation_param, nne_tensor input_tensor, nne_tensor weight_tensor, nne_tensor bias_tensor, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)activation_param;
    (void)input_tensor;
    (void)weight_tensor;
    (void)bias_tensor;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}

// Element-wise addition entry point — NOT YET IMPLEMENTED.
nne_status nne_ew_add(nne_tensor input_tensor_1, nne_tensor input_tensor_2, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)input_tensor_1;
    (void)input_tensor_2;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}

// Element-wise multiplication entry point — NOT YET IMPLEMENTED.
nne_status nne_ew_mul(nne_tensor input_tensor_1, nne_tensor input_tensor_2, nne_tensor output_tensor)
{
    // Silence unused-parameter warnings until the implementation lands.
    (void)input_tensor_1;
    (void)input_tensor_2;
    (void)output_tensor;
    // BUG FIX: previously flowed off the end of a non-void function
    // without returning — undefined behavior in C++.
    // TODO: return the project's "not implemented" status once confirmed.
    return nne_status{};
}
