#if(1)
#include <Log/output.h>
#include <Classifier/torch.h>

#include <cstring>    // memcpy, memset
#include <stdexcept>  // runtime_error
#include <string>
#include <vector>

#include "opencv2/opencv.hpp"   

using namespace Eigen;
using namespace cv;
using namespace std;


// Construct a 4-D tensor from a {dim0, dim1, dim2, dim3} shape vector.
// Storage is one flat column vector of dim0*dim1*dim2*dim3 floats.
CpuTensor4d::CpuTensor4d(const vector<int>& _shape)
{
    // Bug fix: the old code memcpy'd sizeof(shape) bytes unconditionally,
    // reading out of bounds when the caller passed fewer than 4 dimensions.
    if (_shape.size() < 4)
    {
        throw runtime_error("CpuTensor4d: shape must contain 4 dimensions");
    }
    memcpy(shape, _shape.data(), sizeof(shape));
    data = MatrixXf(shape[0] * shape[1] * shape[2] * shape[3], 1);
}

// Move constructor: steal the payload, copy the (trivial) dimension array.
// The moved-from tensor keeps its old shape values, which is harmless.
CpuTensor4d::CpuTensor4d(CpuTensor4d&& t)noexcept
{
    shape[0] = t.shape[0];
    shape[1] = t.shape[1];
    shape[2] = t.shape[2];
    shape[3] = t.shape[3];
    data = move(t.data);
}

// Move assignment: take over the payload and mirror the dimensions.
CpuTensor4d& CpuTensor4d::operator=(CpuTensor4d&& t)noexcept
{
    data = move(t.data);
    for (int d = 0; d < 4; ++d)
    {
        shape[d] = t.shape[d];
    }
    return *this;
}

// Copy assignment: deep-copy the payload and mirror the dimensions.
CpuTensor4d& CpuTensor4d::operator=(const CpuTensor4d& t)
{
    data = t.data;
    for (int d = 0; d < 4; ++d)
    {
        shape[d] = t.shape[d];
    }
    return *this;
}

// Copy constructor: deep-copy the payload and mirror the dimensions.
CpuTensor4d::CpuTensor4d(const CpuTensor4d& t)
{
    data = t.data;
    shape[0] = t.shape[0];
    shape[1] = t.shape[1];
    shape[2] = t.shape[2];
    shape[3] = t.shape[3];
}

// Default constructor: an empty tensor with all dimensions zeroed.
CpuTensor4d::CpuTensor4d()
{
    shape[0] = shape[1] = shape[2] = shape[3] = 0;
}

// Mutable view of the (dim0, dim1) slice as a shape[2] x shape[3] matrix.
// The Map shares storage with `data` — no copy is made.
Map<MatrixXf> CpuTensor4d::operator()(int dim0, int dim1)
{
    const int plane = shape[2] * shape[3];
    float* base = data.data() + (dim0 * shape[1] + dim1) * plane;
    return Map<MatrixXf>(base, shape[2], shape[3]);
}

// Read-only view of the (dim0, dim1) slice as a shape[2] x shape[3] matrix.
Map<const MatrixXf> CpuTensor4d::operator()(int dim0, int dim1) const
{
    const int plane = shape[2] * shape[3];
    const float* base = data.data() + (dim0 * shape[1] + dim1) * plane;
    return Map<const MatrixXf>(base, shape[2], shape[3]);
}


// Return the four dimensions as a vector (a copy; the tensor is unchanged).
vector<int> CpuTensor4d::getShape() const
{
    return {shape[0], shape[1], shape[2], shape[3]};
}

// Single-channel 2-D "valid" cross-correlation (no padding, stride 1) of
// `filter` over `input`. Returns an
// (input.rows - filter.rows + 1) x (input.cols - filter.cols + 1) matrix.
MatrixXf Conv2d::conv(const MatrixXf& filter, const MatrixXf& input) const
{
#if(1)
    // Direct method: each output element is the sum of the element-wise
    // product of the kernel with the window it covers.
    const int result_rows = input.rows() - filter.rows() + 1;
    const int result_cols = input.cols() - filter.cols() + 1;
    MatrixXf result(result_rows, result_cols);
    for (int row = 0; row < result_rows; ++row) 
    {
        for (int col = 0; col < result_cols; ++col) 
        {
            result(row, col) = (input.block(row, col, filter.rows(), filter.cols()).array() * filter.array()).sum();
        }
    }
    return result;  // plain return enables NRVO; std::move would suppress it
#else
    // im2col variant: unfold every kernel window into one column, then do
    // the whole convolution as a single matrix-vector product.
    const int result_rows = input.rows() - filter.rows() + 1;
    const int result_cols = input.cols() - filter.cols() + 1;
    const int input_rows = input.rows();
    const int filter_rows = filter.rows();
    const int filter_cols = filter.cols();
    const int colsMat_rows = filter_rows * filter_cols;  // one unfolded window per column
    const int colsMat_cols = result_rows * result_cols;

    MatrixXf colsMat = MatrixXf(colsMat_rows, colsMat_cols);
    Map<const MatrixXf> colsFilter(filter.data(), colsMat_rows, 1);
    auto t1 = getTickCount();
    const float* src = input.data();
    float* dst = colsMat.data();
    for (int col = 0; col < colsMat_cols; ++col)
    {
        const int i = col / result_rows;  // window column index
        const int j = col % result_rows;  // window row index
        for (int n = 0; n < filter_cols; ++n)
        {
            // Each kernel column of a window is contiguous in Eigen's
            // column-major storage, so it can be copied in one memcpy.
            const int offsetdst = col * colsMat_rows + n * filter_rows;
            const int offsetsrc = (i + n) * input_rows + j;
            memcpy(dst + offsetdst, src + offsetsrc, sizeof(float) * filter_rows);
        }
    }
    auto t2 = getTickCount();
    MatrixXf result = colsMat.transpose() * colsFilter;
    result.resize(result_rows, result_cols);
    auto t3 = getTickCount();
    // Bug fix: cv::getTickCount() ticks must be converted with
    // getTickFrequency(); CLOCKS_PER_SEC belongs to std::clock().
    auto dt1 = (double)(t2 - t1) / getTickFrequency();
    auto dt2 = (double)(t3 - t2) / getTickFrequency();

    //LOG("cvt time is " + to_string(dt1) + "s; multi time is " + to_string(dt2) + "\n");
    return result;
#endif
    
}


//void im2col_cpu(float* data_im,
//    int channels, int height, int width,
//    int ksize, int stride, int pad, float* data_col)
//{
//    int c, h, w;
//    int height_col = (height + 2 * pad - ksize) / stride + 1;
//    int width_col = (width + 2 * pad - ksize) / stride + 1;
//
//    int channels_col = channels * ksize * ksize;
//    for (c = 0; c < channels_col; ++c) {
//        int w_offset = c % ksize;
//        int h_offset = (c / ksize) % ksize;
//        int c_im = c / ksize / ksize;
//        for (h = 0; h < height_col; ++h) {
//            for (w = 0; w < width_col; ++w) {
//                int im_row = h_offset + h * stride;
//                int im_col = w_offset + w * stride;
//                int col_index = (c * height_col + h) * width_col + w;
//                data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
//                    im_row, im_col, c_im, pad);
//            }
//        }
//    }
//}

Conv2d::Conv2d(const Tensor4d& w, const Tensor4d& b):conv_w(w), conv_b(b) {}

// Forward convolution ("valid" padding, stride 1) via im2col + GEMM:
// each batch's input is unfolded so one matrix product computes all output
// channels at once. Output shape: {batch, out_channels, out_rows, out_cols}.
Tensor4d Conv2d::operator()(const Tensor4d& input) const 
{
#if(1)
    // Shape/size validation intentionally omitted (as in the original).
    const int batch_size = input.shape[0];
    const int output_channels = conv_w.shape[0];
    const int output_rows = input.shape[2] - conv_w.shape[2] + 1;
    const int output_cols = input.shape[3] - conv_w.shape[3] + 1;
    Tensor4d output({ batch_size, output_channels, output_rows, output_cols });

    const int input_channels = input.shape[1];
    const int input_rows = input.shape[2];
    const int input_cols = input.shape[3];

    const int kernel_rows = conv_w.shape[2];
    const int kernel_cols = conv_w.shape[3];

    // Unfolded-input dimensions: one column per output position, one row per
    // kernel element across all input channels.
    const int column_input_cols = output_rows * output_cols;
    const int column_input_rows = kernel_rows * kernel_cols * input_channels;

//#pragma omp parallel for
    for (int batch = 0; batch < batch_size; ++batch)
    {
        MatrixXf column_input(column_input_rows, column_input_cols);
        int row, col, chl, kcol;
        const float* src = input.data.data();
        float* dst = column_input.data();

        const int inputoffset0 = batch * input_channels * input_rows * input_cols;
        const int outputoffset0 = batch * output_channels * output_rows * output_cols;

        int srcoffset, dstoffset, dstoffset0, srcoffset0, dstoffset1, srcoffset1;

        // im2col: each kernel-row strip of a window is contiguous in Eigen's
        // column-major storage, so it is copied with one memcpy per
        // (channel, kernel column) pair.
        for (col = 0; col < output_cols; ++col)
        {
            for (row = 0; row < output_rows; ++row)
            {   
                srcoffset0 = inputoffset0 + row;
                dstoffset0 = (col * output_rows + row) * column_input_rows;
                for (chl = 0; chl < input_channels; ++chl)
                {
                    srcoffset1 = srcoffset0 + chl * input_rows * input_cols;
                    dstoffset1 = dstoffset0 + chl * kernel_rows * kernel_cols;
                    for (kcol = 0; kcol < kernel_cols; ++kcol)
                    {
                        srcoffset = srcoffset1 + (col + kcol) * input_rows;
                        dstoffset = dstoffset1 + kcol * kernel_rows;
                        memcpy(dst + dstoffset, src + srcoffset, sizeof(float) * kernel_rows);
                    }
                }
            }
        }

        // One GEMM per batch: (positions x unfolded) * (unfolded x out_ch),
        // written straight into the output tensor's storage through a Map.
        Map<MatrixXf> rst(output.data.data() + outputoffset0, column_input_cols, output_channels);
        Map<const MatrixXf> kernel(conv_w.data.data(), column_input_rows, output_channels);
        rst = column_input.transpose() * kernel;

        // Add the per-output-channel bias.
        for (int i = 0; i < output_channels; ++i)
        {
            rst.col(i) = rst.col(i).array() + conv_b(0, 0)(i, 0);
        }
    }
    return output;  // NRVO; the old `return move(output)` suppressed elision

#else
    // Naive variant: per (batch, out_channel), accumulate 2-D convolutions
    // over the input channels, then add the bias.
    // Shape/size validation intentionally omitted (as in the original).
    const int batch_size = input.shape[0];
    const int out_channels = conv_w.shape[0];
    const int output_rows = input.shape[2] - conv_w.shape[2] + 1;
    const int output_cols = input.shape[3] - conv_w.shape[3] + 1;
    Tensor4d result({ batch_size, out_channels, output_rows, output_cols });
    const int in_channels = input.shape[1];

    int total = batch_size * out_channels;
#pragma omp parallel for num_threads(8)
    for(int idx = 0; idx < total; ++idx)
    {
        int batch = idx / out_channels;
        int out_channel_idx = idx % out_channels;
        Map<MatrixXf>val = result(batch, out_channel_idx);
        float bias = conv_b(0, 0)(out_channel_idx, 0);
        for (int in_channel_idx = 0; in_channel_idx < in_channels; in_channel_idx++) 
        {
            if (in_channel_idx == 0)
            {
                val = conv(conv_w(out_channel_idx, in_channel_idx), input(batch, in_channel_idx));
                continue;
            }
            val += conv(conv_w(out_channel_idx,in_channel_idx), input(batch,in_channel_idx));
        }
        val = val.array() + bias;
    }
    return result;  // NRVO
#endif
}

// Element-wise ReLU: max(v, 0) over every coefficient of `input`.
MatrixXf Relu::relu(const MatrixXf& input) const
{
    // Float literal fixes the old int/float mixed ternary; returning the
    // expression directly lets RVO elide the temporary (the previous
    // `return move(output)` suppressed elision).
    return input.unaryExpr([](float v) { return v > 0.0f ? v : 0.0f; });
}

Relu::Relu(){}

// Apply ReLU to an entire tensor in one Eigen expression over the flat
// storage; the shape metadata is copied unchanged.
Tensor4d Relu::operator()(const Tensor4d& input) const 
{
    Tensor4d result(input.getShape());
    result.data = relu(input.data);
    return result;  // NRVO; the old `return move(result)` suppressed elision
}

Max_pool2d::Max_pool2d(const int kernel_size):kernel_size(kernel_size) {}

// Non-overlapping max pooling: kernel_size x kernel_size windows with
// stride == kernel_size. Trailing rows/columns that do not fill a whole
// window are dropped (floor division), matching the original behavior.
Tensor4d Max_pool2d::operator()(const Tensor4d& input)const
{
    const int batch_size = input.shape[0];
    const int channels = input.shape[1];
    const int result_rows = input.shape[2] / kernel_size;
    const int result_cols = input.shape[3] / kernel_size;

    Tensor4d result({batch_size, channels, result_rows, result_cols});

    for (int batch = 0; batch < batch_size; ++batch) 
    {   
        for (int channel_idx = 0; channel_idx < channels; ++channel_idx) 
        {
            Map<const MatrixXf> mat = input(batch, channel_idx);
            // Hoisted: the old code rebuilt this Map for every output element.
            Map<MatrixXf> out = result(batch, channel_idx);
            for (int row = 0; row < result_rows; ++row) 
            {
                for (int col = 0; col < result_cols; ++col) 
                {
                    // Bug fix: take the true window maximum. The old code
                    // seeded the running max with 0, silently clamping
                    // all-negative windows to 0.
                    out(row, col) = mat.block(row * kernel_size, col * kernel_size,
                                              kernel_size, kernel_size).maxCoeff();
                }
            }
        }
    }
    return result;  // NRVO; the old `return move(result)` suppressed elision
}

// Flatten each sample's (C, H, W) volume into one column of the output, in
// C order (channel, then row, then column) — the same order torch.flatten
// uses. Output shape: {1, 1, C*H*W, batch}.
Tensor4d Flatten::operator()(const Tensor4d& input) const 
{
    const int batch_size = input.shape[0];
    const int channels = input.shape[1];
    const int rows = input.shape[2];
    const int cols = input.shape[3];
    const int result_len = channels * rows * cols;

    Tensor4d result({1, 1, result_len, batch_size});

    // Hoisted: the old code rebuilt both slice Maps for every element.
    Map<MatrixXf> out = result(0, 0);
    for (int batch = 0; batch < batch_size; batch++) 
    {
        int flatten_idx = 0;
        for (int channel_idx = 0; channel_idx < channels; channel_idx++)
        {            
            Map<const MatrixXf> plane = input(batch, channel_idx);
            for (int row = 0; row < rows; row++)
            {
                for (int col = 0; col < cols; col++)
                {
                    out(flatten_idx, batch) = plane(row, col);
                    flatten_idx++;
                }
            }
        }
    }
    return result;  // NRVO; the old `return move(result)` suppressed elision
}

Flatten::Flatten(){}

Linear::Linear(const Tensor4d & w, const Tensor4d & b) :fc_w(w), fc_b(b) {};

// Fully connected layer: y = W * x + b. Each column of input(0,0) is one
// sample, so the bias column is added column-wise across the batch.
Tensor4d Linear::operator()(const Tensor4d& input)const 
{
    Tensor4d result({1, 1, fc_w.shape[2], input.shape[3]});
    result(0, 0) = fc_w(0, 0) * input(0, 0);
    result(0, 0).colwise() += fc_b(0, 0).col(0);
    return result;  // NRVO; the old `return move(result)` suppressed elision
}

// Forward pass: feed the tensor through every layer in registration order
// (the order load() read them from the model file).
Tensor4d NNModule::calculate(const Tensor4d& input) const
{
    Tensor4d output = input;
    for (NNLayer* layer : layers)
    {
        output = (*layer)(output);
    }
    return output;  // NRVO; the old `return move(output)` suppressed elision
}

// Letterbox `src` into a height x width canvas: scale while preserving the
// aspect ratio, then paste the result centered on a black background.
void NNModule::normalize(const cv::Mat& src, cv::Mat& dst) const
{
    const int canvas_h = height, canvas_w = width;
    Mat canvas = Mat::zeros(canvas_h, canvas_w, CV_8UC1);
    const double src_w = src.cols, src_h = src.rows;
    const double scale = MIN(canvas_w / src_w, canvas_h / src_h);
    const int scaled_w = int(src_w * scale);
    const int scaled_h = int(src_h * scale);
    if (scaled_w > 0 && scaled_h > 0)
    {
        Mat resized;
        resize(src, resized, Size(scaled_w, scaled_h));
        // Center the scaled image on the canvas.
        const int x0 = int((canvas_w - scaled_w) / 2);
        const int y0 = int((canvas_h - scaled_h) / 2);
        resized.copyTo(canvas.rowRange(y0, y0 + scaled_h).colRange(x0, x0 + scaled_w));
    }
    dst = canvas;
}

bool NNModule::load(const string& modelfile)
{
    auto read_Tensor4d = [&](FileNode& node) 
    {        
        FileNode shape_node = node["shape"];
        FileNode data_node = node["listdata"];
        auto si = shape_node.begin();
        int out_channels = *(si++);
        int in_channels = *(si++);
        int rows = *(si++);
        int cols = *(si++);
        Tensor4d t({out_channels, in_channels, rows, cols});

        auto di = data_node.begin();
        for (int out_channel_idx = 0; out_channel_idx < out_channels; ++out_channel_idx)
        {            
            for (int in_channel_idx = 0; in_channel_idx < in_channels; ++in_channel_idx) 
            {
                Map<MatrixXf> mat = t(out_channel_idx, in_channel_idx);
                for (int row = 0; row < rows; ++row) 
                {
                    for (int col = 0; col < cols; ++col) 
                    {
                        mat(row, col) = (double)(*di++);
                        if (di == node.end()) 
                        {
                            throw "出错了，张量读取出错\n";
                            //出错了，张量读取出错
                        }
                    }
                }               
            }            
        }
        return move(t);
    };

    FileStorage fs(modelfile, FileStorage::READ); //填入读操作
    if (!fs.isOpened())
    {
        cerr << modelfile + "is not opened!!!\n";
        throw runtime_error(modelfile + "is not opened!!!\n");
        return false;
    }
    FileNode size_node = fs["size"];
    auto it = size_node.begin();
    this->height = *(it++);
    this->width = *(it++);
    FileNode model_node = fs["modelinfo"];    
    int dataIdx = 0;  

    for (auto i = model_node.begin(); i != model_node.end(); i++)
    {
        string type = (*i)["type"];
        if (type == "conv2d")
        {           
            string weightparam ="data" + to_string(dataIdx++);
            string biasparam = "data" + to_string(dataIdx++);
            FileNode weight_node = fs[weightparam];
            FileNode bias_node = fs[biasparam];
            Tensor4d w = read_Tensor4d(weight_node);
            Tensor4d b = read_Tensor4d(bias_node);
            NNLayer* p = new Conv2d(w, b);
            layers.emplace_back(p);
        }
        else if (type == "linear")
        {
            string weightparam = "data" + to_string(dataIdx++);
            string biasparam = "data" + to_string(dataIdx++);
            FileNode weight_node = fs[weightparam];
            FileNode bias_node = fs[biasparam];
            Tensor4d w = read_Tensor4d(weight_node);
            Tensor4d b = read_Tensor4d(bias_node);
            NNLayer* p = new Linear(w, b);
            layers.emplace_back(p);
        }
        else if (type == "maxpool2d")
        {
            int kernel_size = (*i)["kernel_size"];
            NNLayer* p = new Max_pool2d(kernel_size);
            layers.emplace_back(p);
        }
        else if (type == "relu")
        {
            NNLayer* p = new Relu();
            layers.emplace_back(p);
        }
        else if (type == "flatten")
        {
            NNLayer* p = new Flatten();
            layers.emplace_back(p);
        }
        else
        {
            //type error
        }
    }

    return true;
}

NNModule::NNModule():height(0),width(0) {};

// Construct and immediately load a model file.
// Bug fix: height/width were left uninitialized if load() threw before
// setting them (e.g. when the file could not be opened).
NNModule::NNModule(const string& modelfile) : height(0), width(0)
{
    load(modelfile);
}

// Batch classification: letterbox each single-channel image to the model's
// input size, scale to [0,1], run the network, and pick the arg-max row per
// output column. A score must exceed 0.9 to count; otherwise class 0 is
// reported for that sample.
// Throws const char* on non-grayscale input (kept for existing catch sites).
vector<int> NNModule::operator()(const vector<cv::Mat>& images) const
{
    Tensor4d input({ (int)images.size(), 1, height, width });
    vector<int> result(images.size());

    for (int idx = 0, end = images.size(); idx < end; ++idx)
    {
        if (images[idx].channels() != 1)
        {
            throw "input image channels error!\n";
            // (unreachable `return result` removed)
        }
        Mat imgNorm;
        MatrixXf gray;
        normalize(images[idx], imgNorm);
        cv2eigen(imgNorm, gray);
        gray /= 255.0;            // scale pixels to [0, 1]
        input(idx, 0) = gray;     // write through the (idx, 0) slice view
    }

    Tensor4d output = calculate(input);
    Map<MatrixXf> outmat = output(0, 0);  // rows = classes, cols = samples
    for (int col = 0, cols = outmat.cols(); col < cols; ++col)
    {
        int maxIdx = 0;
        float maxVal = 0;
        for (int row = 0, rows = outmat.rows(); row < rows; ++row)
        {
            const float val = outmat(row, col);
            // Only scores above the 0.9 confidence threshold may win.
            if (val > 0.9 && val > maxVal)
            {
                maxVal = val;
                maxIdx = row;
            }
        }
        result[col] = maxIdx;
    }

    return result;  // NRVO; the old `return move(result)` suppressed elision
}

// Single-image classification: letterbox, scale to [0,1], run the network,
// and return the arg-max class if its score exceeds 0.9, else 0.
// Throws const char* on non-grayscale input (kept for existing catch sites).
int NNModule::operator()(const cv::Mat& image) const
{
    Tensor4d input({ 1, 1, height, width });

    if (image.channels() != 1)
    {
        throw "input image channels error!\n";
        // (unreachable `return 0` removed)
    }
    Mat imgNorm;
    MatrixXf gray;
    normalize(image, imgNorm);
    cv2eigen(imgNorm, gray);
    gray /= 255.0;  // scale pixels to [0, 1]
    // Consistency fix: write through the (0,0) slice view like the batch
    // overload does. The old `input.data = gray;` resized the flat storage
    // to a height x width matrix and only worked by accident of Eigen's
    // column-major layout.
    input(0, 0) = gray;

    Tensor4d output = calculate(input);
    Map<MatrixXf> outmat = output(0, 0);
    Eigen::Index maxRow, maxCol;
    outmat.maxCoeff(&maxRow, &maxCol);
    // Only trust confident predictions; scores <= 0.9 map to class 0.
    return (outmat(maxRow, maxCol) > 0.9) ? (int)maxRow : 0;
}

// `layers` owns the raw NNLayer pointers allocated in load(); release them.
// NOTE(review): vector<unique_ptr<NNLayer>> would make this a Rule-of-Zero
// class — worth considering if the header can change.
NNModule::~NNModule()
{
    for (auto* layer : layers)
        delete layer;
}

#endif