#include <math.h>
#include <omp.h>
#include <string.h>
#include <cassert>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <utility>
#include <vector>
#include "tensor.h"
using namespace std;

// Default constructor: an empty tensor with no storage reserved.
template <class Datatype>
Tensor<Datatype>::Tensor() = default;

// Construct a tensor with capacity for `len` elements.
// Only reserves storage; the dimensions (n,h,w,c) are left untouched.
// Throws a const char* message when len is zero.
template <class Datatype>
Tensor<Datatype>::Tensor(size_t len) {
    // `len` is unsigned, so the only illegal value is 0; the old
    // `len <= 0` comparison was equivalent but misleading.
    if (len == 0) {
        throw "Illegal data size";
    }
    data.reserve(len);
}

// To construct Tensor from a Datatype pointer.
// Copies _n*_h*_w*_c elements from data_ptr into the tensor; the caller
// keeps ownership of data_ptr. Throws a const char* when any dim is 0.
template <class Datatype>
Tensor<Datatype>::Tensor(const Datatype* data_ptr, size_t _n, size_t _h, size_t _w, size_t _c) {
    // Dimensions are unsigned: only 0 is illegal (`<= 0` was redundant).
    if (_n == 0 || _h == 0 || _w == 0 || _c == 0) {
        throw "Illegal data size";
    }
    n = _n;
    h = _h;
    w = _w;
    c = _c;
    // assign() expresses "replace contents from a range" directly.
    data.assign(data_ptr, data_ptr + n * h * w * c);
}
// Construct a tensor of shape (_n,_h,_w,_c) with storage reserved but
// not filled (data.size() stays 0). Throws when any dimension is 0.
template <class Datatype>
Tensor<Datatype>::Tensor(size_t _n, size_t _h, size_t _w, size_t _c) {
    // Dimensions are unsigned: only 0 is illegal (`<= 0` was redundant).
    if (_n == 0 || _h == 0 || _w == 0 || _c == 0) {
        throw "Illegal data size";
    }
    n = _n;
    h = _h;
    w = _w;
    c = _c;
    data.reserve(n * h * w * c);
}
// Construct a tensor of shape (_n,_h,_w,_c) with every element set to d.
// Throws a const char* when any dimension is 0.
template <class Datatype>
Tensor<Datatype>::Tensor(size_t _n, size_t _h, size_t _w, size_t _c, Datatype d) {
    // Dimensions are unsigned: only 0 is illegal (`<= 0` was redundant).
    if (_n == 0 || _h == 0 || _w == 0 || _c == 0) {
        throw "Illegal data size";
    }
    n = _n;
    h = _h;
    w = _w;
    c = _c;
    data.resize(n * h * w * c, d);
}
// Construct from an existing flat data vector plus an NHWC shape vector.
// Throws std::out_of_range when `shapes` has fewer than 4 entries — the
// original indexed shapes[0..3] unchecked (undefined behaviour).
template <class Datatype>
Tensor<Datatype>::Tensor(const vector<Datatype>& d, const vector<uint32_t>& shapes)
    : data(d) {
    n = shapes.at(0);
    h = shapes.at(1);
    w = shapes.at(2);
    c = shapes.at(3);
}

// Destructor: hand the storage to a temporary so it is released eagerly
// (the swap idiom guarantees capacity is freed, not just size cleared).
template <class Datatype>
Tensor<Datatype>::~Tensor() {
    std::vector<Datatype> released;
    released.swap(data);
}
// Const access path that still hands out a mutable pointer for legacy
// callers. Use a named const_cast rather than a C-style cast so the
// deliberate const-dropping is explicit and greppable.
template <class Datatype>
Datatype* Tensor<Datatype>::getData() const {
    return const_cast<Datatype*>(data.data());
}
// Mutable pointer to the first element of the flat NHWC buffer.
template <class Datatype>
Datatype* Tensor<Datatype>::getData() {
    auto* raw = this->data.data();
    return raw;
}
// Expose the underlying element vector by reference ("A" = array form).
template <class Datatype>
std::vector<Datatype>& Tensor<Datatype>::getDataA() {
    auto& backing = this->data;
    return backing;
}

// Replace the tensor's flat buffer. The parameter is taken by value, so
// move from it instead of copying a second time — callers are unaffected.
// NOTE: the dimensions (n,h,w,c) are deliberately left unchanged.
template <class Datatype>
void Tensor<Datatype>::setDataA(std::vector<Datatype> d) {
    data = std::move(d);
}

// Write the NHWC dimensions into a caller-provided 4-element array.
template <class Datatype>
void Tensor<Datatype>::getSize(uint32_t* size) const {
    const uint32_t dims[4] = {(uint32_t)n, (uint32_t)h, (uint32_t)w, (uint32_t)c};
    for (int i = 0; i < 4; ++i) {
        size[i] = dims[i];
    }
}

// Return the NHWC dimensions as a 4-element vector {n, h, w, c}.
template <class Datatype>
const std::vector<uint32_t> Tensor<Datatype>::getSize() const {
    std::vector<uint32_t> dims;
    dims.reserve(4);
    dims.push_back((uint32_t)n);
    dims.push_back((uint32_t)h);
    dims.push_back((uint32_t)w);
    dims.push_back((uint32_t)c);
    return dims;
}
/*
Grow the tensor so it can hold more data.
If _n*_h*_w*_c exceeds the current capacity, the capacity is enlarged;
otherwise the tensor's storage size is left unchanged.
*/
// Set the NHWC dimensions and make sure the backing store can hold
// n*h*w*c elements. reserve() only grows capacity — it never shrinks
// and does not change data.size().
template <class Datatype>
void Tensor<Datatype>::setSize(size_t _n, size_t _h, size_t _w, size_t _c) {
    n = _n;
    h = _h;
    w = _w;
    c = _c;
    const size_t wanted = n * h * w * c;
    data.reserve(wanted);
}

// True when the tensor currently holds no elements.
template <class Datatype>
bool Tensor<Datatype>::empty() {
    return data.size() == 0;
}

// In-place element-wise addition (OpenMP-parallel); returns *this.
// Throws a const char* when the two tensors' NHWC shapes differ.
template <class Datatype>
Tensor<Datatype>& Tensor<Datatype>::operator+(const Tensor& d) {
    const bool sameShape = (n == d.n) && (h == d.h) && (w == d.w) && (c == d.c);
    if (!sameShape) {
        throw "Tensor Report illegal data size";
    }
    int size = n * h * w * c;
#pragma omp parallel for
    for (int i = 0; i < size; i++) {
        data.at(i) += d.data.at(i);
    }
    return *this;
}

// Element-wise sum producing a new tensor; operands must share a shape.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::operator+(const Tensor& d) const {
    if (n != d.n || h != d.h || w != d.w || c != d.c) {
        throw "Tensor Report illegal data size";
    }
    Tensor res(n, h, w, c);  // constructor reserves n*h*w*c
    const size_t total = n * h * w * c;
    for (size_t i = 0; i < total; i++) {
        res.data.push_back(data.at(i) + d.data.at(i));
    }
    return res;
}

// Element-wise difference producing a new tensor; shapes must match.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::operator-(const Tensor& d) const {
    if (n != d.n || h != d.h || w != d.w || c != d.c) {
        throw "Illegal data size";
    }
    Tensor res(n, h, w, c);  // constructor reserves n*h*w*c
    const size_t total = n * h * w * c;
    for (size_t i = 0; i < total; i++) {
        res.data.push_back(data.at(i) - d.data.at(i));
    }
    return res;
}

// Entrywise (Hadamard) product: (n,h,w,c) * (n,h,w,c) = (n,h,w,c).
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::operator*(const Tensor& d) const {
    if (n != d.n || h != d.h || w != d.w || c != d.c) {
        throw "Illegal data size";
    }
    Tensor res(n, h, w, c);  // constructor reserves n*h*w*c
    const size_t total = n * h * w * c;
    for (size_t i = 0; i < total; i++) {
        res.data.push_back(data.at(i) * d.data.at(i));
    }
    return res;
}

// Element-wise quotient producing a new tensor; shapes must match.
// NOTE: divisor elements are not checked for zero.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::operator/(const Tensor& d) const {
    if (n != d.n || h != d.h || w != d.w || c != d.c) {
        throw "Illegal data size";
    }
    Tensor res(n, h, w, c);  // constructor reserves n*h*w*c
    const size_t total = n * h * w * c;
    for (size_t i = 0; i < total; i++) {
        res.data.push_back(data.at(i) / d.data.at(i));
    }
    return res;
}

// Copy assignment (note: declared to return Tensor by value, per the
// header — kept for interface compatibility). A source with any zero
// dimension is silently ignored, preserving the original behaviour.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::operator=(const Tensor& d) {
    if (d.n != 0 && d.h != 0 && d.w != 0 && d.c != 0) {
        n = d.n;
        h = d.h;
        w = d.w;
        c = d.c;
        // vector copy-assignment already sizes the destination and erases
        // the old contents; the explicit resize() before it was wasted work.
        data = d.data;
    }
    return *this;
}

// Bounds-checked read access to the flat buffer.
// BUG FIX: the old guard `index >= data.size() && index < 0` could never
// fire — index is unsigned, so `index < 0` is always false and the &&
// made the whole test false. Use the intended single comparison.
template <class Datatype>
const Datatype& Tensor<Datatype>::at(size_t index) const {
    if (index >= data.size()) {
        throw "Illegal data index";
    }
    return data.at(index);
}

// Bounds-checked mutable access to the flat buffer.
// BUG FIX: same dead guard as the const overload — `index >= size && index < 0`
// was always false for unsigned index; now throws on out-of-range.
template <class Datatype>
Datatype& Tensor<Datatype>::at(const size_t index) {
    if (index >= data.size()) {
        throw "Illegal data index";
    }
    return data.at(index);
}

// Slice off the leading non-unit dimension: an (n,h,w,c) tensor yields a
// (1,h,w,c) copy of batch i; a (1,h,w,c) tensor yields row i as (1,1,w,c);
// a (1,1,w,c) tensor yields column i as (1,1,1,c). When n, h and w are all
// 1, the whole tensor is returned. NOTE: `i` is not range-checked.
template <class Datatype>
const Tensor<Datatype> Tensor<Datatype>::operator[](size_t i) const {
    // size_t offsets avoid the silent int truncation the old code risked;
    // the old try/catch only rethrew the same exception, so it is dropped.
    if (n != 1) {
        const size_t offset = h * w * c * i;
        return Tensor(&data[offset], 1, h, w, c);
    }
    if (h != 1) {
        const size_t offset = w * c * i;
        return Tensor(&data[offset], 1, 1, w, c);
    }
    if (w != 1) {
        const size_t offset = c * i;
        return Tensor(&data[offset], 1, 1, 1, c);
    }
    return *this;
}

// Matrix product over the last two dimensions, appended to this->data.
// m1 is treated as (m1.w x m1.c) and m2 as (m2.w x m2.c); the result is
// (m1.w x m2.w) with res[i][j] = dot(m1 row i, m2 row j).
// Throws when the inner (channel) dimensions differ.
template <class Datatype>
void Tensor<Datatype>::matMulNew(const Tensor<Datatype>& m1, const Tensor<Datatype>& m2) {
    if (m1.c != m2.c) {
        throw "MatMul: input matrix size not support.";
    }
    // BUG FIX: the old code captured data.end() *before* the insert and
    // wrote through that iterator afterwards; insert() may reallocate and
    // invalidate it (undefined behaviour). Index from the pre-insert size.
    const size_t base = data.size();
    data.insert(data.end(), m1.w * m2.w, 0);
    for (int32_t idh = 0; idh < (int32_t)m1.w; idh++) {
        int32_t m11 = idh * m1.c;    // start of m1 row idh
        int32_t resI = idh * m2.w;   // start of result row idh
        for (int32_t idw = 0; idw < (int32_t)m2.w; idw++) {
            int32_t m22 = idw * m2.c;  // start of m2 row idw
            Datatype res = 0;
            for (int32_t p = 0; p < (int32_t)m1.c; p++) {
                res += m1.data.at(p + m11) * m2.data.at(p + m22);
            }
            data[base + resI + idw] = res;
        }
    }
}

// OpenMP-parallel variant of matMul: result row idh is computed by one
// thread; rows are disjoint, so the writes below do not race.
// m1 is (m1.w x m1.c), m2 is (m2.w x m2.c); res[i][j] = dot(row i, row j).
template <class Datatype>
void Tensor<Datatype>::matMulOmp(const Tensor& m1, const Tensor& m2) {
    if (m1.c != m2.c) {
        throw "MatMul: input matrix size not support.";
    }
    // BUG FIX: the old code captured data.end() before the insert and
    // dereferenced it afterwards — insert() may reallocate and invalidate
    // that iterator (UB). Write through an index base instead.
    const size_t base = data.size();
    data.insert(data.end(), m1.w * m2.w, 0);
    // Signed loop variable: portable across OpenMP versions (pre-3.0
    // implementations require a signed canonical loop index).
    const int64_t rows = (int64_t)m1.w;
#pragma omp parallel for
    for (int64_t idh = 0; idh < rows; idh++) {
        const size_t m11 = (size_t)idh * m1.c;   // start of m1 row idh
        const size_t resI = (size_t)idh * m2.w;  // start of result row idh
        for (size_t idw = 0; idw < m2.w; idw++) {
            const size_t m22 = idw * m2.c;       // start of m2 row idw
            Datatype res = 0;
            for (size_t p = 0; p < m1.c; p++) {
                res += m1.data.at(p + m11) * m2.data.at(p + m22);
            }
            data[base + resI + idw] = res;
        }
    }
}

// Serial matrix product over the last two dimensions, appended to data.
// m1 is (m1.w x m1.c), m2 is (m2.w x m2.c); res[i][j] = dot(row i, row j),
// giving an (m1.w x m2.w) result. Throws when inner dimensions differ.
template <class Datatype>
void Tensor<Datatype>::matMul(const Tensor& m1, const Tensor& m2) {
    if (m1.c != m2.c) {
        throw "MatMul: input matrix size not support.";
    }
    // BUG FIX: the old code captured data.end() before the insert and
    // dereferenced it afterwards — insert() may reallocate and invalidate
    // that iterator (UB). Index from the pre-insert size instead.
    const size_t base = data.size();
    data.insert(data.end(), m1.w * m2.w, 0);
    for (size_t idh = 0; idh < m1.w; idh++) {
        const size_t m11 = idh * m1.c;   // start of m1 row idh
        const size_t resI = idh * m2.w;  // start of result row idh
        for (size_t idw = 0; idw < m2.w; idw++) {
            const size_t m22 = idw * m2.c;  // start of m2 row idw
            Datatype res = 0;
            for (size_t p = 0; p < m1.c; p++) {
                res += m1.data.at(p + m11) * m2.data.at(p + m22);
            }
            data[base + resI + idw] = res;
        }
    }
}

// Reduce-mean over the channel dimension (only DIMC is implemented).
// If BroadCast, the per-position mean is replicated c times so the output
// shape stays (n,h,w,c); otherwise the output shape is (n,h,w,1).
// Throws const char* for unsupported dims or empty data.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Mean(DIM dim, bool BroadCast) const {
    // To calculate the mean of data
    // if BroadCast, the dimension dim will be preserved
    // else, the dimension dim will be reduced to 1
    Tensor MeanRes;
    if (dim != DIMC) {
        throw "Reduced mean in DIMN/DIMH/DIMW is not implemented!";
    }

    if (data.empty()) {
        throw "Empty data!";
    } else {
        if (BroadCast == true) {
            // Output keeps the channel dimension: shape (n,h,w,c).
            MeanRes.n = n;
            MeanRes.h = h;
            MeanRes.w = w;
            MeanRes.c = c;
            MeanRes.data.reserve(n * h * w * c);
        } else {
            // Channel dimension collapses to 1: shape (n,h,w,1).
            MeanRes.n = n;
            MeanRes.h = h;
            MeanRes.w = w;
            MeanRes.c = 1;
            MeanRes.data.reserve(n * h * w * 1);
        }

        for (size_t idn = 0; idn < n; idn++) {
            for (size_t idh = 0; idh < h; idh++) {
                for (size_t idw = 0; idw < w; idw++) {
                    // NOTE(review): the accumulator is float even for other
                    // Datatype instantiations — confirm precision is intended.
                    float res = 0;
                    // Flat offset of channel 0 at position (idn, idh, idw).
                    size_t stride = idn * h * w * c + idh * w * c + idw * c;
                    for (size_t idc = 0; idc < c; idc++) {
                        res += data.at(stride + idc);
                    }
                    float mean = res / (float)c;  // mean
                    if (BroadCast == true)        // if BroadCast, output dim is (n,h,w,c)
                    {
                        for (size_t tmpc = 0; tmpc < c; tmpc++) {
                            MeanRes.data.push_back(mean);
                        }
                    } else  // else, output dim is (n,h,w,1)
                    {
                        MeanRes.data.push_back(mean);
                    }
                }
            }
        }
    }

    return MeanRes;
}

// Element-wise power (integer exponent); returns a new tensor of the
// same shape, leaving this tensor untouched.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Pow(int ex) const {
    Tensor res(n, h, w, c);  // constructor reserves len() elements
    const size_t total = res.len();
    for (size_t idx = 0; idx < total; ++idx) {
        res.data.push_back(pow(this->data.at(idx), ex));
    }
    return res;
}

// Return a copy of the tensor with its axes reordered according to `axes`
// (numpy-style transpose). `axes` must be a permutation of {0,1,2,3}.
// Throws "AxisError" / "ValueError" (const char*) on bad input.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Permute(const std::vector<int>& axes) const {
    // Made shape_len a constant expression: `int addr[shape_len]` below
    // was a variable-length array, a non-standard extension in C++.
    const int shape_len = 4;
    if (axes.size() != (size_t)shape_len) {
        cout << "axes don't match array" << endl;
        throw "AxisError";
    }
    uint32_t shape_temp[4] = {(uint32_t)n, (uint32_t)h, (uint32_t)w, (uint32_t)c};
    uint32_t _n, _h, _w, _c;
    // Row-major (NHWC) strides of the source layout.
    vector<int> strides;
    vector<int> strides_temp;
    strides.assign(shape_len, 1);
    strides_temp.assign(shape_len, 1);
    strides_temp[2] = c * strides_temp[3];
    strides_temp[1] = w * strides_temp[2];
    strides_temp[0] = h * strides_temp[1];
    // transpose: pick the output extent and source stride for each axis.
    int j = 0;
    for (int i : axes) {
        if (i < 0 || i >= shape_len) {
            cout << "axis " << i << " is out of bounds for array of dimension " << j
                 << endl;
            throw "AxisError";
        }
        if (shape_temp[i] == (uint32_t)-1) {  // already consumed => duplicate axis
            cout << "repeated axis in transpose" << endl;
            throw "ValueError";
        }
        switch (j) {
            case 0:
                _n = shape_temp[i];
                break;
            case 1:
                _h = shape_temp[i];
                break;
            case 2:
                _w = shape_temp[i];
                break;
            case 3:
                _c = shape_temp[i];
                break;
        }
        shape_temp[i] = -1;  // mark this source axis as used
        strides[j] = strides_temp[i];
        ++j;
    }

    // contiguous: walk the output index space (odometer in addr[]) and
    // gather each element from the source via the permuted strides.
    int idx = 0;
    int dst_idx = 0;
    int addr[shape_len];
    vector<Datatype> temp_data = data;
    uint32_t shapes[4] = {_n, _h, _w, _c};
    for (int i = 0; i < shape_len; ++i) {
        addr[i] = 0;
    }
    // BUG FIX: the odometer must run until the OUTPUT leading dimension
    // (shapes[0] == _n) is exhausted; the old comparison against the input
    // `n` under-ran or over-ran the buffer whenever axis 0 was permuted.
    while (addr[0] != (int)shapes[0]) {
        idx = 0;
        for (int i = 0; i < shape_len; ++i) {
            idx += addr[i] * strides[i];
        }
        temp_data[dst_idx] = data[idx];
        ++dst_idx;
        addr[shape_len - 1]++;
        for (int i = shape_len - 1; i > 0; --i) {
            if (addr[i] == (int)shapes[i]) {
                addr[i] = 0;
                addr[i - 1]++;
            }
        }
    }
    Tensor temp(temp_data, vector<uint32_t>{_n, _h, _w, _c});
    return temp;
}

// In-place axis permutation (numpy-style transpose). `axes` must be a
// permutation of {0,1,2,3}. Throws const char* on bad input.
template <class Datatype>
void Tensor<Datatype>::PermuteSelf(const std::vector<int>& axes) {
    // Made shape_len a constant expression: `int addr[shape_len]` below
    // was a variable-length array, a non-standard extension in C++.
    const size_t shape_len = 4;
    if (axes.size() != shape_len) {
        throw "AxisError";
    }
    int shape_temp[4] = {(int)n, (int)h, (int)w, (int)c};
    int _n, _h, _w, _c;
    // Row-major (NHWC) strides of the source layout.
    vector<int> strides;
    vector<int> strides_temp;
    strides.assign(shape_len, 1);
    strides_temp.assign(shape_len, 1);
    strides_temp[2] = c * strides_temp[3];
    strides_temp[1] = w * strides_temp[2];
    strides_temp[0] = h * strides_temp[1];
    // transpose: pick the output extent and source stride for each axis.
    int j = 0;
    for (int i : axes) {
        if (i < 0 || i >= (int)shape_len) {
            cout << "axis " << i << " is out of bounds for array of dimension " << j
                 << endl;
            throw "AxisError";
        }
        if (shape_temp[i] == -1) {  // already consumed => duplicate axis
            cout << "repeated axis in transpose" << endl;
            throw "ValueError";
        }
        switch (j) {
            case 0:
                _n = shape_temp[i];
                break;
            case 1:
                _h = shape_temp[i];
                break;
            case 2:
                _w = shape_temp[i];
                break;
            case 3:
                _c = shape_temp[i];
                break;
        }
        shape_temp[i] = -1;  // mark this source axis as used
        strides[j] = strides_temp[i];
        ++j;
    }

    // contiguous: walk the output index space (odometer in addr[]) and
    // gather each element from the source via the permuted strides.
    int idx = 0;
    int dst_idx = 0;
    int addr[shape_len];
    vector<Datatype> temp_data = data;
    int shapes[4] = {_n, _h, _w, _c};
    for (size_t i = 0; i < shape_len; ++i) {
        addr[i] = 0;
    }
    // BUG FIX: the odometer must run until the OUTPUT leading dimension
    // (shapes[0] == _n) is exhausted; the old comparison against the input
    // `n` under-ran or over-ran the buffer whenever axis 0 was permuted.
    while (addr[0] != shapes[0]) {
        idx = 0;
        for (size_t i = 0; i < shape_len; ++i) {
            idx += addr[i] * strides[i];
        }
        temp_data[dst_idx] = data[idx];
        ++dst_idx;
        addr[shape_len - 1]++;
        for (int i = (int)shape_len - 1; i > 0; --i) {
            if (addr[i] == shapes[i]) {
                addr[i] = 0;
                addr[i - 1]++;
            }
        }
    }
    data.swap(temp_data);
    // Release the superseded buffer eagerly.
    std::vector<Datatype>().swap(temp_data);
    n = _n;
    h = _h;
    w = _w;
    c = _c;
}

// Return a copy with dimensions dim1 and dim2 swapped (via Permute).
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Transpose(int dim1, int dim2) {
    const int shape_len = 4;
    // Reject out-of-range axes including negatives (the old check let
    // negative dims through to out-of-bounds vector indexing).
    if (dim1 < 0 || dim2 < 0 || dim1 >= shape_len || dim2 >= shape_len) {
        throw "Axis Error";
    }
    // BUG FIX: Permute expects std::vector<int>; the old code built a
    // vector<Datatype>, which does not convert and breaks non-int
    // instantiations of the template.
    vector<int> permute_axes;
    for (int i = 0; i < shape_len; ++i) {
        permute_axes.push_back(i);
    }
    permute_axes[dim1] = dim2;
    permute_axes[dim2] = dim1;
    return Permute(permute_axes);
}
// Const overload: return a copy with dims dim1 and dim2 swapped.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Transpose(int dim1, int dim2) const {
    const int shape_len = 4;
    // Reject out-of-range axes including negatives (previously unchecked).
    if (dim1 < 0 || dim2 < 0 || dim1 >= shape_len || dim2 >= shape_len) {
        throw "Axis Error";
    }
    // BUG FIX: use vector<int> with an int counter; the old code built a
    // vector<Datatype> iterated by a Datatype loop variable, which breaks
    // (or silently converts) for non-int instantiations.
    vector<int> permute_axes;
    for (int i = 0; i < shape_len; ++i) {
        permute_axes.push_back(i);
    }
    permute_axes[dim1] = dim2;
    permute_axes[dim2] = dim1;
    return Permute(permute_axes);
}

// Return a copy with the dimensions relabelled as (_n,_h,_w,_c); the flat
// data order is untouched. One dimension may be -1 (inferred, numpy-style).
// Throws std::invalid_argument when the requested shape is inconsistent.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Reshape(int _n, int _h, int _w, int _c) const {
    auto res = *this;

    // check input shape
    std::vector<int> shapes = {_n, _h, _w, _c};
    vector<int> unspecified;  // indices of -1 ("infer me") dimensions
    int size = 1;
    size_t oriSize = n * h * w * c;
    for (size_t i = 0; i < shapes.size(); ++i) {
        if (shapes[i] < 0) {
            unspecified.push_back(i);
        } else {
            size *= shapes[i];
        }
    }
    if (!unspecified.empty()) {
        // At most one inferred dim; the rest must divide the element count.
        // BUG FIX: size == 0 (a zero dimension) previously made the modulo
        // below divide by zero — now rejected explicitly. Also qualify the
        // exception as std::invalid_argument instead of relying on the
        // `using namespace std` directive via `::invalid_argument`.
        if (unspecified.size() != 1 || size == 0 || oriSize % size) {
            throw std::invalid_argument("NegtiveDimensionError");
        }
        shapes[unspecified[0]] = n * h * w * c / size;
    } else {
        if ((size_t)size != n * h * w * c) {
            throw std::invalid_argument("IllegalShapeError");
        }
    }
    res.n = shapes[0];
    res.h = shapes[1];
    res.w = shapes[2];
    res.c = shapes[3];

    return res;
}
/*
Re-label the tensor's dimensions without changing the total element count
or the order of the underlying data.
e.g. a tensor of shape (1, 4, 6, 2):
              0  2  4  6  8  10 | 1  3  5  7  9  11
              12 14 16 18 20 22 | 13 15 17 19 21 23
              24 26 28 30 32 34 | 25 27 29 31 33 35
              36 38 40 42 44 46 | 37 39 41 43 45 47
    reSize(1, 4, 3, 4)
              0  4  8 | 1  5  9 | 2  6  10 | 3  7  11
              12 16 20| 13 17 21| 14 18 22 | 15 19 23
              24 28 32| 25 29 33| 26 30 32 | 27 31 35
              36 30 44| 37 41 45| 38 42 44 | 39 43 47
*/
// Relabel the NHWC dimensions without touching the flat data; the total
// element count must stay exactly the same, otherwise a const char* is thrown.
template <class Datatype>
void Tensor<Datatype>::reSize(size_t nr, size_t hr, size_t wr, size_t cr) {
    const size_t requested = nr * hr * wr * cr;
    const size_t current = n * h * w * c;
    if (requested != current) {
        throw "reSize fail";
    }
    n = nr;
    h = hr;
    w = wr;
    c = cr;
}
// Concatenate this tensor with `in` along dimension `dim`; every other
// dimension must match. Returns a new tensor; throws const char* on
// mismatched shapes or unsupported dims.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Concat(const Tensor& in, DIM dim) const {
    Tensor res;
    switch (dim) {
        case DIMC: {
            uint32_t in1_size[4];
            uint32_t in2_size[4];
            this->getSize(in1_size);
            in.getSize(in2_size);

            int in1_n = in1_size[0];
            int in1_h = in1_size[1];
            int in1_w = in1_size[2];
            int in1_c = in1_size[3];

            int in2_n = in2_size[0];
            int in2_h = in2_size[1];
            int in2_w = in2_size[2];
            int in2_c = in2_size[3];
            if (in1_n != in2_n || in1_h != in2_h || in1_w != in2_w) {
                throw "Concate sizes do not match!";
            }
            res.setSize(in1_n, in1_h, in1_w, in1_c + in2_c);
            // Interleave per (n,h,w) position: in1's channels, then in2's.
            for (int idn = 0; idn < in1_n; idn++) {
                for (int idh = 0; idh < in1_h; idh++) {
                    for (int idw = 0; idw < in1_w; idw++) {
                        // Push back in1 data
                        size_t stride1 = idn * (in1_h * in1_w * in1_c) +
                                         idh * (in1_w * in1_c) + idw * (in1_c);
                        for (int idc = 0; idc < in1_c; idc++) {
                            res.data.push_back(this->data.at(stride1 + idc));
                        }
                        // Push back in2 data
                        size_t stride2 = idn * (in2_h * in2_w * in2_c) +
                                         idh * (in2_w * in2_c) + idw * (in2_c);
                        for (int idc = 0; idc < in2_c; idc++) {
                            res.data.push_back(in.at(stride2 + idc));
                        }
                    }
                }
            }
            break;
        }
        case DIMN: {
            uint32_t in1_size[4];
            uint32_t in2_size[4];
            this->getSize(in1_size);
            in.getSize(in2_size);

            int in1_n = in1_size[0];
            int in1_h = in1_size[1];
            int in1_w = in1_size[2];
            int in1_c = in1_size[3];

            int in2_n = in2_size[0];
            int in2_h = in2_size[1];
            int in2_w = in2_size[2];
            int in2_c = in2_size[3];
            if (in1_h != in2_h || in1_c != in2_c || in1_w != in2_w) {
                throw "Concate sizes do not match!";
            }
            res.setSize(in1_n + in2_n, in1_h, in1_w, in1_c);

            // Batch concat is a straight append of the two flat buffers.
            res.data.insert(res.data.end(), this->data.begin(), this->data.end());
            res.data.insert(res.data.end(), in.data.begin(), in.data.end());
            break;
        }
        case DIMH: {
            uint32_t in1_size[4];
            uint32_t in2_size[4];
            this->getSize(in1_size);
            in.getSize(in2_size);

            int in1_n = in1_size[0];
            int in1_h = in1_size[1];
            int in1_w = in1_size[2];
            int in1_c = in1_size[3];

            int in2_n = in2_size[0];
            int in2_h = in2_size[1];
            int in2_w = in2_size[2];
            int in2_c = in2_size[3];
            if (in1_n != in2_n || in1_c != in2_c || in1_w != in2_w) {
                throw "Concate sizes do not match!";
            }
            res.setSize(in1_n, in1_h + in2_h, in1_w, in1_c);
            // Per batch: all of in1's rows, then all of in2's rows.
            for (int b = 0; b < in1_n; ++b) {
                res.data.insert(res.data.end(),
                                this->data.begin() + (size_t)b * in1_h * in1_w * in1_c,
                                this->data.begin() + (size_t)(b + 1) * in1_h * in1_w * in1_c);
                res.data.insert(res.data.end(),
                                in.getData() + (size_t)b * in2_h * in2_w * in2_c,
                                in.getData() + (size_t)(b + 1) * in2_h * in2_w * in2_c);
            }
            break;
        }
        case DIMW: {
            uint32_t in1_size[4];
            uint32_t in2_size[4];
            this->getSize(in1_size);
            in.getSize(in2_size);

            int in1_n = in1_size[0];
            int in1_h = in1_size[1];
            int in1_w = in1_size[2];
            int in1_c = in1_size[3];

            int in2_n = in2_size[0];
            int in2_h = in2_size[1];
            int in2_w = in2_size[2];
            int in2_c = in2_size[3];
            if (in1_n != in2_n || in1_c != in2_c || in1_h != in2_h) {
                throw "Concate sizes do not match!";
            }
            res.setSize(in1_n, in1_h, in1_w + in2_w, in1_c);
            // Per row: in1's row, then in2's row.
            for (int b = 0; b < in1_n; ++b) {
                for (int r = 0; r < in1_h; ++r) {
                    // BUG FIX: the row offset is (batch * height + row) * w * c;
                    // the old `n * h * w * c` product collapsed to 0 for the
                    // whole first batch and scrambled every later one.
                    size_t off1 = ((size_t)b * in1_h + r) * in1_w * in1_c;
                    res.data.insert(res.data.end(),
                                    this->data.begin() + off1,
                                    this->data.begin() + off1 + (size_t)in1_w * in1_c);

                    size_t off2 = ((size_t)b * in2_h + r) * in2_w * in2_c;
                    res.data.insert(res.data.end(), in.getData() + off2,
                                    in.getData() + off2 + (size_t)in2_w * in2_c);
                }
            }
            break;
        }
        default:
            throw "Not supported!";
    }
    return res;
}

// Concatenate tensor1 and tensor2 along the channel dimension and return
// the result (this tensor is not otherwise involved). n, h and w of the
// two inputs must match; throws a const char* otherwise.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Concat(const Tensor& tensor1, const Tensor& tensor2) const {
    Tensor resultTensor;

    uint32_t tensor1_size[4];
    uint32_t tensor2_size[4];
    tensor1.getSize(tensor1_size);
    tensor2.getSize(tensor2_size);

    int tensor1_n = tensor1_size[0];
    int tensor1_h = tensor1_size[1];
    int tensor1_w = tensor1_size[2];
    int tensor1_c = tensor1_size[3];

    int tensor2_n = tensor2_size[0];
    int tensor2_h = tensor2_size[1];
    int tensor2_w = tensor2_size[2];
    int tensor2_c = tensor2_size[3];

    if (tensor1_n != tensor2_n || tensor1_h != tensor2_h ||
        tensor1_w != tensor2_w) {
        throw "concat error! concat size do not match in n ,h or w! ";
    }

    resultTensor.setSize(tensor1_n, tensor1_h, tensor1_w, tensor1_c + tensor2_c);

    // int loop indices to match the int bounds above (the old size_t
    // counters produced signed/unsigned comparisons on every test) and to
    // stay consistent with the DIMC branch of Concat(in, dim).
    for (int idn = 0; idn < tensor1_n; ++idn) {
        for (int idh = 0; idh < tensor1_h; ++idh) {
            for (int idw = 0; idw < tensor1_w; ++idw) {
                // push back tensor1's channels at this (n,h,w) position
                size_t stride1 = (size_t)idn * (tensor1_h * tensor1_w * tensor1_c) +
                                 (size_t)idh * (tensor1_w * tensor1_c) + (size_t)idw * tensor1_c;
                for (int idc = 0; idc < tensor1_c; ++idc) {
                    resultTensor.data.push_back(tensor1.data.at(stride1 + idc));
                }
                // push back tensor2's channels at this (n,h,w) position
                size_t stride2 = (size_t)idn * (tensor2_h * tensor2_w * tensor2_c) +
                                 (size_t)idh * (tensor2_w * tensor2_c) + (size_t)idw * tensor2_c;
                for (int idc = 0; idc < tensor2_c; ++idc) {
                    resultTensor.data.push_back(tensor2.data.at(stride2 + idc));
                }
            }
        }
    }

    return resultTensor;
}

// Reorder the flat buffer from (n,h,w,c) to (h,w,n,c) traversal order and
// return it as a new vector; the tensor itself is left untouched.
template <class Datatype>
vector<Datatype> Tensor<Datatype>::changeDim() {
    uint32_t size[4];
    this->getSize(size);
    int n = size[0];
    int h = size[1];
    int w = size[2];
    int c = size[3];
    // BUG FIX: the result must be vector<Datatype>; the old hard-coded
    // vector<float> did not convert to the declared return type for
    // non-float instantiations of the template.
    vector<Datatype> result;
    result.reserve((size_t)n * h * w * c);  // one allocation up front

    // h,w,n,c
    for (int idh = 0; idh < h; ++idh) {
        for (int idw = 0; idw < w; ++idw) {
            for (int idn = 0; idn < n; ++idn) {
                int stride = idn * (h * w * c) + idh * (w * c) + idw * c;
                for (int idc = 0; idc < c; ++idc) {
                    result.push_back(this->at(stride + idc));
                }
            }
        }
    }

    return result;
}

// Split the tensor into c/splitSize tensors along the channel dimension;
// chunk k receives channels [k*splitSize, (k+1)*splitSize) of every
// (n,h,w) position. Throws when c is not an exact multiple of splitSize.
template <class Datatype>
vector<Tensor<Datatype>> Tensor<Datatype>::Split(int splitSize) {
    vector<Tensor> result;
    uint32_t in_size[4];
    this->getSize(in_size);

    int n = in_size[0];
    int h = in_size[1];
    int w = in_size[2];
    int c = in_size[3];

    // splitSize <= 0 would make `c % splitSize` undefined — reject it too.
    if (splitSize <= 0 || c % splitSize != 0) {
        throw "this tensor can not be split at dim c!";
    }
    int num = c / splitSize;
    result.resize(num);
    for (int a = 0; a < num; ++a) {
        result[a].setSize(n, h, w, splitSize);
    }

    for (int idn = 0; idn < n; ++idn) {
        for (int idh = 0; idh < h; ++idh) {
            for (int idw = 0; idw < w; ++idw) {
                size_t stride = (size_t)idn * (h * w * c) + (size_t)idh * (w * c) + (size_t)idw * c;
                for (int idc = 0; idc < c; ++idc) {
                    // Integer division already floors; the old
                    // floor(count / thread) round-trip through double was
                    // redundant (and count always equalled idc).
                    int index = idc / splitSize;
                    result[index].data.push_back(this->at(stride + idc));
                }
            }
        }
    }

    return result;
}

// Total element count implied by the NHWC dimensions (may differ from
// data.size() when storage has only been reserved, not filled).
template <class Datatype>
size_t Tensor<Datatype>::len() const {
    const size_t perBatch = h * w * c;
    return n * perBatch;
}

// Channel-wise softmax: for each (n,h,w) position,
// out_c = exp(x_c) / sum_k exp(x_k). Only DIMC is implemented; the
// output shape equals the input shape. NaN results are reported to stdout.
template <class Datatype>
Tensor<Datatype> Tensor<Datatype>::Softmax(DIM dim) const {
    Tensor res(n, h, w, c);  // softmax does not change the dims
    switch (dim) {
        case DIMH:
            throw "Not implemented yet!";
        case DIMW:
            throw "Not implemented yet!";
        case DIMC: {
            vector<Datatype> exps(c);  // scratch: exp() of one channel run
            for (size_t idn = 0; idn < n; idn++) {
                for (size_t idh = 0; idh < h; idh++) {
                    for (size_t idw = 0; idw < w; idw++) {
                        Datatype e_sum = 0;
                        size_t stride = idn * (h * w * c) + idh * (w * c) + idw * (c);
                        // Evaluate each exp() exactly once; the old code
                        // recomputed it three times per element.
                        for (size_t idc = 0; idc < c; idc++) {
                            exps[idc] = exp(this->data[stride + idc]);
                            e_sum += exps[idc];
                        }
                        for (size_t idc = 0; idc < c; idc++) {
                            Datatype prob = exps[idc] / e_sum;
                            if (isnan(prob)) {
                                std::cout << "Nan exist!" << std::endl;
                            }
                            res.data.push_back(prob);
                        }
                    }
                }
            }
            break;
        }
        default:
            break;
    }
    return res;
}

// Set every element to d. If the buffer is still empty (constructors may
// only reserve), it is first sized to n*h*w*c.
template <class Datatype>
void Tensor<Datatype>::fill(Datatype d) {
    // Dimensions are unsigned: only 0 is illegal (`<= 0` was redundant).
    if (n == 0 || h == 0 || w == 0 || c == 0) {
        throw "Illegal data size";
    }
    if (data.size() == 0) {
        data.resize(n * h * w * c, d);
    } else {
        // BUG FIX: index with size_t, not Datatype — a floating-point
        // Datatype made the loop counter lose precision on large tensors
        // and forced float->size_t conversions on every access.
        const size_t total = n * h * w * c;
        for (size_t i = 0; i < total; i++) {
            data.at(i) = d;
        }
    }
}

// Append n*h*w copies of the c-element vector t to the flat buffer.
// Throws when t's length does not equal the channel count.
template <class Datatype>
void Tensor<Datatype>::fill(const std::vector<Datatype>& t) {
    if (c != t.size()) {
        throw "fill not completed";
    }
    // BUG FIX: the old loop ran vector::insert under
    // `#pragma omp parallel for` — concurrent insertion into the same
    // vector is a data race and undefined behaviour (and the final element
    // order would have been nondeterministic). Reserve once, append serially.
    const size_t copies = n * h * w;
    data.reserve(data.size() + copies * t.size());
    for (size_t i = 0; i < copies; i++) {
        data.insert(data.end(), t.begin(), t.end());
    }
}
// Append n*h*w copies of the dataLen-element C array inData.
// Throws when dataLen does not equal the channel count.
template <class Datatype>
void Tensor<Datatype>::fill(const Datatype* inData, const unsigned dataLen) {
    if (c != dataLen) {
        throw "fill not completed";
    }
    const size_t copies = n * h * w;
    // Reserve up front so the repeated inserts cannot each trigger a
    // reallocation (the old version could reallocate per iteration).
    data.reserve(data.size() + copies * dataLen);
    for (size_t i = 0; i < copies; i++) {
        data.insert(data.end(), inData, inData + dataLen);
    }
}
/*
Zero-pad the borders of each rectangle: pU pads above in the H dimension,
pD pads below in the H dimension, pL pads on the left of the W dimension,
pR pads on the right of the W dimension.
e.g. a rectangle of size (5, 6) with pU = 1, pD = 2, pL = 3, pR = 4.
Original rectangle:
                        ******
                        ******
                        ******
                        ******
                        ******
Padded rectangle:
                    0000000000000
                    000******0000
                    000******0000
                    000******0000
                    000******0000
                    000******0000
                    0000000000000
                    0000000000000
*/
// Zero-pad the H/W dimensions in place: pU rows above, pD rows below,
// pL columns left, pR columns right, for every batch; all c channels of a
// padded position are 0. Updates h and w and returns *this.
template <class Datatype>
Tensor<Datatype>& Tensor<Datatype>::pad(size_t pU, size_t pD, size_t pL, size_t pR) {
    uint32_t hOut = h + pU + pD;
    uint32_t wOut = w + pL + pR;
    vector<Datatype> outFill;
    outFill.reserve(n * hOut * wOut * c);  // exact final size: no reallocs
    for (size_t nLoop = 0; nLoop < n; nLoop++) {
        // pU full zero rows at the top of this batch.
        outFill.insert(outFill.end(), pU * (w + pL + pR) * c, 0);
        for (size_t hLoop = 0; hLoop < h; hLoop++) {
            // Left zeros, then one source row, then right zeros.
            outFill.insert(outFill.end(), pL * c, 0);
            outFill.insert(outFill.end(),
                           data.begin() + hLoop * c * w + nLoop * h * w * c,
                           data.begin() + (hLoop + 1) * c * w + nLoop * h * w * c);
            outFill.insert(outFill.end(), pR * c, 0);
        }
        // pD full zero rows at the bottom of this batch.
        outFill.insert(outFill.end(), pD * (w + pL + pR) * c, 0);
    }
    data.swap(outFill);
    // Release the superseded buffer eagerly (clear() alone keeps capacity).
    outFill.clear();
    vector<Datatype>().swap(outFill);
    h = hOut;
    w = wOut;
    return *this;
}
// In-place im2col: rewrites the tensor so each output "pixel" holds the
// kerH*kerW*kerCin window that a convolution kernel would read at that
// position (valid padding). Shape becomes (n, hOut, wOut, kerH*kerW*kerCin).
// @param t  kernel shape as {kerCout, kerH, kerW, kerCin}; kerCout is unused
// @param sh stride along H, @param sw stride along W
// @return *this, for chaining
// NOTE(review): window offsets use kerCin as the channel stride, which is
// only correct when kerCin == c — TODO confirm with callers.
template <class Datatype>
Tensor<Datatype>& Tensor<Datatype>::im2col(const std::vector<uint32_t>& t, uint32_t sh, uint32_t sw) {
    uint32_t kerH = t[1];
    uint32_t kerW = t[2];
    uint32_t kerCin = t[3];
    // Integer division already floors; the original's floor() call was a no-op.
    uint32_t hOut = (h - kerH) / sh + 1;
    uint32_t wOut = (w - kerW) / sw + 1;
    uint32_t cOut = kerCin * kerH * kerW;
    vector<Datatype> cols;  // renamed from `sort`, which shadowed std::sort
    cols.reserve(n * hOut * wOut * cOut);
    uint32_t kerRowLen = kerW * kerCin;  // elements in one kernel row
    uint32_t imgRowLen = w * kerCin;     // elements in one image row
    // BUG FIX: the original ignored the batch dimension and emitted windows
    // for the first image only, despite reserving space for all n of them.
    for (size_t nIdx = 0; nIdx < n; ++nIdx) {
        const size_t imgBase = nIdx * h * w * c;
        for (size_t hcount = 0; hcount + kerH <= h; hcount += sh) {
            for (size_t wcount = 0; wcount + kerW <= w; wcount += sw) {
                const size_t index = imgBase + wcount * kerCin;
                for (uint32_t i = 0; i < kerH; ++i) {
                    auto begin = data.begin() + index + (hcount + i) * imgRowLen;
                    cols.insert(cols.end(), begin, begin + kerRowLen);
                }
            }
        }
    }
    cols.swap(data);
    h = hOut;
    w = wOut;
    c = cOut;
    return *this;
}

// Clamps every element into [min, max] in place.
// @return *this, for chaining
template <class Datatype>
Tensor<Datatype>& Tensor<Datatype>::clip(const Datatype min, const Datatype max) {
    // BUG FIX: the original used `Datatype len` as a loop counter and
    // `while (--len)`, which never visited element 0 (and misbehaved for
    // floating-point Datatype or an empty tensor).
    const size_t total = static_cast<size_t>(n) * h * w * c;
    for (size_t i = 0; i < total; ++i) {
        if (data[i] > max) {
            data[i] = max;
        } else if (data[i] < min) {
            data[i] = min;
        }
    }
    return *this;
}
// Rebuilds this tensor as a clamped copy of `t`: shape is taken from `t`
// and every element becomes t clamped into [min, max].
// @param t   source tensor (unchanged)
// @param min lower clamp bound, @param max upper clamp bound
template <class Datatype>
void Tensor<Datatype>::clip(const Tensor& t, const Datatype min, const Datatype max) {
    std::vector<uint32_t> dataSize = t.getSize();
    n = dataSize[0];
    h = dataSize[1];
    w = dataSize[2];
    c = dataSize[3];
    const size_t total = static_cast<size_t>(n) * h * w * c;
    // BUG FIX: resize() only fills NEW slots, so a previously-populated
    // tensor kept stale values wherever t <= min; assign() resets them all.
    data.assign(total, min);
    // BUG FIX: the original `while (--len)` loop never wrote element 0.
    for (size_t i = 0; i < total; ++i) {
        if (t.at(i) > max) {
            data.at(i) = max;
        } else if (t.at(i) > min) {
            data.at(i) = t.at(i);
        }
        // else: value <= min, slot already holds min from assign().
    }
}
// Copies `t` into this tensor while dropping the first `start` channels:
// shape becomes (n, h, w, t.c - start) and each spatial position keeps only
// channels [start, t.c).
// @return *this, for chaining
template <class Datatype>
Tensor<Datatype>& Tensor<Datatype>::slice(const Tensor& t, uint32_t start) {
    const std::vector<uint32_t> srcSize = t.getSize();
    n = srcSize[0];
    h = srcSize[1];
    w = srcSize[2];
    c = srcSize[3] - start;
    data.reserve(n * h * w * c);

    const Datatype* src = t.getData();
    const size_t srcC = srcSize[3];
    const size_t positions = n * h * w;
    for (size_t px = 0; px < positions; px++) {
        // Append channels [start, srcC) of this spatial position.
        const Datatype* first = src + px * srcC + start;
        const Datatype* last = src + (px + 1) * srcC;
        data.insert(data.end(), first, last);
    }
    return *this;
}

// Serializes the tensor as text: a header line "n h w c", then one line per
// spatial position (n*h*w lines) holding its c channel values separated by
// single spaces. Inverse of fromString().
// @return the serialized representation
template <class Datatype>
std::string Tensor<Datatype>::toString() {
    std::string res;
    res.append(std::to_string(n) + ' ' + std::to_string(h) + ' ' +
               std::to_string(w) + ' ' + std::to_string(c) + '\n');
    // size_t loops fix the original's signed/unsigned comparison mix.
    const size_t rows = static_cast<size_t>(n) * h * w;
    for (size_t i = 0; i < rows; i++) {
        for (size_t j = 0; j < c; j++) {
            res.append(std::to_string(data.at(j + i * c)));
            // Space between values, newline after the last. The original
            // appended a trailing space and patched it via rfind(' '), which
            // corrupted the header line whenever c == 0.
            res.push_back(j + 1 == c ? '\n' : ' ');
        }
    }
    return res;
}

// Parses the format produced by toString(): a header line "n h w c" followed
// by n*h*w lines of space-separated channel values. Replaces this tensor's
// shape and data.
// @param content serialized tensor text (not modified)
template <class Datatype>
void Tensor<Datatype>::fromString(std::string& content) {
    std::vector<std::string> strVec;
    Stringsplit(content, '\n', strVec);  // one entry per line
    std::vector<std::string> sizeVec;
    Stringsplit(strVec[0], ' ', sizeVec);
    assert(sizeVec.size() == 4);  // header must carry exactly n h w c
    n = atoi(sizeVec[0].c_str());
    h = atoi(sizeVec[1].c_str());
    w = atoi(sizeVec[2].c_str());
    c = atoi(sizeVec[3].c_str());

    assert(strVec.size() == n * h * w + 1);
    // BUG FIX: parsed values were appended after any existing contents;
    // reloading a non-empty tensor produced twice the data.
    data.clear();
    data.reserve(n * h * w * c);
    for (size_t i = 1; i < strVec.size(); i++) {
        std::vector<std::string> rowStr;
        Stringsplit(strVec[i], ' ', rowStr);
        for (size_t j = 0; j < rowStr.size(); j++) {
            // Explicit cast: atof returns double, Datatype may be narrower.
            data.push_back(static_cast<Datatype>(atof(rowStr[j].c_str())));
        }
    }
}

// In-place reshape of a (n, h, w, c) tensor into (n, 3, 3, 27): each of the
// 3x3 output positions gathers one non-overlapping 3x3 spatial patch across
// all channels. Source positions beyond h/w are zero-filled, so the input is
// effectively treated as a 9x9 spatial grid (hence the name).
// NOTE(review): scratch buffers are hard-coded as vector<float>, so this only
// compiles/behaves correctly when Datatype is float — TODO confirm.
// NOTE(review): the final flatten reads exactly 27 values per patch, i.e. it
// assumes c == 3 (27 = 3*3*c); out-of-range access otherwise — verify callers.
template <class Datatype>
void Tensor<Datatype>::reshape9x9x3() {
    vector<float> inData = data;          // flat copy of the source values
    vector<Datatype> dim = {n, h, w, c};  // shape snapshot (currently unused)
    vector<float> outData;
    int dataIndex = 0;
    vector<vector<vector<vector<float>>>> weight2vec(n);
    vector<vector<vector<vector<float>>>> flatOutData;

    // Step 1: unflatten the source into a nested [n][h][w][c] structure.
    for (int64_t i = 0; i < n; ++i) {
        std::vector<std::vector<std::vector<float>>> outvec(h);
        for (int64_t j = 0; j < h; ++j) {
            std::vector<std::vector<float>> hvec(w);
            for (int64_t k = 0; k < w; ++k) {
                std::vector<float> wvec(c);
                for (int64_t l = 0; l < c; ++l) {
                    wvec[l] = inData[dataIndex++];
                }
                hvec[k] = wvec;
            }
            outvec[j] = hvec;
        }
        weight2vec[i] = outvec;
    }

    // Step 2: gather a 3x3 grid of patches; each patch is flattened in
    // (channel, patch-row, patch-col) order.
    for (int64_t cOutIdx = 0; cOutIdx < n; ++cOutIdx) {
        std::vector<std::vector<std::vector<float>>> reshapeVec(3);
        for (int64_t hIdx = 0; hIdx < 3; ++hIdx) {
            std::vector<std::vector<float>> reshapeVecH(3);
            for (int64_t wIdx = 0; wIdx < 3; ++wIdx) {
                vector<float> nCVec;
                for (int64_t cIndex = 0; cIndex < c; ++cIndex) {
                    for (int64_t hIndex = hIdx * 3; hIndex < (hIdx + 1) * 3; ++hIndex) {
                        for (int64_t wIndex = wIdx * 3; wIndex < (wIdx + 1) * 3; ++wIndex) {
                            // One patch vector of c*3*3 values.
                            if (hIndex >= h || wIndex >= w) {
                                nCVec.push_back(0);  // zero-fill outside the source
                            } else {
                                nCVec.push_back(weight2vec[cOutIdx][hIndex][wIndex][cIndex]);
                            }
                        }
                    }
                }
                reshapeVecH[wIdx] = nCVec;
            }
            // One row of the 3x3 patch grid.
            reshapeVec[hIdx] = reshapeVecH;
        }
        flatOutData.push_back(reshapeVec);
    }

    // Step 3: flatten [n][3][3][27] back into the data vector.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < 3; ++j) {
            for (int k = 0; k < 3; ++k) {
                for (int l = 0; l < 27; ++l) {
                    outData.push_back(flatOutData[i][j][k][l]);
                }
            }
        }
    }

    // Tensor result(outData, {n, 3, 3, 27});

    data = outData;
    h = 3;
    w = 3;
    c = 27;

    // return result;
}

// In-place reshape of a (n, h, w, c) tensor into (n, 2, 2, 27): each of the
// 2x2 output positions gathers one non-overlapping 3x3 spatial patch across
// all channels, zero-filling positions beyond h/w (the input is effectively
// treated as a 6x6 spatial grid, hence the name).
// NOTE(review): scratch buffers are hard-coded as vector<float>, so this only
// compiles/behaves correctly when Datatype is float — TODO confirm.
// NOTE(review): the final flatten reads exactly 27 values per patch, i.e. it
// assumes c == 3 (27 = 3*3*c); out-of-range access otherwise — verify callers.
template <class Datatype>
void Tensor<Datatype>::reshape6x6x3() {
    vector<float> inData = data;          // flat copy of the source values
    vector<Datatype> dim = {n, h, w, c};  // shape snapshot (currently unused)
    vector<float> outData;
    int dataIndex = 0;
    vector<vector<vector<vector<float>>>> weight2vec(n);

    // Step 1: unflatten the source into a nested [n][h][w][c] structure.
    for (int64_t i = 0; i < n; ++i) {
        std::vector<std::vector<std::vector<float>>> outvec(h);
        for (int64_t j = 0; j < h; ++j) {
            std::vector<std::vector<float>> hvec(w);
            for (int64_t k = 0; k < w; ++k) {
                std::vector<float> wvec(c);
                for (int64_t l = 0; l < c; ++l) {
                    wvec[l] = inData[dataIndex++];
                }
                hvec[k] = wvec;
            }
            outvec[j] = hvec;
        }
        weight2vec[i] = outvec;
    }

    // Step 2: gather a 2x2 grid of 3x3 patches; each patch is flattened in
    // (channel, patch-row, patch-col) order.
    vector<vector<vector<vector<float>>>> flatOutData;
    for (int64_t cOutIdx = 0; cOutIdx < n; ++cOutIdx) {
        vector<vector<vector<float>>> reshapeVec(2);
        for (int64_t hIdx = 0; hIdx < 2; ++hIdx) {
            vector<vector<float>> reshapeVecH(2);
            for (int64_t wIdx = 0; wIdx < 2; ++wIdx) {
                vector<float> nCVec;
                for (int64_t cIndex = 0; cIndex < c; ++cIndex) {
                    for (int64_t hIndex = hIdx * 3; hIndex < (hIdx + 1) * 3; ++hIndex) {
                        for (int64_t wIndex = wIdx * 3; wIndex < (wIdx + 1) * 3; ++wIndex) {
                            if (hIndex >= h || wIndex >= w) {
                                nCVec.push_back(0);  // zero-fill outside the source
                            } else {
                                nCVec.push_back(weight2vec[cOutIdx][hIndex][wIndex][cIndex]);
                            }
                        }
                    }
                }
                reshapeVecH[wIdx] = nCVec;
            }
            reshapeVec[hIdx] = reshapeVecH;
        }
        flatOutData.push_back(reshapeVec);
    }

    // Step 3: flatten [n][2][2][27] back into the data vector.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < 2; ++j) {
            for (int k = 0; k < 2; ++k) {
                for (int l = 0; l < 27; ++l) {
                    outData.push_back(flatOutData[i][j][k][l]);
                }
            }
        }
    }

    // Tensor result(outData, {n, 2, 2, 27});
    // return result;
    data = outData;
    h = 2;
    w = 2;
    c = 27;
}

// Flattens the tensor in place to shape (n, 1, 1, h*w*c).
// @param operate element ordering of the flattened channel vector:
//   1 — copy in (h, w, c) order, a plain row-major flatten;
//   2 — copy via weight2vec[i][l][j][k], which indexes the h axis with l (< c),
//       the w axis with j (< h) and the c axis with k (< w). NOTE(review):
//       this is only in-bounds when h == w == c — TODO confirm intent/callers.
// NOTE(review): any other operate value leaves outData empty, so the tensor's
// data is emptied while the shape fields still claim h*w*c channels —
// presumably unintended; verify against callers.
// NOTE(review): scratch buffers are hard-coded as vector<float>, so this only
// compiles/behaves correctly when Datatype is float — TODO confirm.
template <class Datatype>
void Tensor<Datatype>::reshape3x3x12(int operate) {
    vector<float> inData = data;          // flat copy of the source values
    vector<Datatype> dim = {n, h, w, c};  // shape snapshot (currently unused)
    vector<float> outData;
    int dataIndex = 0;
    vector<vector<vector<vector<float>>>> weight2vec(n);

    // Step 1: unflatten the source into a nested [n][h][w][c] structure.
    for (int64_t i = 0; i < n; ++i) {
        std::vector<std::vector<std::vector<float>>> outvec(h);
        for (int64_t j = 0; j < h; ++j) {
            std::vector<std::vector<float>> hvec(w);
            for (int64_t k = 0; k < w; ++k) {
                std::vector<float> wvec(c);
                for (int64_t l = 0; l < c; ++l) {
                    wvec[l] = inData[dataIndex++];
                }
                hvec[k] = wvec;
            }
            outvec[j] = hvec;
        }
        weight2vec[i] = outvec;
    }

    if (operate == 1) {
        // Flatten in n, h, w, c order (identity ordering).
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < h; ++j) {
                for (int k = 0; k < w; ++k) {
                    for (int l = 0; l < c; ++l) {
                        outData.push_back(weight2vec[i][j][k][l]);
                    }
                }
            }
        }
    } else if (operate == 2) {
        // Flatten with axes permuted ("n c h w" per the original comment);
        // see the in-bounds caveat in the header note.
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < h; ++j) {
                for (int k = 0; k < w; ++k) {
                    for (int l = 0; l < c; ++l) {
                        outData.push_back(weight2vec[i][l][j][k]);
                    }
                }
            }
        }
    }

    // Tensor result(outData, {n, 1, 1, h * w * c});
    // return result;
    data = outData;
    c = h * w * c;
    h = 1;
    w = 1;
}

//-------------------------------private
/// @brief Splits `str` on the delimiter `split` and appends each token to `rst`.
/// @param str   input string (not modified)
/// @param split single-character delimiter
/// @param rst   output vector; tokens are appended, existing entries are kept
// Appends each `split`-delimited token of `str` to `rst`. Matches the
// semantics of std::getline with a delimiter: an empty input yields no
// tokens and a trailing delimiter does not add an extra empty token.
template <class Datatype>
void Tensor<Datatype>::Stringsplit(const std::string& str, const char split, std::vector<std::string>& rst) {
    size_t pos = 0;
    while (pos < str.size()) {
        const size_t next = str.find(split, pos);
        if (next == std::string::npos) {
            rst.push_back(str.substr(pos));  // final token, no delimiter after it
            break;
        }
        rst.push_back(str.substr(pos, next - pos));
        pos = next + 1;  // skip past the delimiter
    }
}

//----------------------------private
