/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include <stdlib.h>
#include "sptensor.h"

/**
 * SPA implementation
 *
 */
/**
 * Sparse tensor-times-tensor contraction Z = X *_{cmodes} Y using a
 * sparse accumulator (SPA).
 *
 * Both inputs are shuffled in place so their modes are ordered
 * [free modes of X | contract modes] and [contract modes | free modes of Y],
 * then sorted lexicographically. Z's modes are [free modes of X | free modes of Y].
 *
 * NOTE(review): the inner loop indexes `fidx_Y.data[k]` directly by the
 * contract-mode index `k`, which only works for a single contract mode with a
 * dense fiber-pointer array — consistent with the "only one contract mode"
 * comments below. TODO: confirm before using num_cmodes > 1.
 *
 * @param Z           output tensor (allocated here; caller frees)
 * @param X, Y        input tensors; WARNING: shuffled and re-sorted in place
 * @param num_cmodes  number of contraction modes
 * @param cmodes_X    contraction mode ids of X, paired with cmodes_Y
 * @param cmodes_Y    contraction mode ids of Y, paired with cmodes_X
 * @return 0 on success, a ParTI! error code otherwise
 */
int sptSparseTensorMulTensor(sptSparseTensor *Z, sptSparseTensor * const X, sptSparseTensor *const Y, sptIndex num_cmodes, sptIndex * cmodes_X, sptIndex * cmodes_Y)
{
    int result;
    sptIndex nmodes_X = X->nmodes;
    sptIndex nmodes_Y = Y->nmodes;
    sptTimer timer;
    sptNewTimer(&timer, 0);

    /// Validate shapes: at least one free mode in X; paired contract dims must agree.
    if(num_cmodes >= X->nmodes) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  SpTns * SpTns", "shape mismatch");
    }
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  SpTns * SpTns", "shape mismatch");
        }
    }

    sptStartTimer(timer);
    /// Shuffle X indices and sort X as the order of free modes -> contract modes; mode_order also separates all the modes into free and contract modes.
    sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
    spt_CheckOSError(!mode_order_X, "CPU  SpTns * SpTns");
    sptIndex ci = nmodes_X - num_cmodes, fi = 0;
    for(sptIndex m = 0; m < nmodes_X; ++m) {
        if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
            mode_order_X[fi] = m;
            ++ fi;
        }
    }
    sptAssert(fi == nmodes_X - num_cmodes);
    /// Copy the contract modes while keeping the contraction mode order
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        mode_order_X[ci] = cmodes_X[m];
        ++ ci;
    }
    sptAssert(ci == nmodes_X);
    printf("mode_order_X: \n");
    sptDumpIndexArray(mode_order_X, nmodes_X, stdout);
    sptSparseTensorShuffleModes(X, mode_order_X);
    for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order after the shuffle
    sptSparseTensorSortIndex(X, 1, 1);

    /// Shuffle Y indices and sort Y as the order of contract modes -> free modes
    sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
    spt_CheckOSError(!mode_order_Y, "CPU  SpTns * SpTns");
    ci = 0;
    fi = num_cmodes;
    for(sptIndex m = 0; m < nmodes_Y; ++m) {
        if(sptInArray(cmodes_Y, num_cmodes, m) == -1) {
            mode_order_Y[fi] = m;
            ++ fi;
        }
    }
    sptAssert(fi == nmodes_Y);
    /// Copy the contract modes while keeping the contraction mode order
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        mode_order_Y[ci] = cmodes_Y[m];
        ++ ci;
    }
    sptAssert(ci == num_cmodes);
    printf("mode_order_Y: \n");
    // BUGFIX: was dumped with nmodes_X, reading past the array when X and Y differ in order.
    sptDumpIndexArray(mode_order_Y, nmodes_Y, stdout);
    sptSparseTensorShuffleModes(Y, mode_order_Y);
    for(sptIndex m = 0; m < nmodes_Y; ++m) mode_order_Y[m] = m; // reset mode_order after the shuffle
    sptSparseTensorSortIndex(Y, 1, 1);
    sptStopTimer(timer);
    sptPrintElapsedTime(timer, "Sort X, Y");

    sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
    sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);

    /// Set fidx_X: indexing the combined free indices of X and fidx_Y: indexing the combined contract indices of Y
    sptNnzIndexVector fidx_X, fidx_Y;
    /// Set fiber pointers over the free modes of X
    sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
    /// Set fiber pointers over the contract modes of Y.
    /// BUGFIX: was called on X; fidx_Y must index Y's leading (contract) modes.
    sptSparseTensorSetIndices(Y, mode_order_Y, num_cmodes, &fidx_Y);
    free(mode_order_X);
    free(mode_order_Y);
    printf("fidx_X: \n");
    sptDumpNnzIndexVector(&fidx_X, stdout);
    printf("fidx_Y: \n");
    sptDumpNnzIndexVector(&fidx_Y, stdout);

    /// Allocate the output tensor: free modes of X followed by free modes of Y.
    sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
    sptIndex *ind_buf = malloc(nmodes_Z * sizeof *ind_buf);
    spt_CheckOSError(!ind_buf, "CPU  SpTns * SpTns");
    for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
        ind_buf[m] = X->ndims[m];
    }
    for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
        // BUGFIX: offset was nmodes_X - num_cmodes, writing past ind_buf;
        // Y's free mode m lands at position (m - num_cmodes) + (nmodes_X - num_cmodes).
        ind_buf[m + nmodes_X - 2 * num_cmodes] = Y->ndims[m];
    }
    // BUGFIX: was sptNewSparseTensor(Y, ...), clobbering input Y and leaving Z uninitialized.
    result = sptNewSparseTensor(Z, nmodes_Z, ind_buf);  // no indices, values, nnz=0
    free(ind_buf);
    spt_CheckError(result, "CPU  SpTns * SpTns", NULL);

    /// Tensor contraction computation, only works for one contraction mode.
    /// Allocate the SPA buffer: one index vector per free mode of Y, plus values.
    sptIndex nmodes_spa = nmodes_Y - num_cmodes;
    sptIndexVector * spa_inds = (sptIndexVector*)malloc(nmodes_spa * sizeof(sptIndexVector));
    spt_CheckOSError(!spa_inds, "CPU  SpTns * SpTns");
    sptValueVector spa_vals;

    /// fidx_X is a CSR-style pointer array (len = #fibers + 1), so the last
    /// valid fiber is at len - 2. BUGFIX: the loop previously ran to len,
    /// reading fidx_X.data[len] out of bounds.
    for(sptNnzIndex fi_ptr = 0; fi_ptr + 1 < fidx_X.len; ++fi_ptr) {    // Loop fiber pointers of X
        sptNnzIndex fi_begin = fidx_X.data[fi_ptr];
        sptNnzIndex fi_end = fidx_X.data[fi_ptr+1];

        /// (Re)initialize the SPA buffer for this X fiber.
        for(sptIndex m = 0; m < nmodes_spa; ++m)
            sptNewIndexVector(&spa_inds[m], 0, 0);
        // BUGFIX: was sptNewIndexVector on a sptValueVector.
        sptNewValueVector(&spa_vals, 0, 0);

        for(sptNnzIndex zX = fi_begin; zX < fi_end; ++ zX) {    // Loop nnzs inside a X fiber
            sptValue valX = X->values.data[zX];
            sptIndex k = X->inds[nmodes_X - num_cmodes].data[zX];   // only one contract mode
            sptNnzIndex fj_begin = fidx_Y.data[k];   // only one contract mode
            sptNnzIndex fj_end = fidx_Y.data[k+1];   // only one contract mode
            for(sptNnzIndex zY = fj_begin; zY < fj_end; ++ zY) {
                /// SPA merge: accumulate into an existing entry with the same
                /// free-of-Y coordinates, otherwise append a new entry.
                /// (Linear probe; different k's can hit the same coordinates.)
                sptNnzIndex loc = spa_vals.len;
                for(sptNnzIndex s = 0; s < spa_vals.len; ++s) {
                    int match = 1;
                    for(sptIndex m = 0; m < nmodes_spa; ++m) {
                        if(spa_inds[m].data[s] != Y->inds[m + num_cmodes].data[zY]) {
                            match = 0;
                            break;
                        }
                    }
                    if(match) {
                        loc = s;
                        break;
                    }
                }
                if(loc < spa_vals.len) {
                    spa_vals.data[loc] += Y->values.data[zY] * valX;
                } else {
                    for(sptIndex m = 0; m < nmodes_spa; ++m)
                        sptAppendIndexVector(&spa_inds[m], Y->inds[m + num_cmodes].data[zY]);
                    // BUGFIX: was sptAppendIndexVector on the value vector.
                    sptAppendValueVector(&spa_vals, Y->values.data[zY] * valX);
                }
            }
        }

        /// Write back to Z: the X-fiber's free indices are constant over the
        /// fiber, so read them once from fi_begin. (Was previously missing —
        /// results were computed and discarded.)
        for(sptNnzIndex s = 0; s < spa_vals.len; ++s) {
            for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m)
                sptAppendIndexVector(&Z->inds[m], X->inds[m].data[fi_begin]);
            for(sptIndex m = 0; m < nmodes_spa; ++m)
                sptAppendIndexVector(&Z->inds[m + nmodes_X - num_cmodes], spa_inds[m].data[s]);
            sptAppendValueVector(&Z->values, spa_vals.data[s]);
            ++ Z->nnz;
        }

        /// Free SPA buffer contents for this fiber.
        for(sptIndex m = 0; m < nmodes_spa; ++m)
            sptFreeIndexVector(&spa_inds[m]);
        sptFreeValueVector(&spa_vals);
    }
    free(spa_inds);    // BUGFIX: was leaked

    sptFreeTimer(timer);
    sptFreeNnzIndexVector(&fidx_X);
    sptFreeNnzIndexVector(&fidx_Y);
    return 0;
}
