/* Copyright (c) 2021, National University of Defense Technology. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *     http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <mpi.h>

#include <vector>

#include "csr_matmultivec.h"
#include "parcsr_matmultivec.h"

namespace YHAMG
{

#define MPI_TAG 150

void ParCSR_MatMultiVec(const ParCSR_Matrix& A, const Par_MultiVector& X, const Par_MultiVector& Y)
{
	int n = A.LocLoc.size[0];
	int m = X.num_vectors;
	int nx = X.Loc.size;
	int ny = Y.Loc.size;
	int Ext_size = A.LocExt.size[1];

	int* A_LocLoc_rowptr = A.LocLoc.rowptr;
	int* A_LocLoc_colind = A.LocLoc.colind;
	double* A_LocLoc_values = A.LocLoc.values;

	int* A_LocExt_rowptr = A.LocExt.rowptr;
	int* A_LocExt_colind = A.LocExt.colind;
	double* A_LocExt_values = A.LocExt.values;

	MPI_Comm comm = A.comm;
	int num_neighbors = A.num_neighbors;
	int* neighbor_ranks = A.neighbor_ranks;
	int* recv_ptr = A.Ext_ptr;
	int* send_ptr = A.send_ptr;
	int* send_list = A.send_list;
	double* send_buffer = new double[send_ptr[num_neighbors] * m];

	double* Xv_Loc = X.Loc.values;
	double* Yv_Loc = Y.Loc.values;

	double* Xv_Ext = new double[Ext_size * m];

	MPI_Request* recv_requests = new MPI_Request[num_neighbors];
	MPI_Request* send_requests = new MPI_Request[num_neighbors];
	MPI_Status status;

	for (int r = 0; r < num_neighbors; ++r)
		if (recv_ptr[r + 1] > recv_ptr[r])
			MPI_Irecv(Xv_Ext + recv_ptr[r] * m, (recv_ptr[r + 1] - recv_ptr[r]) * m, MPI_DOUBLE, neighbor_ranks[r], MPI_TAG, comm, recv_requests + r);

	for (int r = 0; r < num_neighbors; ++r)
	{
		if (send_ptr[r + 1] > send_ptr[r])
		{
			for (int i = send_ptr[r]; i < send_ptr[r + 1]; ++i)
				for (int j = 0; j < m; ++j) send_buffer[i * m + j] = Xv_Loc[send_list[i] + j * nx];
			MPI_Isend(send_buffer + send_ptr[r] * m, (send_ptr[r + 1] - send_ptr[r]) * m, MPI_DOUBLE, neighbor_ranks[r], MPI_TAG, comm, send_requests + r);
		}
	}

	CSR_MatMultiVec(A.LocLoc, X.Loc, Y.Loc);

	for (int r = 0; r < num_neighbors; ++r)
		if (recv_ptr[r + 1] > recv_ptr[r])
			MPI_Wait(recv_requests + r, &status);

	for (int r = 0; r < num_neighbors; ++r)
		if (send_ptr[r + 1] > send_ptr[r])
			MPI_Wait(send_requests + r, &status);

#ifdef USE_OPENMP
#pragma omp parallel for schedule(guided)
#endif
	for (int i = 0; i < n; ++i)
	{
		for (int j = 0; j < m; ++j)
		{
			double temp = Yv_Loc[i + j * ny];
			for (int k = A_LocExt_rowptr[i]; k < A_LocExt_rowptr[i + 1]; ++k)
				temp += A_LocExt_values[k] * Xv_Ext[A_LocExt_colind[k] * m + j];
			Yv_Loc[i + j * ny] = temp;
		}
	}

	delete[] Xv_Ext;
	delete[] send_buffer;
	delete[] recv_requests;
	delete[] send_requests;
}

}