/* Copyright (c) 2021, National University of Defense Technology. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *     http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bsr_common.h"
#include "bsr_matvec.h"
#include "parbsr_matvec.h"

#include <vector>

namespace YHAMG
{

#define MPI_TAG 150

void ParBSR_MatVec(double alpha, const ParBSR_Matrix& A, const Par_Vector& x, double beta, const Par_Vector& y, const Par_Vector& w)
{
	int n = A.LocLoc.size[0];
	int block_size = A.LocLoc.block_size;
	int Ext_size = A.LocExt.size[1];

	MPI_Comm comm = A.comm;
	int num_neighbors = A.num_neighbors;
	int* neighbor_ranks = A.neighbor_ranks;
	int* recv_ptr = A.Ext_ptr;
	int* send_ptr = A.send_ptr;
	int* send_list = A.send_list;
	double* send_buffer = A.send_buffer;

	double* xv_Loc = x.Loc.values;
	double* yv_Loc = y.Loc.values;
	double* wv_Loc = w.Loc.values;

	double* xv_Ext = A.x_Ext.values;

	MPI_Request* recv_requests = new MPI_Request[num_neighbors];
	MPI_Request* send_requests = new MPI_Request[num_neighbors];
	MPI_Status status;

	for (int r = 0; r < num_neighbors; ++r)
		if (recv_ptr[r + 1] > recv_ptr[r])
			MPI_Irecv(xv_Ext + recv_ptr[r] * block_size, (recv_ptr[r + 1] - recv_ptr[r]) * block_size, MPI_DOUBLE, neighbor_ranks[r], MPI_TAG, comm, recv_requests + r);

	for (int r = 0; r < num_neighbors; ++r)
	{
		if (send_ptr[r + 1] > send_ptr[r])
		{
			for (int i = send_ptr[r]; i < send_ptr[r + 1]; ++i)
				VecBlockCopy(block_size, xv_Loc + send_list[i] * block_size, send_buffer + i * block_size);
			MPI_Isend(send_buffer + send_ptr[r] * block_size, (send_ptr[r + 1] - send_ptr[r]) * block_size, MPI_DOUBLE, neighbor_ranks[r], MPI_TAG, comm, send_requests + r);
		}
	}

	if (alpha != 0)
	{
		beta /= alpha;
		BSR_MatVec(1.0, A.LocLoc, x.Loc, beta, y.Loc, w.Loc);
	}
	else
	{
		for (int i = 0; i < n; ++i)
			VecBlockCopy(block_size, yv_Loc + i * block_size, wv_Loc + i * block_size);
		if (beta != 1)
#ifdef USE_OPENMP
#pragma omp parallel for 
#endif
			for (int i = 0; i < n; ++i)
				VecBlockScale(block_size, beta, wv_Loc + i * block_size);
	}
	
	for (int r = 0; r < num_neighbors; ++r)
		if (recv_ptr[r + 1] > recv_ptr[r])
			MPI_Wait(recv_requests + r, &status);

	for (int r = 0; r < num_neighbors; ++r)
		if (send_ptr[r + 1] > send_ptr[r])
			MPI_Wait(send_requests + r, &status);

	if (alpha != 0)
	{
		BSR_MatVec(1.0, A.LocExt, A.x_Ext, 1.0, w.Loc, w.Loc);
		if (alpha != 1)
		{
#ifdef USE_OPENMP
#pragma omp parallel for 
#endif
			for (int i = 0; i < n; ++i)
				VecBlockScale(block_size, alpha, wv_Loc + i * block_size);
		}
	}

	delete[] recv_requests;
	delete[] send_requests;
}

// Convenience overload: w is not provided, so the result overwrites y.
// Computes y = A * x (alpha = 1, beta = 0), with y passed as both the
// input vector (scaled by beta = 0, so its prior contents are discarded)
// and the output vector.
void ParBSR_MatVec(const ParBSR_Matrix& A, const Par_Vector& x, const Par_Vector& y)
{
	ParBSR_MatVec(1.0, A, x, 0.0, y, y);
}

}