#include<iostream>
#include <pthread.h>
#include <immintrin.h>
#include <xmmintrin.h> //SSE
#include <emmintrin.h> //SSE2
#include <pmmintrin.h> //SSE3
#include <tmmintrin.h> //SSSE3
#include <smmintrin.h> //SSE4.1
#include <nmmintrin.h> //SSE4.2
#include <immintrin.h> //AVX
#include<semaphore.h>
#include<sys/time.h>



#include<mpi.h>
using namespace std;
int T, n;//T is the total available time; n is the number of subjects (items).
int* time_use;// time_use[i]: time cost of item i (1..n); entry 0 is set to 0
int* value;// value[i]: value of item i (1..n); entry 0 is set to 0
int** opt;// opt[i][t]: best total value using items 1..i within time budget t



int taskNum = 1;// DP columns handled per thread in the column-split variant
int pThreadNum = 1;// number of pthreads per process (set in main)

bool** pre_is_done;// pre_is_done[i][k]: thread k has finished row i (column-split sync flags)
bool** ok;// ok[i][t]: opt[i][t] has been produced (row-split sync flags)

// Reads (whether_random == false) or randomly generates the problem data,
// and allocates/zeroes the DP table opt[][], the ok[][] readiness flags
// and the pre_is_done[][] synchronization flags used by the pthread DPs.
void init(bool whether_random)
{
	if (!whether_random)
		cin >> T >> n;

	time_use = new int[n + 1];
	value = new int[n + 1];
	opt = new int*[n + 1];
	ok = new bool* [n + 1];
	for (int i = 0; i <= n; i++)
	{
		opt[i] = new int[T + 1];
		// BUG FIX: was `new bool[T]`, but the loop below writes ok[i][0..T]
		// (T + 1 entries) — a heap buffer overflow. Allocate T + 1 slots.
		ok[i] = new bool[T + 1];
		for (int j = 0; j <= T; j++)
		{
			opt[i][j] = 0;
			// Row 0 (no items) and column 0 (no time) are ready immediately.
			if (i == 0 || j == 0)
				ok[i][j] = 1;
			else
				ok[i][j] = 0;
		}
		if (i != 0)
		{
			if (whether_random)
			{
				time_use[i] = rand() % 1010 + 1;
				value[i] = rand() % 100;
			}
			else
				cin >> time_use[i] >> value[i];
		}
		else
			time_use[i] = value[i] = 0;
	}

	// Column-split sync flags: pre_is_done[i][k] == 1 means thread k finished
	// row i (the last thread needs no flag — nobody waits on it).
	pre_is_done = new bool* [n + 1];
	for (int i = 1; i <= n; i++)
	{
		pre_is_done[i] = new bool[pThreadNum - 1];
		for (int j = 0; j < pThreadNum - 1; j++)
			pre_is_done[i][j] = 0;
	}
}
// Allocates the item arrays and DP table on every rank and distributes
// rank 0's (zero-initialized) data to all other ranks via point-to-point
// sends. Callers that need concrete item data overwrite time_use/value
// after this returns (see the correctness branch in main()).
void mpiinit(int numProcesses, int rank)
{
	time_use = new int[n + 1];
	value = new int[n + 1];
	opt = new int* [n + 1];
	if (rank == 0)
	{
		for (int i = 0; i <= n; i++)
		{
			opt[i] = new int[T + 1];
			for (int j = 0; j <= T; j++)
			{
				opt[i][j] = 0;
			}
			// BUG FIX: the random-fill code was commented out, leaving
			// time_use[i]/value[i] uninitialized for i != 0, and those
			// indeterminate values were then sent over MPI below. Zero them.
			time_use[i] = value[i] = 0;

			for (int j = 1; j < numProcesses; j++)
				MPI_Send(&opt[i][0], T + 1, MPI_INT, j, 0, MPI_COMM_WORLD);
		}
		for (int j = 1; j < numProcesses; j++)
		{
			MPI_Send(&value[0], n + 1, MPI_INT, j, 0, MPI_COMM_WORLD);
			MPI_Send(&time_use[0], n + 1, MPI_INT, j, 0, MPI_COMM_WORLD);
		}
	}
	else
	{
		for (int i = 0; i <= n; i++)
		{
			opt[i] = new int[T + 1];
			MPI_Recv(&opt[i][0], T + 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		}
		MPI_Recv(&value[0], n + 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Recv(&time_use[0], n + 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}
}

// Debug dump: prints the item arrays and the whole DP table to stdout.
void testShow()
{
	cout << "time_use: ";
	for (int idx = 0; idx <= n; idx++)
		cout << time_use[idx] << " ";
	cout << endl << endl;

	cout << "value: ";
	for (int idx = 0; idx <= n; idx++)
		cout << value[idx] << " ";
	cout << endl << endl;

	cout << "opt: " << endl;
	for (int row = 0; row <= n; row++)
	{
		for (int col = 0; col <= T; col++)
			cout << opt[row][col] << " ";
		cout << endl;
	}
}

//Anyway, the parallelized implementations start from here.


// Per-thread argument bundle handed to every pthread worker.
typedef struct
{
	int t_id; // thread id (0 .. pThreadNum-1)
	int begin; // first DP column (time budget) for this worker, inclusive
	int end; // one past the last DP column for this worker
}threadParam_t;



// Semaphores for the leader/worker handshake in mpi_find_best_back():
// the leader posts sem_workerstart[id] to release each worker, every
// worker posts sem_leader when its slice is done, then blocks on
// sem_workerend[id] until the leader lets the next round begin.
sem_t sem_leader;
sem_t* sem_workerstart;
sem_t* sem_workerend;


// Column-split worker ("Lie" = column): thread t_id owns the fixed column
// band [t_id*taskNum + 1, t_id*taskNum + taskNum] and sweeps all rows.
// NOTE(review): the pre_is_done handshake uses plain bools shared between
// threads with no atomics/fences — the spin loops below are data races and
// an optimizing compiler may never re-read the flag; confirm intended
// memory-model guarantees.
void* LieThreadFunc(void* param)
{
	threadParam_t* p = (threadParam_t*)param;
	int t_id = p->t_id; //thread id

	int begin = t_id * taskNum + 1;
	int end = begin + taskNum;


	for (int i = 1; i <= n; i++)
	{
		// Spin until the left-neighbour thread has finished row i: computing
		// opt[i][t] may read opt[i-1][t - time_use[i]] from a lower column.
		if (t_id != 0)
			while (pre_is_done[i][t_id - 1] == 0)
				continue;
		for (int t = begin; t < end && t <= T; t++)
		{
			// 0/1-knapsack transition: skip item i, or take it if it fits.
			if (time_use[i] > t)
				opt[i][t] = opt[i - 1][t];
			else
				opt[i][t] = opt[i - 1][t] > opt[i - 1][t - time_use[i]] + value[i] ? opt[i - 1][t] : opt[i - 1][t - time_use[i]] + value[i];
		}
		// Publish completion of row i for the right-neighbour thread.
		if (t_id != pThreadNum - 1)
			pre_is_done[i][t_id] = 1;
	}

	pthread_exit(NULL);

}
// Row-split worker ("Hang" = row): thread t_id owns the rows with
// (i - 1) % pThreadNum == t_id and computes each owned row across all
// columns, spin-waiting on the ok[][] flags until the cells of row i-1
// that the transition reads have been produced.
// NOTE(review): ok[][] is plain shared bool state with no atomics or
// fences — these spin-waits are data races; confirm intended guarantees.
void* HangThreadFunc(void* param)
{
	threadParam_t* p = (threadParam_t*)param;
	int t_id = p->t_id; //thread id

	// begin/end are computed but unused in this row-split variant.
	int begin = t_id * taskNum + 1;
	int end = begin + taskNum;



	for (int i = 1; i <= n; i++)
	{
		if ((i - 1) % pThreadNum == t_id)
		{
			for (int t = 1; t <= T; t++)
			{
				if (time_use[i] > t)
				{
					// Item i does not fit: copy the cell above once it is ready.
					while (ok[i - 1][t] == 0)
						continue;
					opt[i][t] = opt[i - 1][t];
				}
				else
				{
					// Need both parent cells of row i-1 before taking the max.
					while (!(ok[i - 1][t - time_use[i]] && ok[i - 1][t]))
						continue;
					opt[i][t] = opt[i - 1][t] > opt[i - 1][t - time_use[i]] + value[i] ? opt[i - 1][t] : opt[i - 1][t - time_use[i]] + value[i];
				}
				// Mark this cell as produced for the thread owning row i+1.
				ok[i][t] = 1;
			}
		}
	}

	pthread_exit(NULL);

}
double bingxing_find_best_back()
{
	pthread_t* handle = new pthread_t[pThreadNum];
	threadParam_t* param = new threadParam_t[pThreadNum];

	struct timeval t1, t2;
	double timeuse;



	int hol = 1;//1是行，0是列
	gettimeofday(&t1, NULL);
	for (int id = 0; id < pThreadNum; id++)
	{
		param[id].t_id = id;
		switch (hol)
		{
		case 0:
			pthread_create(&handle[id], NULL, LieThreadFunc, (void*)&param[id]);
			break;
		case 1:
			pthread_create(&handle[id], NULL, HangThreadFunc, (void*)&param[id]);
			break;
		}
	}

	for (int id = 0; id < pThreadNum; id++)
		pthread_join(handle[id], NULL);

	gettimeofday(&t2, NULL);
	timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
	
	return timeuse;
}





// Worker thread for mpi_find_best_back(). Rows are processed in groups of
// pThreadNum, in lock-step with the leader: for each group the worker
// waits on sem_workerstart, computes its assigned row's columns
// [begin, end) (clipped to T), posts sem_leader, then blocks on
// sem_workerend until the leader releases the next group.
// NOTE(review): within one group, row i depends on row i-1 which another
// worker of the same group may still be computing — the transition reads
// opt[i-1][...] without per-cell synchronization here; verify intended
// ordering guarantees.
void* mpiHangThreadFunc(void* param)
{
	threadParam_t* p = (threadParam_t*)param;
	int t_id = p->t_id; //thread id

	int begin = p->begin; // first column of this MPI rank's band
	int end = p->end; // one past the last column of the band




	for (int j = 1; j <= n; j+= pThreadNum)
	{
		// Wait for the leader to release this group of rows.
		sem_wait(&sem_workerstart[t_id]);
		for (int i = j; i < j + pThreadNum && i <= n; i++)
		{
			// Thread t_id handles the rows with (i - 1) % pThreadNum == t_id.
			if ((i - 1) % pThreadNum == t_id)
			{
				for (int t = begin; t < end && t <= T; t++)
				{
					// 0/1-knapsack transition: skip item i, or take it if it fits.
					if (time_use[i] > t)
						opt[i][t] = opt[i - 1][t];
					else
						opt[i][t] = opt[i - 1][t] > opt[i - 1][t - time_use[i]] + value[i] ? opt[i - 1][t] : opt[i - 1][t - time_use[i]] + value[i];
				}
			}
		}
		// Signal completion to the leader, then wait for the go-ahead
		// before advancing to the next row group.
		sem_post(&sem_leader);
		sem_wait(&sem_workerend[t_id]);
	}

	pthread_exit(NULL);

}


double mpi_find_best_back(int numProcesses,int rank)
{
	int num_2_do = T % numProcesses == 0 ? T / numProcesses : T / numProcesses + 1;
	int begin = rank * num_2_do + 1;
	int end = begin + num_2_do;



	pthread_t* handle = new pthread_t[pThreadNum];
	threadParam_t* param = new threadParam_t[pThreadNum];

	for (int id = 0; id < pThreadNum; id++)
	{
		param[id].t_id = id;	
		param[id].begin = begin;
		param[id].end = end;
		pthread_create(&handle[id], NULL, mpiHangThreadFunc, (void*)&param[id]);
	}


	struct timeval t1, t2;
	double timeuse = 0;
	double averageTime = 0;

	gettimeofday(&t1, NULL);



	for (int i = 1; i <= n; i+= pThreadNum)
	{
		if (rank != 0)
		{
			for(int j = i; j < i + pThreadNum && j <= n;j++)
				MPI_Recv(&opt[j][1], begin, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		}


		for (int id = 0; id < pThreadNum; id++)
			sem_post(&sem_workerstart[id]);
		for (int id = 0; id < pThreadNum; id++)
			sem_wait(&sem_leader);
		for (int id = 0; id < pThreadNum; id++)
			sem_post(&sem_workerend[id]);




		if (rank != numProcesses - 1)
		{
			for (int j = i; j < i + pThreadNum && j <= n; j++)
				MPI_Send(&opt[j][1], end, MPI_INT, rank + 1, 0, MPI_COMM_WORLD);
		}
	}



	gettimeofday(&t2, NULL);
	timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
	MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	averageTime /= numProcesses;

	for (int id = 0; id < pThreadNum; id++)
		pthread_join(handle[id], NULL);


	return averageTime;
}


// Pipelined MPI DP (no pthreads are created here despite the name): each
// rank owns the column band [begin, end) and, per row, receives the
// upstream rank's columns, computes its own band, and forwards the row
// downstream. Returns the average wall time across ranks (meaningful on
// rank 0, where MPI_Reduce deposits the sum).
double mpi_pthread_find_best_back(int numProcesses, int rank)
{
	int num_2_do = T % numProcesses == 0 ? T / numProcesses : T / numProcesses + 1;
	int begin = rank * num_2_do + 1;
	int end = begin + num_2_do;

	// BUG FIX: with ceil-division, begin/end can exceed T on the trailing
	// ranks, and using them directly as MPI element counts read/wrote past
	// the end of opt[i] (T + 1 ints, valid columns 1..T from &opt[i][1]).
	// Clamp the transfer sizes. A sender's `end` equals the next rank's
	// `begin`, so the clamped counts on the two sides still agree.
	int recvCount = begin <= T ? begin : T;
	int sendCount = end <= T ? end : T;

	struct timeval t1, t2;
	double timeuse = 0;
	double averageTime = 0;

	gettimeofday(&t1, NULL);

	for (int i = 1; i <= n; i++)
	{
		// The first recvCount columns of row i come from the upstream rank.
		if (rank != 0)
		{
			MPI_Recv(&opt[i][1], recvCount, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		}
		for (int t = begin; t < end && t <= T; t++)
		{
			// 0/1-knapsack transition: skip item i, or take it if it fits.
			if (time_use[i] > t)
				opt[i][t] = opt[i - 1][t];
			else
				opt[i][t] = opt[i - 1][t] > opt[i - 1][t - time_use[i]] + value[i] ? opt[i - 1][t] : opt[i - 1][t - time_use[i]] + value[i];
		}
		// Forward the first sendCount columns of row i to the next rank.
		if (rank != numProcesses - 1)
		{
			MPI_Send(&opt[i][1], sendCount, MPI_INT, rank + 1, 0, MPI_COMM_WORLD);
		}
	}

	gettimeofday(&t2, NULL);
	timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
	MPI_Reduce(&timeuse, &averageTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	averageTime /= numProcesses;
	return averageTime;
}


double find_best_back()
{
	struct timeval t1, t2;
	double timeuse;

	gettimeofday(&t1, NULL);
	for (int i = 1; i <= n; i++)
	{
		for (int t = 1; t <= T; t++)
		{
			if (time_use[i] > t)
				opt[i][t] = opt[i - 1][t];
			else
				opt[i][t] = opt[i - 1][t] > opt[i - 1][t - time_use[i]] + value[i] ? opt[i - 1][t] : opt[i - 1][t - time_use[i]] + value[i];
		}
	}
	gettimeofday(&t2, NULL);
	timeuse = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;

	return timeuse;
}
int main(int argc, char* argv[])
{

	int provided;
	MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
	int numProcesses, rank;
	MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);



	pThreadNum = 10;


	sem_workerstart = new sem_t[pThreadNum];
	sem_workerend = new sem_t[pThreadNum];


	sem_init(&sem_leader, 0, 0);
	for (int i = 0; i < pThreadNum; i++)
	{
		sem_init(&sem_workerstart[i], 0, 0);
		sem_init(&sem_workerend[i], 0, 0);
	}



	int mpi = 1;

	double mpiTime = 0;

	int pre = 0;
	int time = 1;
	if (time)
	{
		if (pre)
			for (int i = 1; i <= 5; i++)
			{
				n = i * 2000;
				T = i * 2000;


				//init(1);
				mpiinit(numProcesses, rank);
				taskNum = T % pThreadNum == 0 ? T / pThreadNum : T / pThreadNum + 1;

				mpiTime = mpi_pthread_find_best_back(numProcesses, rank);
				//mpiTime = mpi_find_best_back(numProcesses, rank);
				if (rank == 0)
				{
					cout << mpiTime << endl;
				}
				//cout << bingxing_find_best_back() << endl;
				//cout << find_best_back() << endl;
			}
		else
			for (int i = 10; i <= 10; i++)
			{
				n = i * 2000;
				T = i * 2000;


				//init(1);
				mpiinit(numProcesses, rank);
				taskNum = T % pThreadNum == 0 ? T / pThreadNum : T / pThreadNum + 1;
				

				mpiTime = mpi_pthread_find_best_back(numProcesses, rank);

				//mpiTime = mpi_find_best_back(numProcesses, rank);
				if (rank == 0)
				{
					cout << mpiTime << endl;
				}

				//cout << bingxing_find_best_back() << endl;
				//cout << find_best_back() << endl;
			}
	}
	else
	{
		n = 3;
		T = 70;
		mpiinit(numProcesses, rank);
		value[1] = 100;
		value[2] = 1;
		value[3] = 2;

		time_use[1] = 71;
		time_use[2] = 69;
		time_use[3] = 1;
		//for (int i = 0; i < numProcesses; i++)
		//{
		//	if (i == rank)
		//	{
		//		testShow();
		//	}
		//	MPI_Barrier(MPI_COMM_WORLD);  // 进程同步
		//}
		//mpi_find_best_back(numProcesses, rank);
		mpi_pthread_find_best_back(numProcesses, rank);
		if (rank == numProcesses - 1)
		{
			testShow();
		}
	}
	

	sem_destroy(&sem_leader);
	for (int i = 0; i < pThreadNum; i++)
	{
		sem_destroy(&sem_workerstart[i]);
		sem_destroy(&sem_workerend[i]);
	}

	return 0;
}
