#include "MTMatrixBinaryOperator.hpp"
#include "MTExpression.hpp"
#include "Params.h"

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

namespace MathematicaTranslator
{	
	
	// Builds a matrix binary-operator node from two operand expressions.
	// `op` is the surface syntax ("+" or "*") and is mapped to the internal
	// tag ("add"/"mul") that generateCode() dispatches on.  An unknown
	// operator used to leave the tag empty silently, producing broken
	// generated code with no hint of the cause — now it is reported.
	MTMatrixBinaryOperator::MTMatrixBinaryOperator(MTExpression& expr1, const std::string& op, MTExpression& expr2)
		: expr1(expr1), expr2(expr2)	
	{
		if (op == "+")
		{
			this->op = "add";
		}
		else if (op == "*")
		{
			this->op = "mul";
		}
		else
		{
			// Leave this->op empty (legacy behavior) but make the problem visible.
			std::cerr << "MTMatrixBinaryOperator: unsupported operator '" << op << "'" << std::endl;
		}
	}

	// Emits one CUDA kernel plus its host-side wrapper into the generated
	// file.  The "add" and "mul" paths previously duplicated ~80 lines of
	// emission code differing only in symbol names and the element-wise
	// operator; they are unified here.  The wrapper follows the stock CUDA
	// sample shape: allocate device buffers, copy inputs, launch one block
	// of `size` threads, synchronize, copy the result back, and release
	// everything through a shared Error: label.
	static void emitCudaWrapper(std::ofstream& out,
	                            const std::string& uniType,
	                            const std::string& kernelName,
	                            const std::string& wrapperName,
	                            const std::string& opSymbol)
	{
		// Element-wise kernel: one thread per element (single block).
		out << "__global__ void " << kernelName << "(" << uniType << " *c, const " << uniType << " *a, const " << uniType << " *b)" << "\n";
		out << "{" << "\n";
		out << "	int i = threadIdx.x;" << "\n";
		out << "	c[i] = a[i] " << opSymbol << " b[i];" << "\n";
		out << "}" << "\n\n";

		out << "cudaError_t " << wrapperName << "(" << uniType << " *c, const " << uniType << " *a, const " << uniType << " *b, size_t size)" << "\n";
		out << "{" << "\n";
		out << "	" << uniType << " *dev_a = 0;" << "\n";
		out << "	" << uniType << " *dev_b = 0;" << "\n";
		out << "	" << uniType << " *dev_c = 0;" << "\n";
		out << "	cudaError_t cudaStatus;" << "\n" << "\n";

		// Choose which GPU to run on, change this on a multi-GPU system.
		out << "	cudaStatus = cudaSetDevice(0);" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		// Allocate GPU buffers for three vectors (two input, one output).
		out << "	cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(" << uniType << "));" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMalloc failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		out << "	cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(" << uniType << "));" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMalloc failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		out << "	cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(" << uniType << "));" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMalloc failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		// Copy input vectors from host memory to GPU buffers.
		out << "	cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(" << uniType << "), cudaMemcpyHostToDevice);" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMemcpy failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		out << "	cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(" << uniType << "), cudaMemcpyHostToDevice);" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMemcpy failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		// Launch a kernel on the GPU with one thread for each element.
		out << "	" << kernelName << "<<<1, size>>>(dev_c, dev_a, dev_b);" << "\n" << "\n";

		// cudaDeviceSynchronize waits for the kernel to finish, and returns
		// any errors encountered during the launch.  The mul path used to
		// report "addListKernel" here; the message now names the actual kernel.
		out << "	cudaStatus = cudaDeviceSynchronize();" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaDeviceSynchronize returned error code %d after launching " << kernelName << "!\", cudaStatus);" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		// Copy output vector from GPU buffer to host memory.
		out << "	cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(" << uniType << "), cudaMemcpyDeviceToHost);" << "\n";
		out << "	if (cudaStatus != cudaSuccess) {" << "\n";
		out << "		fprintf(stderr, \"cudaMemcpy failed!\");" << "\n";
		out << "		goto Error;" << "\n";
		out << "	}" << "\n" << "\n";

		out << "	Error:" << "\n";
		out << "		cudaFree(dev_c);" << "\n";
		out << "		cudaFree(dev_a);" << "\n";
		out << "		cudaFree(dev_b);" << "\n" << "\n";

		out << "		return cudaStatus;" << "\n";
		out << "	}" << "\n\n";
	}

	// Generates host code for one element-wise matrix operation: first makes
	// sure the matching CUDA kernel/wrapper pair exists in the output file
	// (emitted at most once per element type, tracked by flags on the
	// context), then produces the code fragment that declares the operand
	// arrays and invokes the wrapper.
	// Returns a heap-allocated string the caller takes ownership of (legacy
	// interface, kept for compatibility).
	std::string* MTMatrixBinaryOperator::generateCode(CodeGenContext& context)
	{		
		std::string type1 = *expr1.getType();
		std::string type2 = *expr2.getType();

		// Unified element type for the generated kernel.  The old test
		// (lexicographic compare against "double") also classified "float"
		// as int, generating integer kernels for float data; only a pair of
		// genuine ints selects the int kernel now — anything else keeps the
		// safe "double" default.
		std::string uniType = "double";
		if (type1 == "int" && type2 == "int") uniType = "int";

		// ==================================================================================================

		std::ofstream *genFileStream = context.getStream();

		if (op.compare("add") == 0){
			// Emit the add kernel/wrapper once per element type.
			bool writeFunction = false;
			if (uniType.compare("double") == 0) {
				if (!context.listDoubleAdd) {
					context.listDoubleAdd = true;
					writeFunction = true;
				}
			}
			else if (!context.listIntAdd) {
				context.listIntAdd = true;
				writeFunction = true;
			}
			if (writeFunction)
				emitCudaWrapper(*genFileStream, uniType, "addListKernel", "addWithCuda", "+");
		}
		else if (op.compare("mul") == 0){
			// Emit the mul kernel/wrapper once per element type.
			bool writeFunction = false;
			if (uniType.compare("double") == 0) {
				if (!context.matrixDoubleMul) {
					context.matrixDoubleMul = true;
					writeFunction = true;
				}
			}
			else if (!context.matrixIntMul) {
				context.matrixIntMul = true;
				writeFunction = true;
			}
			if (writeFunction)
				emitCudaWrapper(*genFileStream, uniType, "mulMatrixKernel", "mulMatrixWithCuda", "*");
		}

		// Fresh temporary names for the generated host-side variables.
		std::ostringstream ss, v0, v1, v2;

		v0 << MTNode::tmpVariable << MTNode::tmpVariableCount++;
		std::string var0 = v0.str(); 

		v1 << MTNode::tmpVariable << MTNode::tmpVariableCount++;
		std::string var1 = v1.str(); 

		v2 << MTNode::tmpVariable << MTNode::tmpVariableCount++;
		std::string var2 = v2.str();

		// NOTE(review): the fragment starts mid-statement — presumably the
		// caller prepends the declaration of the result buffer; verify
		// against the call sites.
		ss << " = " << var0 << ";" << "\n";		

		if (expr2.getClassType() == MATRIXBINARYOP){
			// Right operand is itself a binary op: materialise the left
			// operand, then a zero-filled buffer for the nested result.
			ss << "const " << type1 << " " << var1 << "[" << *expr1.getNumberOfElements() << "]" << *expr1.generateCode(context) << ";" << "\n";
			// NOTE(review): tmpVariableCount is read here WITHOUT being
			// incremented — var4 aliases the next temporary allocated by the
			// nested generateCode() call above.  Looks intentional, but
			// confirm against the nested-operator output.
			std::ostringstream v4;
			v4 << MTNode::tmpVariable << MTNode::tmpVariableCount;
			std::string var4 = v4.str(); 
			ss << type2 << " " << var4 << "[" << *expr2.getNumberOfElements() << "]" << " = { 0 };" << "\n";
			ss << type2 << " *" << var2  << *expr2.generateCode(context) << ";";
		}
		else {
			if (expr1.getNumberOfElements()->compare(*expr2.getNumberOfElements()) != 0){
				std::cout << "DIFFRENT SIZE OF MATRIXES" << std::endl;
			}
			else {
				ss << "const " << type1 << " " << var1 << "[" << *expr1.getNumberOfElements() << "]" << *expr1.generateCode(context) << ";" << "\n";
				ss << "const " << type2 << " " << var2 << "[" << *expr2.getNumberOfElements() << "]" << *expr2.generateCode(context) << ";" << "\n";
			}
		}

		// Invoke the wrapper emitted above.  The failure message names the
		// wrapper that actually ran — the mul path previously reported
		// "addWithCuda failed!".
		std::string wrapperName = "addWithCuda";	// default matches legacy message
		if (op.compare("mul") == 0) wrapperName = "mulMatrixWithCuda";

		if (op.compare("add") == 0 || op.compare("mul") == 0){
			ss << "\ncudaError_t cudaStatus = " << wrapperName << "(" << var0 << ", " << var1 << ", " << var2 << ", " << *expr1.getNumberOfElements() << ");" << "\n";
		}
		ss << "if (cudaStatus != cudaSuccess) {" << "\n";
		ss << "	fprintf(stderr, \"" << wrapperName << " failed!\");" << "\n";
		ss << "	return 1;" << "\n";
		ss << "}" << "\n";

		// -----------------------------------------------------------------------

		return new std::string(ss.str());
	}
		
	// Combined element type of the binary expression.  Lexicographic order
	// happens to rank "double" < "float" < "int", so comparing against the
	// "float" pivot picks the widest operand type: any operand below the
	// pivot means double, both above means int, otherwise float.
	// Returns a heap-allocated string the caller takes ownership of.
	// (The old version allocated "float" up front and leaked it whenever a
	// branch reassigned the pointer — now exactly one allocation is made.)
	std::string* MTMatrixBinaryOperator::getType()
	{
		std::string *expr1_type = expr1.getType();
		std::string *expr2_type = expr2.getType();
		const std::string pivot("float");

		const char* result = "float";
		if (*expr1_type < pivot || *expr2_type < pivot) result = "double";
		else if (*expr1_type > pivot && *expr2_type > pivot) result = "int";

		return new std::string(result);
	}
	// Element count of the expression, taken from the left operand.  Both
	// operands of an element-wise matrix op must agree in size; on mismatch
	// a marker string is returned instead.
	std::string* MTMatrixBinaryOperator::getNumberOfElements()
	{
		std::string* lhsCount = expr1.getNumberOfElements();
		std::string* rhsCount = expr2.getNumberOfElements();

		if (*lhsCount == *rhsCount)
			return lhsCount;

		return new std::string("ERROR IN NUMBER OF ELEMENTS");
	}
	
	// Class-type tag used for dispatch in generated-code decisions
	// (e.g. the nested-operator branch in generateCode).
	int MTMatrixBinaryOperator::getClassType(){
		return MATRIXBINARYOP;
	}
}