// LucasLehmer, Performs the Lucas Lehmer algorithm
// Copyright (C) 2014  David McCloskey
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include "options.h"
#include "algorithms.h"
#include "algorithms/basic_mersenne/basic_mersenne.h"
#include "algorithms/basic_mersenne/forward_fft.hpp"
#include "algorithms/basic_mersenne/reverse_fft.hpp"
#include "algorithms/basic_mersenne/square.hpp"
#include "algorithms/basic_mersenne/recombine.hpp"
#include "algorithms/basic_mersenne/modulus.hpp"

#pragma warning(push)
#pragma warning(disable : 6011)
#include <tbb/cache_aligned_allocator.h>
#include <tbb/parallel_for.h>
#include <tbb/atomic.h>
#pragma warning(pop)

#ifdef __clang__
#define __LZCNT__
#include <x86intrin.h>
#else
#endif

namespace algorithms
{

	// Runs one Lucas-Lehmer squaring pass over the residue held in `limbs`
	// on a single thread: forward FFT, pointwise squares, reverse FFT, block
	// recombination (carry propagation mod M) and reduction mod 2^e - 1.
	//
	// e     - exponent of the Mersenne candidate 2^e - 1
	// k     - K = 2^(k - 1); the FFT performs (k - 1) butterfly stages
	// K     - number of FFT coefficients (a power of two)
	// n     - limbs per coefficient
	// M     - limbs per block
	// w     - twiddle-factor base, in limbs
	// limbs - array of K coefficient limb pointers, updated in place
	static void execute(uint32_t e, uint32_t k, uint32_t K, uint32_t n, uint32_t M, uint32_t w, uint64_t ** limbs)
	{
		// Perform forward fft, one butterfly stage per iteration
		for (uint32_t i = 0; i < (k - 1); ++i)
		{
			// Stage i works on (1 << i) blocks of (K >> i) coefficients each
			fft_config config;
			config.coeffs = K >> i;
			config.half_coeffs = K >> (i + 1);
			config.half_coeffs_bits = (k - 1) - i - 1;
			config.coeffs_mask = config.half_coeffs - 1;
			config.blocks_mask = ~config.coeffs_mask;
			config.twiddle_base = (1 << i) * w;

			// Debug trace, kept disabled to match the reverse-fft loop below
			//printf("i: %u, K: %u, blocks: %u, coeffs: %u\n", i, K, 1u << i, K >> i);
			forward_fft(0, K / 2, config, limbs);
		}

		// Perform pointwise-squares
		square(0, K, K, n, limbs);

		// Perform reverse fft
		for (uint32_t i = 0; i < (k - 1); ++i)
		{
			uint32_t blocks = K >> (i + 1);
			uint32_t coeffs_per_block = 1 << (i + 1);
			//printf("i: %u, K: %u, blocks: %u, coeffs: %u\n", (k - 1) - i - 1, K, blocks, coeffs_per_block);
			reverse_fft(0, K / 2, K, n, w, blocks, coeffs_per_block, limbs);
		}

		// Recombine the blocks, essentially performing modulus M
		recombine(0, K, K, n, M, limbs);

		// Perform modulus the original exponent
		modulus(0, K, K, e, limbs);
	}

	// Parallel variant of the Lucas-Lehmer squaring pass: the same pipeline
	// as the single-threaded execute(), with every phase split across a
	// power-of-two number of TBB tasks.
	//
	// e       - exponent of the Mersenne candidate 2^e - 1
	// threads - requested worker count (rounded down to a power of two)
	// k, K, n, M, w, limbs - as in the single-threaded execute()
	static void execute(uint32_t e, uint32_t threads, uint32_t k, uint32_t K, uint32_t n, uint32_t M, uint32_t w, uint64_t ** limbs)
	{
		// Determine the number of threads to use, ensuring it is a power of 2,
		// rounded down. (The previous `32 - __lzcnt32(threads - 1)` rounded UP
		// for non-power-of-two counts, oversubscribing the requested budget,
		// and __lzcnt32(0) would be hit for threads == 1.)
		uint32_t thread_bits = (threads > 1) ? (31 - __lzcnt32(threads)) : 0;
		uint32_t alg_threads = 1 << thread_bits;
		// Never use more workers than there are butterfly pairs
		if (alg_threads > (K / 2))
			alg_threads = K / 2;

		// Work per task. The FFT phases walk K / 2 butterfly pairs, while the
		// pointwise phases (square / recombine / modulus) walk all K
		// coefficients, as the serial execute() does (0..K). The pointwise
		// phases previously reused the FFT stride and therefore only covered
		// the first half of the coefficients.
		uint32_t coeffs_per_thread = (K / 2) / alg_threads;
		uint32_t full_coeffs_per_thread = K / alg_threads;

		// Perform forward fft, one butterfly stage per iteration
		for (uint32_t i = 0; i < (k - 1); ++i)
		{
			// Stage i works on (1 << i) blocks of (K >> i) coefficients each
			fft_config config;
			config.coeffs = K >> i;
			config.half_coeffs = K >> (i + 1);
			config.half_coeffs_bits = (k - 1) - i - 1;
			config.coeffs_mask = config.half_coeffs - 1;
			config.blocks_mask = ~config.coeffs_mask;
			config.twiddle_base = (1 << i) * w;

			// Debug trace, kept disabled to match the reverse-fft loop below
			//printf("i: %u, K: %u, blocks: %u, coeffs: %u\n", i, K, 1u << i, K >> i);
			tbb::parallel_for<uint32_t>(0, alg_threads, 1, [=](uint32_t id) {
				uint32_t thread_coeff = id * coeffs_per_thread;
				forward_fft(thread_coeff, thread_coeff + coeffs_per_thread, config, limbs);
			});
		}

		// Perform pointwise-squares
		tbb::parallel_for<uint32_t>(0, alg_threads, 1, [=](uint32_t id) {
			square(id * full_coeffs_per_thread, (id + 1) * full_coeffs_per_thread, K, n, limbs);
		});

		// Perform reverse fft
		for (uint32_t i = 0; i < (k - 1); ++i)
		{
			uint32_t blocks = K >> (i + 1);
			uint32_t coeffs_per_block = 1 << (i + 1);
			//printf("i: %u, K: %u, blocks: %u, coeffs: %u\n", (k - 1) - i - 1, K, blocks, coeffs_per_block);
			tbb::parallel_for<uint32_t>(0, alg_threads, 1, [=](uint32_t id) {
				reverse_fft(id * coeffs_per_thread, (id + 1) * coeffs_per_thread, K, n, w, blocks, coeffs_per_block, limbs);
			});
		}

		// Recombine the blocks, essentially performing modulus M
		tbb::parallel_for<uint32_t>(0, alg_threads, 1, [=](uint32_t id) {
			recombine(id * full_coeffs_per_thread, (id + 1) * full_coeffs_per_thread, K, n, M, limbs);
		});

		// Perform modulus the original exponent
		tbb::parallel_for<uint32_t>(0, alg_threads, 1, [=](uint32_t id) {
			modulus(id * full_coeffs_per_thread, (id + 1) * full_coeffs_per_thread, K, e, limbs);
		});
	}

	// Algorithm entry point for the "basic mersenne" Lucas-Lehmer test of
	// 2^e - 1: derives the transform parameters, allocates the working
	// residue, runs the squaring pipeline (serial or parallel), then frees
	// all storage.
	//
	// e       - candidate exponent
	// threads - worker count; 1 selects the single-threaded kernel
	//
	// NOTE(review): the residue check is unimplemented and the function
	// always returns false; also only one squaring pass is performed, while
	// a full Lucas-Lehmer test iterates s = s^2 - 2 (mod 2^e - 1) a total of
	// e - 2 times -- confirm whether this is intentional scaffolding.
	template <>
	bool execute<ALG_BASIC_MERSENNE>(uint32_t e, uint32_t threads)
	{
		// Calculate parameters for the transform
		uint32_t e_x_2 = e * 2;
		// N = smallest power of two >= 2 * e: total bit width of the transform
		// (assumes e >= 1 so that __lzcnt32 never sees an all-ones argument)
		uint32_t N_log_2 = 32 - __lzcnt32(e_x_2 - 1);
		uint32_t N = 1 << N_log_2;
		// Split the N bits into K = 2^(k - 1) coefficients (roughly sqrt(N))
		uint32_t k = (N_log_2 >> 1) + 1;
		uint32_t K = 1 << (k - 1);
		// M = bits held by each coefficient before padding
		uint32_t M = N / K;

		// Ensure n is a multiple of K and 64
		// (n must hold at least 2*M + k bits so a coefficient product plus
		// accumulated carries fits; first round up to a multiple of K, then
		// to a multiple of 64 so it is a whole number of limbs)
		uint32_t n = (((2 * M) + k + K - 1) / K) * K;
		n = (n + 63) & (~63);

		// Determine omega squared
		// NOTE(review): `1 << (2 * n / K)` overflows uint32_t whenever
		// 2 * n / K >= 32, which happens for larger exponents -- presumably w
		// is meant to be a shift amount that the /64 below converts from bits
		// to limbs; verify against the fft kernels for large e
		uint32_t w = 1 << (2 * n / K);

		// Convert w, M and n from bit counts to 64-bit limb counts
		w = w / 64;
		M = (M + 63) / 64;
		n = n / 64;

		// Allocate the limbs: K cache-aligned coefficient arrays of M limbs
		// each, zero-initialised.
		// NOTE(review): each buffer is M limbs, yet the kernels receive n
		// (which is larger in bits than 2*M) as the per-coefficient size --
		// confirm the kernels do not write past M limbs
		tbb::cache_aligned_allocator<uint64_t*> pointer_allocator;
		uint64_t ** limbs = pointer_allocator.allocate(K);

		tbb::cache_aligned_allocator<uint64_t> limb_allocator;
		for (uint32_t i = 0; i < K; ++i)
		{
			limbs[i] = limb_allocator.allocate(M);
			memset(limbs[i], 0, sizeof(uint64_t) * M);
		}

		// Initialize the value with the Lucas-Lehmer seed s(0) = 4
		limbs[0][0] = 4;

		// Start the threads and perform the transform
		if (threads == 1)
		{
			execute(e, k, K, n, M, w, limbs);
		}
		else
		{
			execute(e, threads, k, K, n, M, w, limbs);
		}

		// Determine if the value is correct
		// TODO: inspect the residue to decide primality instead of
		// unconditionally returning false below
		
		// Cleanup
		// (note: not exception-safe -- the buffers leak if execute() throws)
		for (uint32_t i = 0; i < K; ++i)
		{
			limb_allocator.deallocate(limbs[i], M);
		}

		pointer_allocator.deallocate(limbs, K);

		return false;
	}

};
