#include <stdlib.h>

#include <common/macro/build.h>
#include <common/macro/debug.h>
#include <common/macro/array.h>
#include <common/util.h>

#include <query/blobify.h>
#include <query/query.h>
#include <query/filter.h>
#include <query/result.h>
#include <query/iterator.h>
#include <query/commands/insert.h>
#include <storage/storage.h>
#include <storage/page.h>
#include <storage/tuple/page_tuple.h>
#include <transaction/transaction.h>

/*
 * Convert an insert item (raw column data + sizes) into a tuple, blobifying
 * the largest columns out-of-line until the tuple fits comfortably in a page.
 *
 * On success the tuple's atoms reference insert_item's buffers (blobified
 * columns are replaced in-place by a page_number_t link). Returns RESULT_OK,
 * or RESULT_DATATOOLARGE when even full blobification cannot make it fit.
 */
API_LIBLOCAL result_e query_insert_tuplify(query_t *query, insert_item_t *insert_item, tuple_t *tuple) {
	int i;
	size_t total_size;
	page_item_t items_changed, max_sized_item;
	tuple_atom_t atom;
	page_number_t page_number_blob;

	/*
	 * Implicitly tuplify using direct data.
	 * Also use this loop to assess total size.
	 */
	total_size = 0;
	for (i = 0; i < query->table->columns_c; i++) {
		total_size += insert_item->sizes[i];

		atom.type = TUPLE_ATOM_DATA;
		atom.data = insert_item->data[i];
		atom.size = insert_item->sizes[i];
		ARRAY_APPEND(tuple->atoms, tuple->atoms_c, atom);
	}

	items_changed = 0;
	/* The formula is an arbitrary one. */
	while (total_size > query->storage->page_size / 4) {
		if (items_changed == query->table->columns_c) {
			/* We've blobified it all, and yet, we can't fit. */
			return RESULT_DATATOOLARGE;
		}

		/*
		 * This is algorithmically suboptimal, but it is unlikely
		 * to be called too often.
		 */
		max_sized_item = 0;
		for (i = 0; i < query->table->columns_c; i++) {
			if (insert_item->sizes[i] > insert_item->sizes[max_sized_item]) {
				max_sized_item = i;
			}
		}

		if (insert_item->sizes[max_sized_item] <= sizeof(page_number_t)) {
			/* Blobification of the largest item still won't help. */
			return RESULT_DATATOOLARGE;
		}

		/* NOTE(review): query_blobify's result is not checked here — confirm it cannot fail, or propagate its error. */
		query_blobify(query, insert_item->data[max_sized_item], insert_item->sizes[max_sized_item], &page_number_blob);

		/* Out with the old, in with the new... */
		free(insert_item->data[max_sized_item]);
		total_size -= insert_item->sizes[max_sized_item];
		total_size += sizeof(page_number_t);

		insert_item->data[max_sized_item] = util_memdup(&page_number_blob, sizeof(page_number_t));
		insert_item->sizes[max_sized_item] = sizeof(page_number_t);

		tuple->atoms[max_sized_item].type = TUPLE_ATOM_BLOBLINK;
		tuple->atoms[max_sized_item].data = insert_item->data[max_sized_item];
		tuple->atoms[max_sized_item].size = insert_item->sizes[max_sized_item];

		/*
		 * Count the blobification: previously this counter was never
		 * incremented, leaving the "blobified it all" bound above dead.
		 */
		items_changed++;
	}
	return RESULT_OK;
}

/*
 * Write every queued tuple out to tuple pages, then release the queue.
 *
 * Pages are filled greedily: the current page is kept open until a tuple no
 * longer fits, at which point it is written back and a new page is obtained —
 * either an existing one via the free-space map, or a brand new one.
 *
 * Returns RESULT_OK on success, RESULT_DEADLOCK if a page write-lock cannot
 * be taken, or RESULT_DATATOOLARGE if a tuple cannot fit even a fresh page.
 */
API_LIBLOCAL result_e query_insert_flush(query_t *query) {
	int i, j;
	page_t *page;
	tuple_t tuple;
	page_number_t page_number;
	page_number_t *page_number_skip;
	page_size_t page_size_request;

	page = NULL;

	for (i = 0; i < query->insert_ctx.queue_c; i++) {
		/*
		 * The entire tuple (ie. new row) is ready. Now, we need to
		 * figure out where can we insert it. We either already have
		 * a suitable page open, or we need to get a new one -- in which
		 * case we either find an existing with enough space, or create
		 * a new altogether.
		 */
		page_size_request = tuple_headersize(&query->insert_ctx.queue[i].tuple) + tuple_datasize(&query->insert_ctx.queue[i].tuple);
		if (page != NULL) {
			if (page_get_freespace(page) < page_size_request) {
				debugmsg("Writing back.\n");

				/*
				 * We cannot fit the tuple in this page, so write back all changes.
				 * It is clearly known that the page is dirty, because if we had
				 * not written into it, it wouldn't be open in the first place.
				 *
				 * NOTE(review): unlike the final write-back below, this path does
				 * not call storage_page_rwunlock/storage_fsm_unlock — confirm
				 * intermediate pages don't hold locks here.
				 */
				storage_page_write(query->storage, page_number, page);
				storage_fsm_lease_return(query->storage, page_number, PAGE_TUPLE, page_get_freespace(page));

				page_destroy(page);
				page = NULL;
			}
		}

		/*
		 * If it is necessary to open a new page (we either never had one, or
		 * we've just closed it), do it now.
		 */
		if (page == NULL) {
			page_number = 0;
			while (page == NULL) {
				debugmsg("Allocating page.");
				/*
				 * Never hand back the page the open iterator currently points
				 * at (relevant for UPDATE-style queries reading as they write).
				 */
				if (query->iterator.status == QUERY_ITERATOR_OPEN)
					page_number_skip = &query->iterator.pointer.page_number;
				else
					page_number_skip = NULL;

				if (storage_fsm_lease_find(query->storage, page_size_request, PAGE_TUPLE, &page_number, page_number_skip)) {
					/* A fresh page number was leased: start from a blank tuple page. */
					page = page_new(PAGE_TUPLE, query->storage->page_size);

					if (page_get_freespace(page) < page_size_request) {
						/*
						 * Not even a brand new page can satisfy this request.
						 * Should not happen at this phase, because we try to blobify beforehands.
						 *
						 * NOTE(review): the lease is returned as PAGE_ANY here but
						 * PAGE_TUPLE elsewhere — confirm the asymmetry is intended.
						 */
						storage_fsm_lease_return(query->storage, page_number, PAGE_ANY, page_get_freespace(page));
						page_destroy(page);
						return RESULT_DATATOOLARGE;
					}
				} else {
					/* An existing page with room was found: lock and read it in. */
					debugmsg("Reading the page.");
					if (storage_page_writelock(query->storage, page_number) != 0) {
						return RESULT_DEADLOCK;
					}
					storage_page_read(query->storage, page_number, &page);
					if (page->type == PAGE_ANY) {
						/* Untyped page: repurpose it as a blank tuple page. */
						page_destroy(page);
						page = page_new(PAGE_TUPLE, query->storage->page_size);
					}
				}
			}
		}

		/*
		 * We now have a page ready, let's feed it with data.
		 */
		page_tuple_insert(page->tuple, &query->insert_ctx.queue[i].tuple);
	}

	/*
	 * We're done, let's clean up. Write back data and release queue.
	 */
	if (page != NULL) {
		storage_page_write(query->storage, page_number, page);
		storage_fsm_update(query->storage, page_number, page->type, page_get_freespace(page));

		storage_page_rwunlock(query->storage, page_number);
		storage_fsm_unlock(query->storage, page_number, 1);

		page_destroy(page);
	}

	/* Release per-row buffers, then the queue itself. */
	for (i = 0; i < query->insert_ctx.queue_c; i++) {
		for (j = 0; j < query->table->columns_c; j++) {
			free(query->insert_ctx.queue[i].data[j]);
		}
		free(query->insert_ctx.queue[i].data);
		free(query->insert_ctx.queue[i].sizes);
	}
	free(query->insert_ctx.queue);

	query->insert_ctx.queue = NULL;
	query->insert_ctx.queue_c = 0;

	return RESULT_OK;
}

/*
 * Initialize the insert context on the query: an empty queue and a cleared
 * "current row" slot. Always succeeds.
 */
API_LIBLOCAL result_e query_insert_prepare(query_t *query) {
	/* Start with an empty insert queue. */
	query->insert_ctx.queue_c = 0;
	query->insert_ctx.queue = NULL;

	/* No pending row data yet. */
	query->insert_ctx.data_set = 0;
	query->insert_ctx.current.sizes = NULL;
	query->insert_ctx.current.data = NULL;

	return RESULT_OK;
}

/*
 * Finish one row: tuplify the currently staged data, stamp it with the
 * transaction identity, enqueue it, and flush the queue if it grew large.
 *
 * Returns RESULT_MISSINGDATA if no row data has been staged, a tuplification
 * error, or the flush result / RESULT_OK.
 */
API_LIBLOCAL result_e query_insert_step(query_t *query) {
	/* Flush once the queue exceeds this many rows. The value is arbitrary;
	 * better logic may be employed. */
	enum { QUERY_INSERT_FLUSH_THRESHOLD = 250 };
	result_e result;

	if (!query->insert_ctx.data_set)
		return RESULT_MISSINGDATA;

	/* Stamp the new tuple with the originating transaction. */
	tuple_init(&query->insert_ctx.current.tuple);
	query->insert_ctx.current.tuple.id_origin = query->transaction->id;
	query->insert_ctx.current.tuple.id_insert = query->transaction->id;

	/* UPDATEs record the command id for visibility; plain inserts use 0. */
	if (query->type == QUERY_UPDATE) {
		query->insert_ctx.current.tuple.id_command = query->transaction->command_id;
	} else {
		query->insert_ctx.current.tuple.id_command = 0;
	}

	/* Ensure proper tuplification. */
	result = query_insert_tuplify(query, &query->insert_ctx.current, &query->insert_ctx.current.tuple);
	if (result != RESULT_OK)
		return result;

	/* Enqueue */
	ARRAY_EXTEND(query->insert_ctx.queue, query->insert_ctx.queue_c);
	ARRAY_SET(query->insert_ctx.queue, query->insert_ctx.queue_c - 1, query->insert_ctx.current);

	/* Clear current; ownership of its buffers moved into the queue. */
	query->insert_ctx.current.data = NULL;
	query->insert_ctx.current.sizes = NULL;
	query->insert_ctx.data_set = 0;

	/* Possibly flush. */
	if (query->insert_ctx.queue_c > QUERY_INSERT_FLUSH_THRESHOLD)
		return query_insert_flush(query);
	else
		return RESULT_OK;
}

/*
 * Flush any queued rows and reset the "current row" slot. Returns the
 * flush's result.
 */
API_LIBLOCAL result_e query_insert_finalize(query_t *query) {
	result_e flush_result;

	/* Push out everything still sitting in the queue. */
	flush_result = query_insert_flush(query);

	/* Clear the staging slot regardless of the flush outcome. */
	query->insert_ctx.current.sizes = NULL;
	query->insert_ctx.current.data = NULL;
	query->insert_ctx.data_set = 0;

	return flush_result;
}
