
#include <map>
#include <vector>
#include <list>
#include <string>
#include <algorithm>
#include <utility>
#include <tr1/unordered_map>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <dirent.h>
#include <sys/time.h>
#include <errno.h>
#include <Python.h>

using namespace std;
using namespace std::tr1;

// Each table in the database can hold a maximum of 2^32 items. Items in the
// database are identified by a 48-bit numeric ID, and hence the database as a
// whole can contain a maximum of 2^48 items.
//
// Each table can have a maximum of 2^15 attributes (columns). An attribute is
// composed of a 16-bit key field, and a 48-bit value field. Each table 
// contains a mapping from key IDs to column names, and from (key, value) pairs
// to value names. The most significant bit of the 16-bit attribute key field 
// specifies whether the attribute links to another item.
//
// Each item has a special "Can edit" attribute that links to the group or user
// that can modify the item.
//
// Each table has an associated changelog file that contains all modifications
// ever made to the table. Tables can be rebuilt based on the changelog. 


/*************/
/* UTILITIES */
/*************/

static const int KB = 1024;       // One kilobyte, in bytes
static const int MB = 1024*1024;  // One megabyte, in bytes (heap growth step)

// Convert a NUL-terminated C string to lowercase in place.
void lowercase(char* str) {
	char* p = str;
	while (*p) {
		*p = ::tolower(*p);
		++p;
	}
}
// Convert a std::string to lowercase in place.
void lowercase(string& str) {
	for (string::iterator it = str.begin(); it != str.end(); ++it)
		*it = ::tolower(*it);
}
// Replace control characters (except newline) with spaces, in place.
void sanitize(char* str) {
	for (size_t i = 0; str[i]; i++) {
		if (str[i] < 32 && str[i] != '\n') str[i] = ' ';
	}
}
// Encode newlines as vertical tabs so a value fits on one changelog line.
void newline_to_vtab(char* str) {
	while (*str) {
		if (*str == '\n') *str = '\v';
		str++;
	}
}
// Decode vertical tabs back into newlines (inverse of newline_to_vtab).
void vtab_to_newline(char* str) {
	while (*str) {
		if (*str == '\v') *str = '\n';
		str++;
	}
}

// Strip leading and trailing spaces/tabs; an all-whitespace input
// becomes the empty string.
string trim(string str) {
	const char* whitespace = " \t";
	size_t first = str.find_first_not_of(whitespace);
	if (first == string::npos) return "";
	size_t last = str.find_last_not_of(whitespace);
	return str.substr(first, last - first + 1);
}

// Growable byte buffer that starts on the stack and spills to the heap
// in 1 MB steps. `data` points at the current storage, `free` at the
// first unused byte; length() == free - data.
struct Buffer {
	char stack_buf[KB];    // FIXME: Make this template controlled...
	char* data;            // Current storage (stack_buf or heap block)
	size_t capacity;       // Total bytes available at `data`
	char* free;            // First unused byte

	Buffer() : data(stack_buf), capacity(sizeof(stack_buf)), free(data) {}
	~Buffer() { if (this->data != this->stack_buf) { ::free(this->data); } }

	// Number of bytes currently stored.
	unsigned int length() const { return this->free - this->data; }

	// Append `str` including its NUL terminator; returns the address of
	// the copy inside the buffer.
	char* add(const char* str) {
		unsigned int len = strlen(str) + 1;
		reserve(length() + len);
		memcpy(this->free, str, len);
		this->free += len;
		return this->free - len;
	}

	// Reserve `len` uninitialized bytes and return their address.
	char* allocate(unsigned int len) {
		reserve(length() + len);
		this->free += len;
		return this->free - len;
	}

	// Grow storage so at least `needed` bytes fit. Aborts on OOM.
	// BUG FIX: malloc/realloc results were previously unchecked; a failed
	// realloc would overwrite `data` with NULL (leaking the old block)
	// and crash on the next access.
	inline void reserve(unsigned int needed) {
		if (needed <= this->capacity) return;
		unsigned int new_size = (needed / MB + 1) * MB;
		unsigned int orig_len = length();
		if (this->data == this->stack_buf) {
			char* new_data = (char*)malloc(new_size);
			if (new_data == NULL) { perror("Buffer::reserve"); abort(); }
			memcpy(new_data, this->data, orig_len);
			this->data = new_data;
		} else {
			char* new_data = (char*)realloc(this->data, new_size);
			if (new_data == NULL) { perror("Buffer::reserve"); abort(); }
			this->data = new_data;
		}
		this->free = this->data + orig_len;
		this->capacity = new_size;
	}
};

// Buffer variant whose add() keeps the whole contents as one contiguous
// NUL-terminated string: each append overwrites the previous terminator,
// and the terminator it writes will be overwritten by the next append.
struct StringBuffer : public Buffer {
	char* add(const char* str) {
		unsigned int bytes = strlen(str) + 1;   // Includes the NUL
		reserve(length() + bytes);
		memcpy(this->free, str, bytes);
		char* appended = this->free;
		this->free += bytes - 1;                // Next add overwrites NUL
		return appended;
	}
};

// Simple and fast strstr() variant that returns NULL if needle is not found.
static inline const char* strfind(const char* __restrict haystack,
	const char* __restrict needle){
	for (const char* start = haystack; ; start++) {
		size_t i = 0;
		while (needle[i] != '\0') {
			if (start[i] == '\0') return NULL;  // Ran out of haystack
			if (start[i] != needle[i]) break;   // Mismatch; shift window
			i++;
		}
		if (needle[i] == '\0') return start;    // Whole needle matched
	}
}





/*****************************/
/* TYPES AND DATA STRUCTURES */
/*****************************/

// Append-only changelog file; NULL until enable_logging() is called.
FILE* history = NULL;

// ItemIDs are 48-bit integers
typedef uint64_t ItemID;
// Sentinel meaning "no such item": all 48 ID bits set.
static const ItemID NO_ITEM = 0x0000ffffffffffff;
// Attribute-key flag: when set, the attribute links to another item.
static const uint16_t LINKER_ATTR = 0x8000;  // MSB set

// Each attribute is 8 bytes: a 16-bit key plus a packed union holding
// either a 48-bit linked ItemID or a 32-bit value index.
struct Attribute {
	union __attribute__((__packed__)) {
		uint64_t item_id:48;   // Linker attributes
		uint32_t value;        // Normal attributes
	};
	uint16_t key;              // Column index; MSB (LINKER_ATTR) marks links
};

struct Table;
// Physical location of an item: the table that stores it and its row index.
struct ItemAddress { Table* table; unsigned int row; };

// Translate ItemIDs into memory locations. Indexed by ItemID; entries for
// IDs that were never allocated are value-initialized (table == NULL).
vector<ItemAddress> item_addresses;

// A single database row: a packed fixed header followed by an inline,
// variable-length array of attributes (flexible array member).
struct __attribute__((__packed__)) Item {
	struct __attribute__((__packed__)) {
		uint64_t item_id:48;    // Globally unique 48-bit ID
		uint64_t can_edit:48;   // ID of the user/group allowed to modify
		uint16_t num_attribs;   // Number of entries in attributes[]
	};
	Attribute attributes[0];    // Storage carved out by the owning Table

	// Look up an item by ID via the global item_addresses table.
	static Item& get(ItemID id);
	Attribute& operator[](unsigned int n) { return this->attributes[n]; }
	// Total size of this item in bytes, header plus attributes.
	size_t bytes() {
		return sizeof(Item) + sizeof(Attribute) * this->num_attribs;
	}

	// Dump all attributes into a key -> value/item_id map.
	void to_map(map<uint16_t, uint64_t>& fields);
	// Serialize an item (and the items it links to) as a JSON object.
	static void to_json(ItemID item_id, StringBuffer& buf);
	// Return the attribute with the given key, or NULL if absent.
	Attribute* find_attribute(int column);
	// Apply flat (key, value) string pairs as updates to an existing item.
	static void update(ItemID item_id, const vector<const char*>& updates);
};

// Comparison operators for search queries. NOTE(review): only EXACT and
// SUBSTRING are currently handled in Table::search() -- confirm the rest
// are planned rather than forgotten.
enum QueryType {
	EXACT = 0,
	SUBSTRING = 1,
	LESS_THAN = 2,
	LESS_THAN_OR_EQUAL = 3,
	GREATER_THAN = 4,
	GREATER_THAN_OR_EQUAL = 5
};
// One parsed query component: <key> <operator> <value>.
struct Query { int type; string key; string value; };

// Uniqueness index: maps a composite of column values to the single item
// that has them, or NO_ITEM when several items share the same composite.
struct Index {
	unordered_map<string, ItemID> hashmap;
	
	// Build the index over the given columns of every item in `table`.
	Index(Table& table, vector<size_t>& columns);
	Index(const Index& that) = delete;  // No copy constructor!
	// Register a freshly inserted item under its column values.
	void add(ItemID item_id, vector<const char*>& values);
	// Return the unique item with these values, or NO_ITEM.
	ItemID get(const vector<const char*>& fields);
};

// Summary of one changelog transaction, reported by latest_changes().
struct TransactionBrief {
	ItemID who; char when[16]; size_t created; size_t modified; size_t deleted;
};

// A single database table: item storage, column-name and column-value
// dictionaries, and a short list of recent transactions.
struct Table {
	Table(const string& name) : name(name) {}
	Table(const Table& that) = delete;  // No copy constructor!

	// Look up a table by name (creating it) or by the ID of an item in it.
	static Table* get(const string& table_name);
	static Table* get(ItemID id) {
		return id >= item_addresses.size() ? NULL : item_addresses[id].table;
	}

	static vector<ItemID> search(const vector<Table*>& tables,
		vector<Query>& queries, bool case_sensitive, bool deep);

	// Resolve a column index to its (optionally lowercased) name.
	const char* column_name(uint16_t col, bool case_sensitive = true) {
		col = col & ~LINKER_ATTR;
		return (case_sensitive ? this->column_storage.data :
			this->column_storage_lowercase.data) + this->column_names[col];
	}
	// Resolve a (column, value-index) pair to its string value.
	const char* column_value(uint16_t col, unsigned int val,
		bool case_sensitive = true) {
		col = col & ~LINKER_ATTR;
		return (case_sensitive ? this->column_storage.data :
			this->column_storage_lowercase.data) +
			this->column_values[col][val];
	}
	const char* column_value(Attribute& attrib, bool case_sensitive = true) {
		return column_value(attrib.key, attrib.value, case_sensitive);
	}

	int column_index(const char* column_name, bool create = false);
	int column_value_index(uint16_t col, const char* value, bool create=false);

	Item& item(unsigned int row) {
		return *(Item*)(this->item_storage.data + this->item_offsets[row]);
	}
	size_t num_items() { return this->item_offsets.size(); }
	size_t num_columns() { return this->column_names.size(); }
	size_t num_column_values(int col) {
		return this->column_values[col].size();
	}

	// Carve out storage for a new item and register its global address.
	Item* _allocate_item(ItemID item_id, int num_attribs) {
		Item* item = (Item*)this->item_storage.allocate(
			sizeof(Item) + num_attribs * sizeof(Attribute));
		item->item_id = item_id;
		item->can_edit = 0;
		item->num_attribs = num_attribs;
		uint32_t offset = (char*)item - this->item_storage.data;
		this->item_offsets.push_back(offset);
		ItemAddress addr = { this, (uint32_t)this->item_offsets.size() - 1 };
		if (item_id >= item_addresses.size()) item_addresses.resize(item_id+1);
		item_addresses[item_id] = addr;
		return item;
	}

	Item& allocate_item(int num_attribs) {
		return *_allocate_item(item_addresses.size(), num_attribs);
	}
	Item& allocate_item_with_id(ItemID item_id, int num_attribs) {
		return *_allocate_item(item_id, num_attribs);
	}

	// Resize an existing item, reusing its slot when possible. When a new
	// slot is needed the old storage is abandoned (not freed).
	Item& reallocate_item(ItemID item_id, int num_attribs) {
		// Check if we can reuse the old slot.
		Item& old = Item::get(item_id);
		if (old.num_attribs >= num_attribs) {
			old.num_attribs = num_attribs;
			return old;
		}

		// Need to allocate a new slot. Save the old header fields first:
		// allocate() may realloc item_storage and invalidate `old`.
		uint64_t old_can_edit = old.can_edit;
		Item* item = (Item*)this->item_storage.allocate(
			sizeof(Item) + num_attribs * sizeof(Attribute));
		item->item_id = item_id;
		// BUG FIX: can_edit was previously left uninitialized in the new
		// slot, silently dropping the item's edit permissions.
		item->can_edit = old_can_edit;
		item->num_attribs = num_attribs;
		uint32_t offset = (char*)item - this->item_storage.data;
		this->item_offsets[item_addresses[item_id].row] = offset;
		return *item;
	}

	string name;
	list<TransactionBrief> latest_changes;   // Most recent first, max 10

private:
	vector<unsigned int> column_names;             // Offsets into column_storage
	vector< vector<unsigned int> > column_values;  // Per-column value offsets
	vector<uint32_t> item_offsets;                 // Row -> offset in item_storage
	
	Buffer item_storage;
	Buffer column_storage;
	Buffer column_storage_lowercase;
};

// Accumulates changelog records for one logical transaction and appends
// them to the history file on commit().
struct Transaction {
	Transaction(Table* table, ItemID who);
	// Flush buffered records to the changelog with a header line.
	void commit(int added, int modified, int deleted);

	// Record creation of / an update to an item.
	void create_item(ItemID item_id, ItemID can_edit);
	void update_item(ItemID item_id);

	// Record one "key<TAB>value" attribute, or a '%'-prefixed link.
	void attribute(const char* key, const char* value);
	void link(const char* key, ItemID target);

	StringBuffer data;   // Buffered changelog records
	Table* table;        // Table this transaction applies to
	ItemID who;          // User performing the change
	time_t when;         // Transaction start time
};




/**********************
 * ITEM FUNCTIONALITY *
 **********************/

// Look up an item by ID via the global address table.
Item& Item::get(ItemID id) {
	const ItemAddress& addr = item_addresses[id];
	return addr.table->item(addr.row);
}

// Return a pointer to the attribute with the given key, or NULL.
Attribute* Item::find_attribute(int attrib_id) {
	Attribute* end = this->attributes + this->num_attribs;
	for (Attribute* attrib = this->attributes; attrib != end; attrib++) {
		if (attrib->key == attrib_id) return attrib;
	}
	return NULL;
}

void Item::to_map(map<uint16_t, uint64_t>& fields) {
	for (int a = 0; a < this->num_attribs; a++) {
		Attribute& attrib = this->attributes[a];
		fields[attrib.key] = (attrib.key & LINKER_ATTR) ?
			attrib.item_id : attrib.value;
	}
}

// Apply flat (key, value) string pairs to an existing item. An empty
// value erases the field; otherwise the field is added or overwritten.
void Item::update(ItemID item_id, const vector<const char*>& updates) {
	Table* table = Table::get(item_id);
	// BUG FIX: the old check compared a C++ reference against NULL, which
	// is undefined behavior and optimized away; validate the table pointer
	// (NULL for out-of-range or never-allocated IDs) before dereferencing.
	if (table == NULL) { printf("Invalid item ID.\n"); return; }
	Item& old = Item::get(item_id);

	// Build a dictionary out of the old values.
	map<uint16_t, uint64_t> fields;
	old.to_map(fields);
	
	// Update old dictionary with new values. (k + 1 guards against an
	// odd-length vector reading past the end.)
	for (size_t k = 0; k + 1 < updates.size(); k += 2) {
		if (updates[k+1][0] == '\0') {   // If empty string, erase field
			fields.erase(table->column_index(updates[k], false));
		} else {
			int col = table->column_index(updates[k], true);
			int row = table->column_value_index(col, updates[k+1], true);
			fields[col] = row;
		}
	}

	// Write the merged attribute set back into (possibly new) storage.
	Item& item = table->reallocate_item(item_id, fields.size());
	Attribute* attrib = item.attributes;
	for (auto iter = fields.begin(); iter != fields.end(); iter++) {
		attrib->key = iter->first;
		// BUG FIX: linker attributes carried over from the old item hold a
		// 48-bit target ID; writing the 32-bit `value` member truncated it.
		if (iter->first & LINKER_ATTR) attrib->item_id = iter->second;
		else attrib->value = iter->second;
		attrib += 1;
	}
}

// Serialize an item as JSON, inlining attributes of items it links to
// under "Link name > " prefixes. Follows at most 20 linked items.
void Item::to_json(ItemID item_id, StringBuffer& buf) {
	Item& item = Item::get(item_id);
	Table* table = Table::get(item_id);
	
	// Add item ID. (48-bit IDs are at most 15 decimal digits, so tmp[16]
	// is exactly large enough.)
	char tmp[16]; sprintf(tmp, "%lu", item_id);
	buf.add("{\"id\":"); buf.add(tmp);

	string prefixes[20];
	Item* linked[20] = {&item};
	int N = 1;   // Number of elements in linked[]
	
	for (int checked = 0; checked < 20 && checked < N; checked++) {
		const string& prefix = prefixes[checked];

		// Add the special "Can edit" field.
		char tmp[16]; sprintf(tmp, "%lu", linked[checked]->can_edit);
		buf.add(",\"Can edit\":"); buf.add(tmp);

		for (int a = 0; a < linked[checked]->num_attribs; a++) {
			Attribute& attrib = linked[checked]->attributes[a];
			// NOTE(review): names/values are resolved via the root item's
			// table even for linked items -- confirm links never cross
			// tables here (Table::search handles cross-table links).
			const char* key = table->column_name(attrib.key);
			
			// Check if we are dealing with a linker key.
			if (attrib.key & LINKER_ATTR) {
				// BUG FIX: bound N so long link chains cannot overflow the
				// fixed-size prefixes[]/linked[] arrays (N grew unchecked).
				if (N < 20) {
					prefixes[N] = prefix + key + " > ";
					linked[N] = &Item::get(attrib.item_id);
					N += 1;
				}
			} else {
				buf.add(",\""); buf.add(prefix.c_str()); buf.add(key);
				buf.add("\":\""); buf.add(table->column_value(attrib));
				buf.add("\"");
			}
		}
	}
	buf.add("}");
}


/***********************/
/* INDEX FUNCTIONALITY */
/***********************/

// Build the index over the given columns of every item in the table.
// Items missing any requested column are skipped; composites shared by
// more than one item are marked NO_ITEM (non-unique).
Index::Index(Table& table, vector<size_t>& columns) {
	for (size_t i = 0; i < table.num_items(); i++) {
		Item& item = table.item(i);

		// Concatenate the item's values for the requested columns.
		StringBuffer composite;
		bool complete = true;
		for (size_t c = 0; c < columns.size(); c++) {
			Attribute* attrib = item.find_attribute(columns[c]);
			if (attrib == NULL) { complete = false; break; }
			composite.add(table.column_value(*attrib));
		}
		if (!complete) continue;   // Did not find all attributes

		auto p = this->hashmap.insert(
			pair<string, ItemID>(composite.data, +item.item_id));
		if (p.second == false && p.first->second != item.item_id)
			p.first->second = NO_ITEM;  // Mark non-unique
	}
}

// Register a new item under the concatenation of its column values.
void Index::add(ItemID item_id, vector<const char*>& values) {
	StringBuffer composite;
	for (size_t i = 0; i < values.size(); i++)
		composite.add(values[i]);
	// If the composite already maps to a different item, it is no longer
	// a unique key.
	auto result = this->hashmap.insert(
		pair<string, ItemID>(composite.data, item_id));
	if (!result.second && result.first->second != item_id)
		result.first->second = NO_ITEM;  // Mark non-unique
}

// Return the unique item whose indexed columns concatenate to `values`,
// or NO_ITEM when absent (or marked non-unique at insertion time).
ItemID Index::get(const vector<const char*>& values) {
	StringBuffer composite;
	for (size_t i = 0; i < values.size(); i++)
		composite.add(values[i]);
	auto found = this->hashmap.find(composite.data);
	if (found == this->hashmap.end()) return NO_ITEM;
	return found->second;
}






/***********************/
/* TABLE FUNCTIONALITY */
/***********************/

// Look up a table by name, creating it on first use. Tables are never
// destroyed; they live for the whole process lifetime.
Table* Table::get(const string& table_name) {
	static map<string, Table*> tables;
	Table*& slot = tables[table_name];
	if (slot == NULL) slot = new Table(table_name);
	return slot;
}

// Return the index of the named column, creating it when `create` is set.
// Returns -1 when the column is missing and create is false.
int Table::column_index(const char* column_name, bool create) {
	size_t C = num_columns();
	for (size_t c = 0; c < C; c++) {
		if (strcmp(this->column_name(c), column_name) == 0) return c;
	}
	if (!create) return -1;    // Column was not found

	// Store the name (plus a lowercase twin) and start an empty value list.
	unsigned int offset =
		this->column_storage.add(column_name) - this->column_storage.data;
	this->column_names.push_back(offset);
	lowercase(this->column_storage_lowercase.add(column_name));
	this->column_values.emplace_back();
	return num_columns() - 1;
}

// Return the index of `value` within the column's value dictionary,
// adding it when `create` is set; -1 when absent and create is false.
int Table::column_value_index(uint16_t col, const char* value, bool create) {
	// If we are adding a new value to a column, and the column already
	// contains over 50 different values, we can be pretty sure that
	// redundancy is low, so we only look through the first 50 values.
	int limit = num_column_values(col);
	if (create && limit >= 50) limit = 50;
	for (int v = 0; v < limit; v++) {
		if (strcmp(column_value(col, v), value) == 0) return v;
	}
	if (!create) return -1;    // Value was not found.

	// Store the value (plus a lowercase twin) for this column.
	unsigned int offset =
		this->column_storage.add(value) - this->column_storage.data;
	this->column_values[col].push_back(offset);
	lowercase(this->column_storage_lowercase.add(value));
	return num_column_values(col) - 1;
}





/**************/
/* CHANGELOGS */
/**************/

// Start recording a transaction against `table` on behalf of user `who`,
// timestamped at construction time.
Transaction::Transaction(Table* table, ItemID who) 
	: table(table), who(who), when(::time(NULL)) {}

// Log the creation of an item together with its "can edit" owner.
void Transaction::create_item(ItemID item_id, ItemID can_edit) {
	char line[64];
	sprintf(line, "\nCREATE %lu %lu", item_id, can_edit);
	this->data.add(line);
}

// Log the start of an update to an existing item.
void Transaction::update_item(ItemID item_id) {
	char line[64];
	sprintf(line, "\nUPDATE %lu", item_id);
	this->data.add(line);
}

// Log one "key<TAB>value" pair. Newlines inside the value are rewritten
// to vertical tabs (in the buffered copy) so each changelog record stays
// on a single line.
void Transaction::attribute(const char* key, const char* value) {
	this->data.add("\t");
	this->data.add(key);
	this->data.add("\t");
	char* stored = this->data.add(value);
	newline_to_vtab(stored);
}

// Log a linker attribute; linker keys are written with a '%' prefix so
// the changelog replay can tell them apart from normal attributes.
void Transaction::link(const char* key, ItemID target) {
	char id_str[16];
	sprintf(id_str, "%lu", target);
	this->data.add("\t%");
	this->data.add(key);
	this->data.add("\t");
	this->data.add(id_str);
}

// Append the buffered transaction to the changelog file, preceded by a
// header naming the table, the acting user, and a local timestamp.
// NOTE: the added/modified/deleted counts are currently unused here.
void Transaction::commit(int added, int modified, int deleted) {
	if (history == NULL) return;  // Logging not enabled
	struct tm tm; localtime_r(&this->when, &tm);
	fprintf(history, 
		"TRANSACTION ON %s BY %lu AT %04d-%02d-%02d %02d:%02d:%02d",
		this->table->name.c_str(), this->who, 1900+tm.tm_year, tm.tm_mon+1,
		tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);

	// Binding to Buffer& deliberately calls the non-virtual Buffer::add
	// (which counts the NUL terminator), so length() - 1 writes everything
	// up to and including the final '\n'.
	Buffer& buf = this->data; buf.add("\n");
	fwrite(buf.data, 1, buf.length() - 1, history);
	fflush(history);
}




/****************/
/* QUERY ENGINE */
/****************/

// Parse one query component. "key=value" becomes an EXACT match on that
// column; anything else becomes a SUBSTRING match over all columns.
Query parse_query(string& str) {
	Query q;
	size_t eq = str.find("=");
	if (eq == string::npos) {
		q.type = SUBSTRING;
		q.key = "";
		q.value = str;
	} else {
		q.type = EXACT;
		q.key = trim(str.substr(0, eq));
		q.value = trim(str.substr(eq + 1));
	}
	return q;
}

// Search the given tables for items matching ALL queries (bitmask with
// one bit per query). When `deep` is set, attributes of linked items in
// the searched tables also count towards a match.
vector<ItemID> Table::search(const vector<Table*>& tables,
	vector<Query>& queries, bool case_sensitive, bool deep) {

	// Don't allow arbitrarily complex queries (one bit per query below).
	if (queries.size() > 8) return vector<ItemID>();

	// Convert query parameters to lowercase if not case sensitive.
	if (!case_sensitive) {
		for (size_t i = 0; i < queries.size(); i++) {
			lowercase(queries[i].key);
			lowercase(queries[i].value);
		}
	}
	
	vector<ItemID> matching_items;

	// For each table, build a vector that tells which column-value pairs
	// fulfill a particular query.
	unsigned char** table_matches[tables.size()];
	uint16_t type_cols[tables.size()];   // FIXME: Find better way
	for (size_t t = 0; t < tables.size(); t++) {
		Table& table = *tables[t];
		unsigned char** matches = new unsigned char*[table.num_columns()];
		table_matches[t] = matches;
		// BUG FIX: initialize so tables without a "Type" column don't leave
		// type_cols[t] uninitialized. 0xffff can never equal a real key,
		// since attribute keys are 15-bit after the LINKER_ATTR mask.
		type_cols[t] = 0xffff;

		for (size_t c = 0; c < table.num_columns(); c++) {
			matches[c] = NULL;     // NULL means no matches in this column

			if (strcmp(table.column_name(c), "Type") == 0) type_cols[t] = c;

			size_t V = table.num_column_values(c);
			for (size_t v = 0; v < V; v++) {
				const char* col_value = table.column_value(c, v,
					case_sensitive);
				for (size_t q = 0; q < queries.size(); q++) {
					bool match = false;
					Query& query = queries[q];
					switch (query.type) {
					case EXACT:
						match = (query.value == col_value); break;
					case SUBSTRING:
						match = strfind(col_value, query.value.c_str()); break;
					}
					if (match) {
						if (matches[c] == NULL)
							matches[c] = new unsigned char[V]();  // Zero-init
						matches[c][v] |= (1 << q);
					}
				}
			}
		}
	}

	for (size_t t = 0; t < tables.size(); t++) {
		Table& table = *tables[t];
		// Look for items that match all queries.
		unsigned char all_mask = (1 << queries.size()) - 1;

		for (size_t i = 0; i < table.num_items(); i++) {
			unsigned char hits = 0;

			struct { size_t table; Item* item; } linked[20] = {
				{t, &table.item(i)} };
			int num_linked = 1;
			for (int checked = 0; checked < num_linked; checked++) {
				unsigned char** matches = table_matches[linked[checked].table];
				uint16_t type_col = type_cols[linked[checked].table];
				Item& item = *linked[checked].item;
				for (int a = 0; a < item.num_attribs; a++) {
					Attribute& attrib = item.attributes[a];
					if (attrib.key & LINKER_ATTR) {
						if (!deep) continue;
						// BUG FIX: bound num_linked so long link chains
						// cannot overflow the fixed-size linked[] array.
						if (num_linked >= 20) continue;
						auto it = find(tables.begin(), tables.end(),
							Table::get(attrib.item_id));
						if (it == tables.end()) continue;
						linked[num_linked++] = { (size_t)(it - tables.begin()),
							&Item::get(attrib.item_id) };
					} else if (matches[attrib.key]) {
						// Don't let a linked item's "Type" column satisfy a
						// query meant for the root item.
						if (checked > 0 && attrib.key == type_col) continue;
						hits |= matches[attrib.key][attrib.value];
					}
				}
			}

			if ((hits & all_mask) == all_mask)
				matching_items.push_back(table.item(i).item_id);
		}
	}

	// BUG FIX: release the per-table match bitmasks (previously leaked on
	// every search).
	for (size_t t = 0; t < tables.size(); t++) {
		Table& table = *tables[t];
		for (size_t c = 0; c < table.num_columns(); c++)
			delete[] table_matches[t][c];
		delete[] table_matches[t];
	}

	return matching_items;
}







/*******************/
/* PYTHON BINDINGS */
/*******************/

// Extract a UTF-8 C string from a Python bytes or str object. The pointer
// stays valid while the Python object is alive. Any other type prints a
// diagnostic and yields "".
const char* py_to_utf8(PyObject* pystr) {
	if (PyBytes_Check(pystr)) return PyBytes_AsString(pystr);
	if (PyUnicode_Check(pystr)) return PyUnicode_AsUTF8(pystr);
	printf("py_to_utf8() failed: ");
	PyObject_Print(pystr, stdout, 0);
	printf("\n");
	return "";
}

// Convert a Python str (treated as a single element) or a sequence of
// str/bytes objects into a vector of C++ strings.
vector<string> pyseq_to_vector(PyObject* pyseq) {
	vector<string> result;
	if (PyUnicode_Check(pyseq)) {
		result.push_back(PyUnicode_AsUTF8(pyseq));
	} else {
		for (int i = 0; i < PySequence_Size(pyseq); i++) {
			// BUG FIX: PySequence_GetItem returns a NEW reference, which
			// was previously leaked. Going through py_to_utf8 also accepts
			// bytes elements instead of crashing on a NULL char pointer.
			PyObject* item = PySequence_GetItem(pyseq, i);
			if (item == NULL) continue;  // Skip unreadable elements
			result.push_back(py_to_utf8(item));  // Copies before DECREF
			Py_DECREF(item);
		}
	}
	return result;
}

// Build a Python dict of an item's attributes, inlining linked items'
// attributes under "Link name > " prefixes. Follows at most 20 links.
// Returns a new reference.
PyObject* item_to_pydict(ItemID item_id) {
	Item& item = Item::get(item_id);
	Table* table = Table::get(item_id);
	
	PyObject* dict = PyDict_New();
	
	string prefixes[20];
	Item* linked[20] = {&item};
	int N = 1;   // Number of elements in linked[]
	
	// FIXME: When we fetch the information of linked items, we should
	// check if the user has access to those tables.
	for (int checked = 0; checked < 20 && checked < N; checked++) {
		const string& prefix = prefixes[checked];

		// Add item ID. (BUG FIX: this was previously set inside the
		// attribute loop, so an item with zero attributes got no "id"
		// entry at all, and it was redundantly re-set per attribute.)
		PyObject* pyid = PyLong_FromUnsignedLong(linked[checked]->item_id);
		PyDict_SetItemString(dict, (prefix + "id").c_str(), pyid);
		Py_DECREF(pyid);

		// Add the special "Can edit" attribute.
		pyid = PyLong_FromUnsignedLong(linked[checked]->can_edit);
		PyDict_SetItemString(dict, "Can edit", pyid); Py_DECREF(pyid);

		for (int a = 0; a < linked[checked]->num_attribs; a++) {
			Attribute& attrib = linked[checked]->attributes[a];
			const char* key = table->column_name(attrib.key);
			
			// Check if we are dealing with a linker key.
			if (attrib.key & LINKER_ATTR) {
				// BUG FIX: bound N so long link chains cannot overflow the
				// fixed-size prefixes[]/linked[] arrays (N grew unchecked).
				if (N < 20) {
					prefixes[N] = prefix + key + " > ";
					linked[N] = &Item::get(attrib.item_id);
					N += 1;
				}
			} else {
				const char* value = table->column_value(attrib);
				PyObject* pyval = PyUnicode_FromString(value);
				PyDict_SetItemString(dict, (prefix + key).c_str(), pyval);
				Py_DECREF(pyval);
			}
		}
	}
	return dict;
}



// datastore.get(item_id) -> dict of the item's attributes.
static PyObject* get(PyObject* self, PyObject* args) {
	ItemID item_id = NO_ITEM;
	if (!PyArg_ParseTuple(args, "k", &item_id)) return NULL;
	// BUG FIX: also reject IDs inside the vector's range that were never
	// allocated -- their entry is value-initialized (table == NULL) and
	// would be dereferenced inside item_to_pydict.
	if (item_id >= item_addresses.size() ||
		item_addresses[item_id].table == NULL) {
		PyErr_SetString(PyExc_RuntimeError, "Item not found.");
		return NULL;
	}
	return item_to_pydict(item_id);
}



// datastore.search(tables, queries, case=True, deep=False, output="dict")
// -> tuple of item dicts (or a JSON string, not yet implemented).
static PyObject* search(PyObject* self, PyObject* args, PyObject* keywords) {
	PyObject* table_names_arg = NULL;
	PyObject* queries_arg = NULL;
	PyObject* case_arg = Py_True;
	PyObject* deep_arg = Py_False;
	const char* output = "dict";
	
	static const char *kwlist[] = {"tables", "queries", "case", "deep",
		"output", NULL};
	if (!PyArg_ParseTupleAndKeywords(args, keywords, "OO|OOs", (char**)kwlist,
		&table_names_arg, &queries_arg, &case_arg, &deep_arg, &output))
		return NULL;
	
	// NOTE(review): only the exact Py_True singleton enables these flags;
	// truthy values like 1 are treated as False -- confirm intent.
	bool case_sensitive = (case_arg == Py_True);
	bool deep = (deep_arg == Py_True);
	
	// Resolve the tables to be searched. NOTE(review): Table::get()
	// creates missing tables on demand, so unknown names are not rejected.
	vector<string> table_names = pyseq_to_vector(table_names_arg);
	vector<Table*> tables;
	for (size_t t = 0; t < table_names.size(); t++) {
		Table* table = Table::get(table_names[t]);
		tables.push_back(table);
	}

	// Parse the search query components.
	vector<string> query_strs = pyseq_to_vector(queries_arg);
	vector<Query> queries;
	for (size_t q = 0; q < query_strs.size(); q++)
		queries.push_back(parse_query(query_strs[q]));

	vector<ItemID> matches = Table::search(tables, queries,
		case_sensitive, deep);
	
	// Output in user-requested format.
	if (strcmp(output, "dict") == 0) {
		PyObject* results = PyTuple_New(matches.size());
		for (size_t i = 0; i < matches.size(); i++)
			PyTuple_SET_ITEM(results, i, item_to_pydict(matches[i]));
		return results;
	} else if (strcmp(output, "json") == 0) {
		// BUG FIX: the comparison was missing "== 0", inverting the logic:
		// output="json" fell through to the error branch while any other
		// unknown value silently returned "".
		// FIXME: JSON output is not implemented yet.
		return PyUnicode_FromString("");
	} else {
		PyErr_SetString(PyExc_RuntimeError, "Bad output type."); return NULL;
	}
}



// datastore.insert(table_name, dict) -> the new item's ID.
static PyObject* insert(PyObject* self, PyObject* args) {
	char* table_name;
	PyObject* dict;
	if (!PyArg_ParseTuple(args, "sO", &table_name, &dict))
		return NULL;

	Table* table = Table::get(table_name);
	PyObject* fields = PyDict_Items(dict);
	Item& item = table->allocate_item(PyList_Size(fields));
	Transaction txn(table, 0); txn.create_item(item.item_id, 0);

	// Intern every key/value pair into the table's dictionaries and log it.
	for (int a = 0; a < item.num_attribs; a++) {
		PyObject* field = PyList_GetItem(fields, a);
		const char* key = py_to_utf8(PyTuple_GetItem(field, 0));
		const char* value = py_to_utf8(PyTuple_GetItem(field, 1));
		
		int c = table->column_index(key, true);
		int v = table->column_value_index(c, value, true);
		
		Attribute& attrib = item.attributes[a];
		attrib.key = c; attrib.value = v;
		txn.attribute(key, value);
	}

	Py_DECREF(fields);
	txn.commit(1, 0, 0);
	// BUG FIX: the "i" (int) format truncated 48-bit item IDs; "K" builds
	// the value from an unsigned long long.
	return Py_BuildValue("K", (unsigned long long)item.item_id);
}



// Maps an "A > B > column" header hierarchy onto nested items: each node
// owns the columns that belong to it and the children it links to.
struct HierarchyNode {
	string prefix;                 // Link name leading to this node
	vector<size_t> keys;           // Header indexes of this node's columns
	vector<HierarchyNode> linked;  // Child nodes (linked items)
	ItemID item_id;                // Item created/merged for the current row
};

// datastore.insert_multiple(table_name, headers, items, merge=?, edit=?):
// bulk-insert rows. Headers like "A > B > col" create linked item
// hierarchies; columns named in `merge` de-duplicate against existing
// items via an Index.
static PyObject* insert_multiple(PyObject* self, PyObject* args,
	PyObject* keywords) {
	const char* table_name;
	PyObject* headers_arg;
	PyObject* items;
	// BUG FIX: "merge" is optional ("|Ok"), so merge_arg must be
	// initialized -- it was previously read uninitialized when omitted.
	PyObject* merge_arg = NULL;
	ItemID can_edit = NO_ITEM;
	
	static const char *kwlist[] = {"table_name", "headers", "items", "merge",
		"edit", NULL};
	if (!PyArg_ParseTupleAndKeywords(args, keywords, "sOO|Ok", (char**)kwlist,
		&table_name, &headers_arg, &items, &merge_arg, &can_edit))
		return NULL;
	// NOTE(review): the "edit" argument is parsed but never applied to the
	// created items -- confirm whether can_edit should be wired through.

	Table* table = Table::get(table_name);

	// Get attribute names, calculate column indexes
	vector<string> headers = pyseq_to_vector(headers_arg);

	vector<bool> is_merge_col(headers.size());
	vector<string> merge_cols;
	if (merge_arg != NULL) merge_cols = pyseq_to_vector(merge_arg);
	for (size_t m = 0; m < merge_cols.size(); m++) {
		for (size_t h = 0; h < headers.size(); h++) {
			if (merge_cols[m] == headers[h]) is_merge_col[h] = true;
		}
	}

	// The attribute name of each header is the part after the last '>'.
	vector<size_t> column_index(headers.size());
	vector<string> attrib_names(headers.size());
	for (size_t h = 0; h < headers.size(); h++) {
		size_t last_arrow = headers[h].rfind(">");
		last_arrow = (last_arrow == string::npos) ? 0 : last_arrow + 1;
		attrib_names[h] = trim(headers[h].substr(last_arrow));
		column_index[h] = table->column_index(attrib_names[h].c_str(), true);
	}

	printf("All columns:\n");
	for (size_t k = 0; k < headers.size(); k++) {
		printf("- %s [%lu]%s\n", headers[k].c_str(), column_index[k],
			is_merge_col[k] ? " (merge)" : "");
	}

	// Construct a map of the linkage relationships between our items.
	HierarchyNode root;
	for (size_t c = 0; c < headers.size(); c++) {
		string& header = headers[c];
		size_t start = 0;
		HierarchyNode* node = &root;
		for (size_t k = 0; k < header.length(); k++) {
			if (header[k] == '>') {
				string link_name = trim(header.substr(start, k - start));
				// Linked column, see if we need to extend hierarchy.
				bool found = false;
				for (size_t child = 0; child < node->linked.size(); child++) {
					if (node->linked[child].prefix == link_name) {
						node = &node->linked[child]; found = true; break;
					}
				}

				if (found == false) {
					node->linked.emplace_back();
					HierarchyNode& new_node = node->linked.back();
					new_node.prefix = link_name;
					node = &new_node;
				}

				k += 1;
				start = k;
			}
		}
		node->keys.push_back(c);
	}

	// Order the hierarchy from deepest to shallowest, so children exist
	// (and have item IDs) before their parents link to them.
	vector<HierarchyNode*> ordered_hierarchy;
	ordered_hierarchy.push_back(&root);
	for (size_t n = 0; n < ordered_hierarchy.size(); n++) {
		HierarchyNode* node = ordered_hierarchy[n];
		for (size_t l = 0; l < node->linked.size(); l++)
			ordered_hierarchy.push_back(&node->linked[l]);
	}
	reverse(ordered_hierarchy.begin(), ordered_hierarchy.end());

	// If user wants to merge with existing items based on a set of attributes,
	// we need to build an index.
	vector<Index*> indexes(ordered_hierarchy.size());
	for (size_t n = 0; n < ordered_hierarchy.size(); n++) {
		HierarchyNode* node = ordered_hierarchy[n];
		vector<size_t> merge_columns;
		for (size_t k = 0; k < node->keys.size(); k++) {
			if (is_merge_col[node->keys[k]])
				merge_columns.push_back(column_index[node->keys[k]]);
		}
		indexes[n] = (merge_columns.size() > 0) ?
			new Index(*table, merge_columns) : NULL;
	}

	Transaction txn(table, 0);
	size_t items_added = 0;

	// Go through each row and add listed items to database.
	PyObject* iter = PyObject_GetIter(items);
	if (iter == NULL) return NULL;  // `items` is not iterable
	while (PyObject* row = PyIter_Next(iter)) {  // row must be list or tuple
		for (size_t n = 0; n < ordered_hierarchy.size(); n++) {
			HierarchyNode* node = ordered_hierarchy[n];

			// If we are merging, check if item already exists.
			ItemID item_id = NO_ITEM;
			if (indexes[n] != NULL) {
				vector<const char*> merge_keys;
				for (size_t k = 0; k < node->keys.size(); k++) {
					if (is_merge_col[node->keys[k]])
						merge_keys.push_back(py_to_utf8(
							PySequence_Fast_GET_ITEM(row, node->keys[k])));
				}
				item_id = indexes[n]->get(merge_keys);
			}

			int A = node->keys.size();
			int L = node->linked.size();

			// Count non-empty attributes.
			int A_ne = 0;
			for (int a = 0; a < A; a++) {
				if (PyObject_Not(PySequence_Fast_GET_ITEM(row,
					node->keys[a])) < 1)
					A_ne += 1;
			}

			// Create new item if necessary.
			if (item_id == NO_ITEM) {
				Item& item = table->allocate_item(A_ne + L + 1);
				item_id = item.item_id;

				txn.create_item(item_id, 0);

				int a_ne = 0;
				for (int a = 0; a < A; a++) {
					PyObject* elem = PySequence_Fast_GET_ITEM(
						row, node->keys[a]);
					if (PyObject_Not(elem) == 1) continue;   // Skip empties
					const char* value = py_to_utf8(elem);
					size_t c = column_index[node->keys[a]];
					size_t v = table->column_value_index(c, value, true);
					Attribute& attrib = item.attributes[a_ne++];
					attrib.key = c; attrib.value = v;

					txn.attribute(attrib_names[node->keys[a]].c_str(), value);
				}

				// Link to the (already created) child items of this node.
				for (int l = 0; l < L; l++) {
					const char* key = node->linked[l].prefix.c_str();
					uint16_t col = table->column_index(key, true);
					Attribute& attrib = item.attributes[A_ne + l];
					attrib.key = col | LINKER_ATTR;
					attrib.item_id = node->linked[l].item_id;  // FIXME: Right?

					txn.link(key, attrib.item_id);
				}

				items_added += 1;

			} else {
				// Item exists; merge the row's non-empty values into it.
				int a_ne = 0;
				vector<const char*> updates(2*A_ne);
				for (int a = 0; a < A; a++) {
					PyObject* elem = PySequence_Fast_GET_ITEM(
						row, node->keys[a]);
					if (PyObject_Not(elem) == 1) continue; 
					const char* key = attrib_names[node->keys[a]].c_str();
					const char* value = py_to_utf8(elem);
					updates[2*a_ne] = key; updates[2*a_ne+1] = value;
					a_ne += 1;
				}

				txn.update_item(item_id);
				for (size_t k = 0; k < updates.size(); k += 2)
					txn.attribute(updates[k], updates[k+1]);
				Item::update(item_id, updates);
			}

			node->item_id = item_id;
		}
		// BUG FIX: PyIter_Next returns a new reference that was previously
		// leaked for every processed row.
		Py_DECREF(row);
	}
	Py_DECREF(iter);

	// Clear indexes
	for (size_t n = 0; n < indexes.size(); n++) delete indexes[n];

	txn.commit(items_added, 0, 0);
	Py_RETURN_NONE;
}



// datastore.remove(...): not implemented yet -- accepts any arguments and
// does nothing.
static PyObject* remove(PyObject* self, PyObject* args) {
	Py_RETURN_NONE;    // FIXME
}



// datastore.update(item_id, dict): apply attribute changes to an item and
// log them; an empty-string value erases the attribute.
static PyObject* update(PyObject* self, PyObject* args) {
	ItemID item_id;
	PyObject* dict;
	if (!PyArg_ParseTuple(args, "kO", &item_id, &dict)) return NULL;

	// BUG FIX: an unknown/never-allocated ID previously reached the
	// Transaction with a NULL table and crashed in commit().
	if (Table::get(item_id) == NULL) {
		PyErr_SetString(PyExc_RuntimeError, "Item not found.");
		return NULL;
	}
	
	PyObject* py_updates = PyDict_Items(dict);
	vector<const char*> updates(2*PyList_Size(py_updates));
	for (int k = 0; k < PyList_Size(py_updates); k++) {
		PyObject* update = PyList_GetItem(py_updates, k);
		updates[k*2] = py_to_utf8(PyTuple_GetItem(update, 0));
		updates[k*2+1] = py_to_utf8(PyTuple_GetItem(update, 1));
	}

	Item::update(item_id, updates);

	Transaction txn(Table::get(item_id), 0);
	txn.update_item(item_id);
	for (size_t k = 0; k < updates.size(); k += 2)
		txn.attribute(updates[k], updates[k+1]);
	txn.commit(0, 1, 0);

	// BUG FIX: PyDict_Items returns a new list that was never released.
	// (The dict itself keeps the key/value objects -- and thus the char
	// pointers above -- alive until we return.)
	Py_DECREF(py_updates);
	Py_RETURN_NONE;
}



// datastore.enable_logging(path): replay the changelog at `path` to
// rebuild all tables, then keep it open in append mode so subsequent
// transactions are recorded.
static PyObject* enable_logging(PyObject* self, PyObject* args) {
	char* history_file;
	if (!PyArg_ParseTuple(args, "s", &history_file)) return NULL;

	printf("Rebuilding tables based on %s...\n", history_file);

	// BUG FIX: POSIX getline() may grow the buffer with realloc(), so it
	// must come from malloc() and be released with free() -- the previous
	// new[]/delete[] pairing was undefined behavior once a line exceeded
	// the initial size.
	size_t max_line_len = 16384; 
	char* line = (char*)malloc(max_line_len);

	// Go through all lines. BUG FIX: a missing changelog (fresh database)
	// is not an error; skip the replay and create the file below instead
	// of passing a NULL FILE* to getline()/freopen().
	Table* table = NULL;
	TransactionBrief txn;
	history = fopen(history_file, "r");
	while (history != NULL && getline(&line, &max_line_len, history) >= 0) {
		ItemID id = NO_ITEM;
		ItemID can_edit = NO_ITEM;
		ItemID who = NO_ITEM;
		char table_name[128];
		char when[16];
		char type = ' ';
		// BUG FIX: width-limit the %s conversions so an oversized token in
		// the changelog cannot overflow the fixed-size buffers.
		if (sscanf(line, "TRANSACTION ON %127s BY %lu AT %15s",
			table_name, &who, when) == 3) {
			// Header line: file the previous transaction's brief and
			// switch to the named table.
			if (table) {
				table->latest_changes.push_front(txn);
				while (table->latest_changes.size() > 10)
					table->latest_changes.pop_back();
			}
			txn.who = who; strcpy(txn.when, when);
			txn.created = 0; txn.modified = 0; txn.deleted = 0;
			table = Table::get(table_name);
			continue;
		} else if (sscanf(line, "CREATE %lu %lu", &id, &can_edit) == 2) {
			type = 'c'; txn.created += 1;
		} else if (sscanf(line, "UPDATE %lu", &id) == 1) {
			type = 'u'; txn.modified += 1;
		} else {
			PyErr_SetString(PyExc_RuntimeError, "Changelog is malformed.");
			free(line);
			return NULL;
		}

		// Construct a vector out of the new fields: tab-separated
		// key/value pairs after the command ('\v' encodes stored newlines).
		vector<const char*> fields;
		char* key = NULL;
		char* value = NULL;
		for (char* pos = line; *pos; pos++) {
			if (*pos == '\t' || *pos == '\n') {
				pos[0] = '\0';
				if (value) {
					vtab_to_newline(value);
					fields.push_back(key); fields.push_back(value);
					key = pos + 1; value = NULL;
				} else if (key) {
					value = pos + 1;
				} else {
					key = pos + 1;
				}
			}
		}

		if (type == 'c') {
			// Sanity check
			if (id < item_addresses.size() && item_addresses[id].table) {
				PyErr_SetString(PyExc_RuntimeError, "Item already exists.");
				free(line);
				return NULL;
			}

			// Create new item
			size_t F = fields.size() / 2;
			Item& item = table->allocate_item_with_id(id, F);
			item.can_edit = can_edit;
			for (size_t f = 0; f < F; f++) {
				const char* key = fields[f*2];
				const char* value = fields[f*2+1];
				Attribute& attrib = item.attributes[f];
				if (key[0] == '%') {   // '%' marks a linker attribute
					uint16_t c = table->column_index(key+1, true);
					attrib.key = c | LINKER_ATTR;
					attrib.item_id = strtoul(value, NULL, 10);
				} else {
					uint16_t c = table->column_index(key, true);
					uint32_t v = table->column_value_index(c, value, true);
					attrib.key = c; attrib.value = v;
				}
			}

		} else if (type == 'u') {
			// Sanity check
			if (id >= item_addresses.size() || !item_addresses[id].table) {
				PyErr_SetString(PyExc_RuntimeError, "Item does not exist.");
				free(line);
				return NULL;
			}
			// Update existing item
			Item::update(id, fields);
		}
	}

	// Take care of the last transaction.
	if (table) {
		table->latest_changes.push_front(txn);
		while (table->latest_changes.size() > 10)
			table->latest_changes.pop_back();
	}

	free(line);
	// Reopen (or create) the changelog for appending new transactions.
	history = history ? freopen(history_file, "a", history)
	                  : fopen(history_file, "a");
	Py_RETURN_NONE;
}




// datastore.latest_changes(table_name) -> tuple of up to 10 five-tuples
// (who, when, created, modified, deleted), most recent first.
static PyObject* latest_changes(PyObject* self, PyObject* args) {
	char* table_name = NULL;
	if (!PyArg_ParseTuple(args, "s", &table_name)) return NULL;

	auto& latest = Table::get(table_name)->latest_changes;
	PyObject* ret = PyTuple_New(latest.size());
	size_t index = 0;
	for (const TransactionBrief& txn : latest) {
		PyObject* brief = PyTuple_New(5);
		PyTuple_SET_ITEM(brief, 0, PyLong_FromUnsignedLong(txn.who));
		PyTuple_SET_ITEM(brief, 1, PyUnicode_FromString(txn.when));
		PyTuple_SET_ITEM(brief, 2, PyLong_FromSize_t(txn.created));
		PyTuple_SET_ITEM(brief, 3, PyLong_FromSize_t(txn.modified));
		PyTuple_SET_ITEM(brief, 4, PyLong_FromSize_t(txn.deleted));
		PyTuple_SET_ITEM(ret, index++, brief);
	}
	return ret;
}



// Python-visible functions exported by the "datastore" module.
static PyMethodDef DatastoreMethods[] = {
	{"get", get, METH_VARARGS, ""},
	{"search", (PyCFunction)::search, METH_VARARGS|METH_KEYWORDS, ""},
	{"insert", insert, METH_VARARGS, ""},
	{"insert_multiple", (PyCFunction)insert_multiple, METH_VARARGS|METH_KEYWORDS, ""},
	{"remove", remove, METH_VARARGS, ""},
	{"update", update, METH_VARARGS, ""},
	{"enable_logging", enable_logging, METH_VARARGS, ""},
	{"latest_changes", latest_changes, METH_VARARGS, ""},
	{NULL, NULL, 0, NULL}  // Sentinel
};

// Module definition: name "datastore", no docstring, no per-module state.
static struct PyModuleDef datastore_module = {
	PyModuleDef_HEAD_INIT, "datastore", NULL, -1, DatastoreMethods
};

// Module entry point invoked by the Python interpreter on import.
PyMODINIT_FUNC PyInit_datastore() {
	return PyModule_Create(&datastore_module);
}
