#include "extendiblehash.h"

#include <iostream>
#include <sstream>

//#include "LogFile.h"

template<class Key, class Record>
ExtendibleHash<Key, Record>::ExtendibleHash() {
    // Register the hash header (global depth + directory) so the backing
    // file serializes it together with its own metadata.
    file.addMetadata(&header);
}

// No owned resources beyond members with their own destructors.
template<class Key, class Record>
ExtendibleHash<Key, Record>::~ExtendibleHash() = default;

template<class Key, class Record>
void ExtendibleHash<Key, Record>::addMetadata(Serializable* metadata) {
    // Forward extra user metadata straight to the backing file.
    file.addMetadata(metadata);
}

// Opens an existing hash file. Returns false when the backing file cannot
// be opened; bucketSize is only recorded on success.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::open(const std::string &filename, u_int32_t bucketSize) {
    logFileName = filename + ".log";
    if (!file.open(filename, bucketSize))
        return false;
    this->bucketSize = bucketSize;
    return true;
}

// Creates a new hash file and seeds it with a single empty bucket referenced
// by the only directory entry (global depth 0 -> directory of size 1).
// Returns false when the backing file cannot be created.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::create(const std::string &filename, u_int32_t bucketSize) {
    this->bucketSize = bucketSize;
    logFileName = filename + ".log";
    header.globalDepth = 0;
    if (!file.create(filename, bucketSize))
        return false;
    Bucket<Key, Record> initialBucket(bucketSize);
    DirectoryEntry firstEntry = {header.globalDepth, file.append(initialBucket)};
    header.directory.push_back(firstEntry);
    return true;
}

// True iff a record with the given key is stored in the hash.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::contains(Key key) {
    u_int32_t dirIndex = getDirectoryIndex(key.hashcode());
    return readBucket(dirIndex).contains(key);
}

// Fetches the record for the given key; the Nullable result is empty when
// the key is absent (delegated to Bucket::get).
template<class Key, class Record>
Nullable<Record> ExtendibleHash<Key, Record>::get(Key key) {
    u_int32_t dirIndex = getDirectoryIndex(key.hashcode());
    return readBucket(dirIndex).get(key);
}

// Inserts a record. Returns false if the key already exists; a full bucket
// is handled inside internalAdd via a split.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::add(Record record) {
    u_int32_t dirIndex = getDirectoryIndex(record.getKey().hashcode());
    Bucket<Key, Record> target = readBucket(dirIndex);
    return internalAdd(record, dirIndex, target);
}

// Replaces the stored record with the same key. Returns false when no such
// record exists. If the replacement overflows the bucket (presumably records
// are variable-sized, so a larger replacement can no longer fit — TODO
// confirm Bucket::update semantics) the bucket is split via handleOverflow.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::update(Record record) {
    u_int32_t dirIndex = getDirectoryIndex(record.getKey().hashcode());
    Bucket<Key, Record> target = readBucket(dirIndex);
    try {
        bool changed = target.update(record);
        if (changed)
            file.write(header.directory[dirIndex].fileIndex, target);
        return changed;
    } catch (ContainerFullException & e) {
        return handleOverflow(record, target, dirIndex);
    }
}

// Removes the record with the given key. Returns false when absent.
// On success, attempts to merge the shrunken bucket with a buddy before
// persisting it (tryMerge must run first: it may fold a buddy's records
// into `target` and rewrite the directory).
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::remove(Key key) {
    u_int32_t dirIndex = getDirectoryIndex(key.hashcode());
    Bucket<Key, Record> target = readBucket(dirIndex);
    if (!target.remove(key))
        return false;
    tryMerge(dirIndex, target);
    file.write(header.directory[dirIndex].fileIndex, target);
    return true;
}

// Loads from disk the bucket referenced by directory slot `index`.
template<class Key, class Record>
Bucket<Key, Record> ExtendibleHash<Key, Record>::readBucket(u_int32_t index) {
    Bucket<Key, Record> loaded(bucketSize);
    file.read(header.directory[index].fileIndex, loaded);
    return loaded;
}

// Maps a hash value to a directory slot by keeping its low globalDepth bits
// (equivalent to hash % 2^globalDepth for unsigned hash).
template<class Key, class Record>
u_int32_t ExtendibleHash<Key, Record>::getDirectoryIndex(u_int32_t hash) const {
    return hash & ((1u << header.globalDepth) - 1u);
}

// Adds `record` into `bucket` (the bucket at directory slot `index`) and
// persists it. Returns false when the key is already present. A full bucket
// triggers a split; handleOverflow retries the insert and may recurse.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::internalAdd(Record record, u_int32_t index, Bucket<Key, Record>& bucket) {
    try {
        const bool added = bucket.add(record);
        if (added)
            file.write(header.directory[index].fileIndex, bucket);
        return added;
    } catch (ContainerFullException & e) {
        return handleOverflow(record, bucket, index); // should always succeed
    }
}

// Splits an overflowing bucket: allocates a sibling bucket, rewires every
// directory entry that referenced the full bucket so half of them point at
// the sibling, redistributes the existing records between the two, and
// finally retries the insert of newRecord (which may recurse if the chosen
// bucket overflows again).
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::handleOverflow(Record newRecord, Bucket<Key, Record>& fullBucket, u_int32_t fullBucketIndex) {
    DirectoryEntry fullEntry = header.directory[fullBucketIndex];
    u_int8_t localDepth = fullEntry.depth;

    // A bucket at maximum depth cannot split without doubling the directory.
    if (localDepth == header.globalDepth) {
        extendTable();
    }

    // Create the sibling bucket in the file; its first directory slot differs
    // from the full bucket's in bit `localDepth`.
    Bucket<Key, Record> newBucket(bucketSize);
    u_int32_t newBucketIndex = getDirectoryIndex(fullBucketIndex + (1 << fullEntry.depth));

    // Rewire the directory. After the split each child has local depth
    // localDepth+1, so each one owns exactly 2^(globalDepth - localDepth - 1)
    // directory entries, spaced 2^(localDepth+1) slots apart.
    fullEntry.depth++;
    DirectoryEntry newEntry = {fullEntry.depth, file.append(newBucket)};
    // BUGFIX: the original loop ran `i <= globalDepth - localDepth` times,
    // which rewires too few entries once globalDepth - localDepth >= 4,
    // leaving stale directory slots pointing at the old bucket/depth.
    const u_int32_t copies = 1u << (header.globalDepth - fullEntry.depth);
    for (u_int32_t i = 0; i < copies; i++) {
        u_int32_t fullBuddyIndex = getDirectoryIndex(fullBucketIndex + i * (1 << fullEntry.depth));
        u_int32_t newBuddyIndex = getDirectoryIndex(newBucketIndex + i * (1 << fullEntry.depth));

        header.directory[fullBuddyIndex] = fullEntry;
        header.directory[newBuddyIndex] = newEntry;
    }

    // Redistribute the old contents between the two buckets according to the
    // freshly rewired directory.
    std::vector<Record> records = fullBucket.empty();
    u_int32_t index;
    for (Record record : records) {
        index = getDirectoryIndex(record.getKey().hashcode());
        (header.directory[index].fileIndex == fullEntry.fileIndex ? fullBucket : newBucket).add(record);
    }

    // Persist the bucket that does NOT receive the new record, then insert
    // the new record into the other one (internalAdd persists it and may
    // trigger another overflow).
    bool success;
    index = getDirectoryIndex(newRecord.getKey().hashcode());
    DirectoryEntry entry = header.directory[index];
    if (entry.fileIndex == fullEntry.fileIndex){
        file.write(header.directory[newBucketIndex].fileIndex, newBucket);
        success = internalAdd(newRecord, index, fullBucket);
    } else {
        file.write(header.directory[fullBucketIndex].fileIndex, fullBucket);
        success = internalAdd(newRecord, index, newBucket);
    }
    return success;
}

// Doubles the directory: the new upper half mirrors the lower half, so every
// bucket keeps its current entries plus identical copies, and globalDepth
// grows by one.
// The original iterated the vector with a range-for while push_back-ing into
// it, which is only valid because reserve() prevents reallocation; an index
// loop over the original size removes that hidden coupling.
template<class Key, class Record>
void ExtendibleHash<Key, Record>::extendTable() {
    const auto oldSize = header.directory.size();
    header.directory.reserve(2 * oldSize);
    for (decltype(header.directory.size()) i = 0; i < oldSize; ++i) {
        header.directory.push_back(header.directory[i]);
    }
    header.globalDepth++;
}

// After a removal, tries to fold this bucket's buddy (the bucket whose
// directory slots differ only in bit depth-1) back into it when their
// combined contents fit in one bucket. On success the buddy's file page is
// released, the directory entries are rewired, the table may collapse, and
// the merge is retried at the shallower depth.
template<class Key, class Record>
void ExtendibleHash<Key, Record>::tryMerge(u_int32_t index, Bucket<Key, Record>& bucket) {
    DirectoryEntry entry = header.directory[index];

    // BUGFIX: a bucket at local depth 0 covers the whole directory and has no
    // buddy. Without this guard entry.depth-- wraps the u_int8_t to 255 and
    // the shift below (1 << 255) is undefined behavior. This also terminates
    // the tail recursion cleanly.
    if (entry.depth == 0)
        return;

    entry.depth--;

    u_int32_t buddyIndex = index;

    bool merged = false;
    u_int32_t removePos = 0;

    // Walk every directory slot sharing the low depth-1 bits with `index`.
    while (index != (buddyIndex = getDirectoryIndex( buddyIndex + (1 << (entry.depth)))) ) {
        DirectoryEntry buddyEntry = header.directory[buddyIndex];

        if ((entry.fileIndex != buddyEntry.fileIndex)
                && (entry.depth + 1 == buddyEntry.depth)) {

            if (! merged) {
                Bucket<Key, Record> buddyBucket = readBucket(buddyIndex);

                // Merge only when both contents fit in a single bucket.
                if (bucket.getSizeUsed() + buddyBucket.getSizeUsed() <= buddyBucket.getCapacity()) {
                    for (Record record : buddyBucket.empty())
                        bucket.add(record);
                    removePos = buddyEntry.fileIndex;
                    merged = true;
                }
            }

            // Rewire every slot that referenced the merged-away buddy.
            if (merged && removePos == buddyEntry.fileIndex)
                header.directory[buddyIndex] = entry;
        }
    }

    if (merged) {
        header.directory[index] = entry;
        file.remove(removePos);
        tryCollapse();
        // Re-map `index` (the directory may have shrunk) and keep merging.
        tryMerge(getDirectoryIndex(index), bucket);
    }
}

// Shrinks the directory to 2^maxLocalDepth entries when every bucket is
// shallower than the current global depth. Returns true iff it shrank.
template<class Key, class Record>
bool ExtendibleHash<Key, Record>::tryCollapse() {
    // Find the deepest local depth still in use.
    u_int8_t deepest = 0;
    for (const DirectoryEntry& slot : header.directory) {
        if (slot.depth > deepest)
            deepest = slot.depth;
    }

    if (deepest >= header.globalDepth)
        return false;

    header.directory.resize(1 << deepest);
    header.globalDepth = deepest;
    return true;
}

// Debug dump: one line per directory slot with its file position, the number
// of slots sharing the bucket (2^depth), and the bucket contents.
template<class Key, class Record>
std::string ExtendibleHash<Key, Record>::toString() {
    std::stringstream out;
    for (u_int32_t slot = 0; slot < header.directory.size(); slot++) {
        const DirectoryEntry& current = header.directory[slot];
        Bucket<Key, Record> bucket = readBucket(slot);
        out << slot << " pos:" << current.fileIndex << " td:" << (1 << current.depth) << " " << bucket.toString() << std::endl;
    }
    return out.str();
}
