#include "destor.h"
#include "jcr.h"
#include "storage/containerstore.h"
#include "recipe/recipestore.h"
#include "rewrite_phase.h"
#include "backup.h"
#include "index/index.h"

/* System headers after destor.h so any feature-test macros it defines
 * take effect first. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Helpers implemented in the delta-compression and index modules. */
extern uint32_t ComputeDelta(struct chunk *, struct chunk *);
extern struct chunk *get_simi_chunk(struct chunk *);
extern struct chunk *retrive_basechunk_by_fp_id(fingerprint *fp, containerid id);
extern void super_feature_index_update(GSequence *);
extern void DepWithDelta_har_monitor_update(containerid id, struct chunk *c);

/* The filter-phase worker thread (runs filter_thread below). */
static pthread_t filter_t;
/* Count of non-marker chunks processed so far; used in VERBOSE logging. */
static int64_t chunk_num;

struct
{
    /* accessed in dedup phase */
    struct container *container_buffer;
    /* In order to facilitate sampling in container,
     * we keep a queue for chunks in container buffer. */
    GSequence *chunks;
} storage_buffer;

/* Defined in the index module; serializes index access between the filter
 * and dedup phases. */
extern struct
{
    pthread_mutex_t mutex;
    pthread_cond_t cond; // index buffer is not full
    int wait_threshold;
} index_lock;

/*
 * When a container buffer is full, we push it into container_queue.
 */

/*
 * Consumer thread of the filter phase.
 *
 * Pops chunks from delta_queue one segment at a time (segments are delimited
 * by CHUNK_SEGMENT_START / CHUNK_SEGMENT_END flag chunks), decides which
 * chunks must be physically written (unique chunks, plus sparse duplicates
 * whose rewrite has not been denied), packs them into containers, keeps the
 * fingerprint index and the super-feature index up to date, and appends the
 * backup recipe to jcr.bv.
 *
 * Exits when a NULL pop signals the end of the backup job; always returns
 * NULL.
 */
static void *filter_thread(void *arg)
{

    int enable_rewrite = 1;      /* NOTE(review): set but never read in this function */
    struct recipeMeta *r = NULL; /* recipe of the file currently being emitted */

    /*
     * For the unique chunks that can be used as base chunks in pipeline.
     * NOTE(review): only the commented-out code further below would populate
     * this table; as written it is created empty and destroyed empty.
     */
    GHashTable *buffer_delta_unique_chunks = g_hash_table_new_full(g_int64_hash, g_fingerprint_equal, NULL, free);

    while (1)
    {
        struct chunk *c = sync_queue_pop(delta_queue);

        if (c == NULL)
            /* backup job finish */
            break;

        /* reconstruct a segment */
        struct segment *s = new_segment();

        /* segment head */
        assert(CHECK_CHUNK(c, CHUNK_SEGMENT_START));
        free_chunk(c);

        /* Collect chunks until the segment-end flag; FILE_START/FILE_END
         * marker chunks are kept in the sequence but not counted. */
        c = sync_queue_pop(delta_queue);
        while (!(CHECK_CHUNK(c, CHUNK_SEGMENT_END)))
        {
            g_sequence_append(s->chunks, c);
            if (!CHECK_CHUNK(c, CHUNK_FILE_START) && !CHECK_CHUNK(c, CHUNK_FILE_END))
                s->chunk_num++;

            c = sync_queue_pop(delta_queue);
        }
        free_chunk(c);

        /* For self-references within a segment:
         * if an early copy of a chunk in this segment has already been
         * rewritten, the rewrite request for later copies will be denied.
         * Maps fingerprint -> bookkeeping copy of the first written chunk. */

        GHashTable *recently_unique_chunks = g_hash_table_new_full(g_int64_hash,
                                                                   g_fingerprint_equal, NULL, free_chunk);

        /* Held for the whole segment: the index updates, container writes,
         * and recipe appends below must not interleave with the dedup phase. */
        pthread_mutex_lock(&index_lock.mutex);

        TIMER_DECLARE(1);
        TIMER_BEGIN(1);

        /* This function will check the fragmented chunks
         * that would be rewritten later.
         * If we find an early copy of the chunk in earlier segments
         * has been rewritten,
         * the rewrite request for it will be denied. */

        index_check_buffer(s);

        GSequenceIter *iter = g_sequence_get_begin_iter(s->chunks);
        GSequenceIter *end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter))
        {
            struct chunk *c = g_sequence_get(iter);
            struct chunk *ck_base = NULL; /* NOTE(review): declared but never used */

            if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END))
                continue;

            /* A duplicate whose container id is still TEMPORARY_ID refers to
             * a chunk written earlier in this very segment: resolve its id
             * and delta status from recently_unique_chunks. */
            if (CHECK_CHUNK(c, CHUNK_DUPLICATE) && c->id == TEMPORARY_ID)
            {
                struct chunk *ruc = g_hash_table_lookup(recently_unique_chunks, &c->fp);
                assert(ruc);
                c->id = ruc->id;
                // c->baseid = ruc->baseid;
                c->DepWithDelta = ruc->DepWithDelta;
                // c->base_size = ruc->base_size;
                // c->delta_size = ruc->delta_size;

                /* If the earlier copy was stored delta-compressed, this
                 * duplicate now depends on a delta chunk. */
                if (ruc->deltaCompressed)
                {
                    c->DepWithDelta = 1;
                    c->type = CHUNK_TYPE_DUPDELTA;
                }
                else
                {
                    c->type = CHUNK_TYPE_DUPLICATE;
                }

                /* Chunks that depend on a delta must never be rewritten. */
                if (c->DepWithDelta)
                    SET_CHUNK(c, CHUNK_REWRITE_DENIED);
            }

            /* A fragmented chunk will be denied if it has been rewritten recently */
            if (!CHECK_CHUNK(c, CHUNK_DUPLICATE) || (!CHECK_CHUNK(c, CHUNK_REWRITE_DENIED) && (CHECK_CHUNK(c, CHUNK_SPARSE))))
            {
                /*
                 * If the chunk is unique, or be fragmented and not denied,
                 * we write it to a container.
                 * Fragmented indicates: sparse, or out of order and not in cache,
                 * NOTE(review): the condition above only tests CHUNK_SPARSE;
                 * verify whether out-of-order chunks are meant to be covered.
                 */
                if (storage_buffer.container_buffer == NULL)
                {
                    /* Lazily create the first container and its shadow
                     * chunk queue used for sampling. */
                    storage_buffer.container_buffer = create_container();
                    // if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
                    storage_buffer.chunks = g_sequence_new(free_chunk);
                }

                /* Container full: flush it asynchronously and start a new
                 * one.  Both branches below are identical except that the
                 * physical-locality branch also samples features into the
                 * fingerprint index. */
                if (container_overflow(storage_buffer.container_buffer, c))
                {
                    if (destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
                    {

                        GHashTable *features = sampling(storage_buffer.chunks,
                                                        g_sequence_get_length(storage_buffer.chunks));
                        index_update(features, get_container_id(storage_buffer.container_buffer));

                        /* The async container write is excluded from
                         * jcr.filter_time. */
                        TIMER_END(1, jcr.filter_time);
                        write_container_async(storage_buffer.container_buffer);
                        TIMER_BEGIN(1);

                        if (destor.enable_sidc == 0)
                        {

                            TIMER_DECLARE(3);
                            TIMER_BEGIN(3);
                            super_feature_index_update(storage_buffer.chunks);
                            TIMER_END(3, jcr.sketch_replace_time);
                        }

                        g_hash_table_destroy(features);

                        g_sequence_free(storage_buffer.chunks);
                        storage_buffer.chunks = g_sequence_new(free_chunk);
                    }
                    else
                    {
                        TIMER_END(1, jcr.filter_time);
                        write_container_async(storage_buffer.container_buffer);
                        TIMER_BEGIN(1);

                        if (destor.enable_sidc == 0)
                        {

                            TIMER_DECLARE(3);
                            TIMER_BEGIN(3);
                            super_feature_index_update(storage_buffer.chunks);
                            TIMER_END(3, jcr.sketch_replace_time);
                        }

                        g_sequence_free(storage_buffer.chunks);
                        storage_buffer.chunks = g_sequence_new(free_chunk);
                    }
                    storage_buffer.container_buffer = create_container();
                }
                /*
                if (c->delta && c->delta->baseid == -1){

                    struct indexElem *ne = g_hash_table_lookup(buffer_delta_unique_chunks, &c->delta->basefp);
                    assert(ne);
                    c->delta->baseid = ne->id;
                    assert(c->delta->baseid != -1);
                }
                */
                if (add_chunk_to_container(storage_buffer.container_buffer, c))
                {

                    /* wc is a lightweight bookkeeping copy recorded in
                     * recently_unique_chunks so later duplicates in this
                     * segment can resolve their container id.
                     * NOTE(review): wc is only inserted on the unique paths
                     * below; on the duplicate paths it appears to leak —
                     * verify. */
                    struct chunk *wc = new_chunk(0);
                    memcpy(&wc->fp, &c->fp, sizeof(fingerprint));
                    wc->id = c->id;
                    // wc->baseid = c->baseid;

                    wc->DepWithDelta = c->DepWithDelta;
                    // wc->base_size = c->base_size;
                    // wc->delta_size = c->delta_size;

                    if (c->delta)
                    {
                        /*
                         * The chunk is stored delta-compressed: account the
                         * delta payload plus the base reference (fingerprint
                         * and container id) instead of the full chunk size.
                         */
                        jcr.stored_data_size += sizeof(c->size) + c->delta->size + sizeof(fingerprint) + sizeof(containerid);
                        jcr.delta_chunk_num++;

                        jcr.delta_compressed_size += c->size - c->delta->size;
                        jcr.size_before_compressed += c->size;
                        c->deltaCompressed = 1;
                        if (!CHECK_CHUNK(c, CHUNK_DUPLICATE))
                        {
                            jcr.unique_data_size += c->size;
                            jcr.unique_chunk_num++;

                            // c->baseid = c->delta->baseid;
                            wc->baseid = c->baseid;
                            wc->deltaCompressed = 1;
                            c->deltaCompressed = 1;
                            // c->base_size = c->baseChunk->size;
                            // c->delta_size = c->delta->size;
                            g_hash_table_insert(recently_unique_chunks, &wc->fp, wc);
                        }
                    }
                    else
                    {
                        /* Stored verbatim (no delta). */
                        jcr.stored_data_size += c->size;
                        if (!CHECK_CHUNK(c, CHUNK_DUPLICATE))
                        {
                            jcr.unique_data_size += c->size;
                            jcr.unique_chunk_num++;

                            assert(c->id != TEMPORARY_ID);
                            g_hash_table_insert(recently_unique_chunks, &wc->fp, wc);
                            /* NOTE(review): chunk_num is int64_t but printed
                             * with %d — should be PRId64. */
                            VERBOSE("Filter phase: %dth chunk is recently unique, size %d", chunk_num,
                                    g_hash_table_size(recently_unique_chunks));
                        }
                    }

                    // if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
                    {
                        /* Shadow copy (fingerprint, id, super-features) kept
                         * in storage_buffer.chunks for sampling and the
                         * super-feature index update at flush time. */
                        struct chunk *ck = new_chunk(0);
                        memcpy(&ck->fp, &c->fp, sizeof(fingerprint));
                        ck->id = c->id;
                        ck->deltaCompressed = c->deltaCompressed;
                        ck->superfeature->sf1 = c->superfeature->sf1;
                        ck->superfeature->sf2 = c->superfeature->sf2;
                        ck->superfeature->sf3 = c->superfeature->sf3;
                        g_sequence_append(storage_buffer.chunks, ck);
                    }
                    /*
                    if(CHECK_CHUNK(c, CHUNK_BUFFER_DELTA)){
                        struct indexElem *ne = (struct indexElem*)malloc(sizeof(struct indexElem));
                        ne->id = c->id;
                        memcpy(&ne->fp, &c->fp, sizeof(fingerprint));
                        g_hash_table_insert(buffer_delta_unique_chunks, &ne->fp, ne);
                    }
                    */
                }
            }

            /* Every chunk must have a resolved container id by now. */
            assert(c->id != TEMPORARY_ID);

            chunk_num++;

            /* free the delta chunk and base chunk*/
            if (c->delta)
            {
                free(c->delta);
                c->delta = NULL;
            }

            if (c->baseChunk)
            {
                free(c->baseChunk);
                c->baseChunk = NULL;
            }
        }

        /* Push this segment's fingerprints into the shared index buffer;
         * the return value is used below to decide whether to wake the
         * dedup phase (broadcast only when the buffer is not full). */
        int full = index_update_buffer(s);

        /* Write a SEGMENT_BEGIN */
        segmentid sid = append_segment_flag(jcr.bv, CHUNK_SEGMENT_START, s->chunk_num);

        /* Write recipe */
        iter = g_sequence_get_begin_iter(s->chunks);
        end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter))
        {
            c = g_sequence_get(iter);

            if (r == NULL)
            {
                /* First chunk of a file carries the file name in c->data. */
                assert(CHECK_CHUNK(c, CHUNK_FILE_START));
                r = new_recipe_meta(c->data);
            }
            else if (!CHECK_CHUNK(c, CHUNK_FILE_END))
            {

                struct chunkPointer cp;
                cp.id = c->id;
                assert(cp.id >= 0 && cp.id <= jcr.container_num_now_stored + 1);
                memcpy(&cp.fp, &c->fp, sizeof(fingerprint));
                cp.size = c->size;
                // assert(c->type != CHUNK_TYPE_DUPLICATE_ALL && c->type != CHUNK_TYPE_UNIQUE_ALL);
                /* The flag tells the restore path whether the stored data is
                 * the chunk itself or a delta against a base chunk. */
                if (c->type == CHUNK_TYPE_UNIQUE || c->type == CHUNK_TYPE_DUPLICATE)
                    cp.flag = CP_TYPE_CHUNK;
                else
                    cp.flag = CP_TYPE_DELTA;

                append_n_chunk_pointers(jcr.bv, &cp, 1);
                r->chunknum++;
                r->filesize += c->size;
            }
            else
            {
                /* End of file: flush its recipe meta. */
                assert(CHECK_CHUNK(c, CHUNK_FILE_END));
                append_recipe_meta(jcr.bv, r);
                free_recipe_meta(r);
                r = NULL;
            }
        }

        /* Write a SEGMENT_END */
        append_segment_flag(jcr.bv, CHUNK_SEGMENT_END, 0);

        if (destor.index_category[1] == INDEX_CATEGORY_LOGICAL_LOCALITY)
        {
            /*
             * Update_index for logical locality
             */
            s->features = sampling(s->chunks, s->chunk_num);
            if (destor.index_category[0] == INDEX_CATEGORY_EXACT)
            {
                /*
                 * For exact deduplication,
                 * unique fingerprints are inserted.
                 */
                VERBOSE("Filter phase: add %d unique fingerprints to %d features",
                        g_hash_table_size(recently_unique_chunks),
                        g_hash_table_size(s->features));
                GHashTableIter iter;
                gpointer key, value;
                g_hash_table_iter_init(&iter, recently_unique_chunks);
                while (g_hash_table_iter_next(&iter, &key, &value))
                {
                    struct chunk *uc = value;
                    fingerprint *ft = malloc(sizeof(fingerprint));
                    memcpy(ft, &uc->fp, sizeof(fingerprint));
                    g_hash_table_insert(s->features, ft, NULL);
                }
            }
            index_update(s->features, sid);
        }

        free_segment(s);

        /* Wake dedup threads blocked on a full index buffer
         * (index_lock.cond means "index buffer is not full"). */
        if (index_lock.wait_threshold > 0 && full == 0)
        {
            pthread_cond_broadcast(&index_lock.cond);
        }
        TIMER_END(1, jcr.filter_time);
        pthread_mutex_unlock(&index_lock.mutex);

        g_hash_table_destroy(recently_unique_chunks);
    }

    /* End of job: flush the final, partially-filled container and run the
     * same index updates as the in-loop flush path. */
    if (storage_buffer.container_buffer && !container_empty(storage_buffer.container_buffer))
    {
        if (destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
        {
            /*
             * Update_index for physical locality
             */
            GHashTable *features = sampling(storage_buffer.chunks,
                                            g_sequence_get_length(storage_buffer.chunks));
            index_update(features, get_container_id(storage_buffer.container_buffer));
            write_container_async(storage_buffer.container_buffer);
            if (destor.enable_sidc == 0)
                super_feature_index_update(storage_buffer.chunks);

            g_hash_table_destroy(features);
            g_sequence_free(storage_buffer.chunks);
        }
        else
        {
            write_container_async(storage_buffer.container_buffer);
            if (destor.enable_sidc == 0)
                super_feature_index_update(storage_buffer.chunks);

            g_sequence_free(storage_buffer.chunks);
        }
    }

    /* Final job statistics. */
    jcr.container_num_after_backup = get_container_count() - 1;
    jcr.container_num_stored = jcr.container_num_after_backup - jcr.container_num_before_backup;
    g_hash_table_destroy(buffer_delta_unique_chunks);
    return NULL;
}

/*
 * Start the filter phase: reset the container buffer, initialize the
 * restore-aware monitor, and spawn the worker thread that consumes
 * delta_queue (see filter_thread).
 */
void start_filter_phase()
{

    storage_buffer.container_buffer = NULL;

    init_restore_aware();

    /* The original code ignored pthread_create()'s result; on failure,
     * stop_filter_phase() would join an uninitialized thread handle.
     * pthread_create returns an error number directly (it does not set
     * errno), hence strerror(ret). */
    int ret = pthread_create(&filter_t, NULL, filter_thread, NULL);
    if (ret != 0)
    {
        fprintf(stderr, "filter phase: pthread_create failed: %s\n", strerror(ret));
        exit(1);
    }
}

/*
 * Stop the filter phase: block until the worker thread terminates.
 * The thread exits after popping the NULL end-of-job marker from
 * delta_queue, so the producer must push that marker before calling this.
 */
void stop_filter_phase()
{
    /* Discard the thread's return value (filter_thread always returns NULL). */
    pthread_join(filter_t, NULL);
}
