import os
import pickle

from imoocdb.catalog.entry import catalog_table, catalog_index
from imoocdb.constant import DATA_DIRECTORY
from imoocdb.errors import PageError
from imoocdb.storage.slotted_page import Page, PAGE_SIZE
from imoocdb.storage.transactions.entry import get_current_lsn
from imoocdb.storage.bplus_tree import BPlusTree, BPlusTreeTuple, load_root_node
from imoocdb.storage.lru import LRUCache


def table_tuple_get_all(table_name):
    """Yield every live tuple stored in the given table."""
    assert catalog_table.select(lambda r: r.table_name == table_name)
    # Law of Demeter: this layer only walks locations and dereferences
    # them; it knows nothing about the on-disk page layout.
    yield from (
        table_tuple_get_one(table_name, loc)
        for loc in table_tuple_get_all_locations(table_name)
    )


def get_table_filename(table_name):
    """Return the path of the table's heap file, creating DATA_DIRECTORY if needed."""
    # exist_ok avoids the explicit existence probe; behavior is identical.
    os.makedirs(DATA_DIRECTORY, exist_ok=True)
    return os.path.join(DATA_DIRECTORY, f'{table_name}.tbl')


def table_tuple_get_pages(table_name):
    """Return the number of PAGE_SIZE pages in the table's heap file.

    Returns 0 when the table file does not exist yet.

    Bug fix: the previous version returned ``size // PAGE_SIZE - 1``,
    under-counting by one page — table_tuple_get_all_locations then
    never scanned the last page, and table_tuple_get_last_pageno
    pointed one page too early (-1 right after the first allocation).
    """
    filename = get_table_filename(table_name)
    if not os.path.exists(filename):
        return 0
    size = os.stat(filename).st_size
    if size > 0:
        # The heap file must always be a whole number of pages.
        assert size % PAGE_SIZE == 0
    return size // PAGE_SIZE


# Process-wide page cache keyed by (table_name, pageno), with LRU eviction.
cache = LRUCache()


def table_tuple_get_page(table_name, pageno):
    """Return the Page for (table_name, pageno), served through the page cache.

    On a cache miss the page is faulted in from the heap file on disk.
    Returns 0 when the table file does not exist (kept for backward
    compatibility with callers that compare the result against 0).

    Bug fix: the old version returned a brand-new empty Page() on every
    cache miss and left the disk-read code unreachable after an early
    `return`, silently dropping any tuples already persisted on disk.
    """
    filename = get_table_filename(table_name)
    if not os.path.exists(filename):
        return 0

    key = (table_name, pageno)
    if key not in cache:
        # Cache miss: read exactly one page from the heap file.
        with open(filename, mode='rb') as f:
            f.seek(pageno * PAGE_SIZE)
            buffer = f.read(PAGE_SIZE)
        cache[key] = Page.deserialize(buffer)
    return cache[key]


def table_tuple_get_page_tuples(table_name, pageno):
    """Return how many slots the given page of the table currently holds."""
    return len(table_tuple_get_page(table_name, pageno).slot_directory)


def table_tuple_get_all_locations(table_name):
    """Yield the (pageno, sid) location of every live tuple in the table."""
    page_count = table_tuple_get_pages(table_name)
    for pageno in range(page_count):
        slot_count = table_tuple_get_page_tuples(table_name, pageno)
        for sid in range(slot_count):
            loc = (pageno, sid)
            # Invalidated (dead) tuples must be filtered out here.
            if not table_tuple_is_dead(table_name, loc):
                yield loc


def table_tuple_is_dead(table_name, location):
    """A tuple is dead when dereferencing its location yields an empty payload."""
    tup = table_tuple_get_one(table_name, location)
    return not tup


def tuple_to_bytes(tup):
    """Serialize a Python tuple into a bytes payload for on-page storage.

    NOTE: pickle is only safe for trusted, locally-produced data.
    """
    return pickle.dumps(tup)


def bytes_to_tuple(bytes_):
    """Inverse of tuple_to_bytes; an empty buffer maps to the empty tuple."""
    return pickle.loads(bytes_) if bytes_ else ()


def table_tuple_get_one(table_name, location):
    """Dereference location = (pageno, sid) into the stored tuple."""
    pageno, sid = location
    raw = table_tuple_get_page(table_name, pageno).select(sid)
    return bytes_to_tuple(raw)


def table_tuple_update_one(table_name, location, tup):
    """Update the tuple at `location` in place, relocating it when the page is full.

    Returns the (possibly new) (pageno, sid) location of the tuple.

    Bug fixes: the in-page path discarded the slot id returned by
    page.update (bound to an unused `new_sid`) and returned the stale
    one; the relocation path returned the OLD pageno with the new sid;
    and set_header was called with the wrong keyword (`sid=` instead of
    `lsn=`, compare table_tuple_insert_one).
    """
    pageno, sid = location
    page = table_tuple_get_page(table_name, pageno)
    try:
        sid = page.update(sid, tuple_to_bytes(tup))
    except PageError:
        # The only insert/update failure is lack of free space, so that
        # is the only exception we need to handle: delete the old
        # version and move the tuple to a freshly allocated page.
        page.delete(sid)
        page.set_header(lsn=get_current_lsn())
        pageno = table_tuple_allocate_page(table_name)
        page = table_tuple_get_page(table_name, pageno)
        sid = page.insert(tuple_to_bytes(tup))
    # todo: WAL (write-ahead logging) before stamping the page LSN.
    page.set_header(lsn=get_current_lsn())
    return pageno, sid


def table_tuple_get_last_pageno(table_name):
    """Return one less than the table's page count (negative when there are no pages)."""
    page_count = table_tuple_get_pages(table_name)
    return page_count - 1


def table_tuple_allocate_page(table_name):
    """Append one empty page to the table's heap file and return the last pageno."""
    new_page = Page()
    new_page.set_header(get_current_lsn())
    with open(get_table_filename(table_name), 'ab') as f:
        f.write(new_page.serialize())
        # Force the appended page onto stable storage before returning.
        os.fsync(f.fileno())
    return table_tuple_get_last_pageno(table_name)


def table_tuple_insert_one(table_name, tup):
    """Insert `tup` into the table, allocating a new page when the last one is full.

    Returns the slot id (sid) of the inserted tuple.

    Bug fix: in the page-full branch the old code assigned `new_sid`
    but still executed `return sid`, raising NameError (the failed
    insert meant `sid` was never bound), and it stamped the LSN header
    on the old full page instead of the page that received the tuple.
    """
    pageno = table_tuple_get_last_pageno(table_name)
    if pageno < 0:
        # Empty (or missing) heap file: allocate the very first page.
        pageno = table_tuple_allocate_page(table_name)
    # Every call re-reads the page; this overhead is exactly why a
    # buffer/page cache matters.
    page = table_tuple_get_page(table_name, pageno)
    try:
        sid = page.insert(tuple_to_bytes(tup))
    except PageError:
        # Last page is full: append a fresh page and retry there.
        pageno = table_tuple_allocate_page(table_name)
        page = table_tuple_get_page(table_name, pageno)
        sid = page.insert(tuple_to_bytes(tup))
    # Stamp the page that actually received the tuple.
    page.set_header(lsn=get_current_lsn())
    return sid


def table_tuple_delete_one(table_name, location):
    """Delete the tuple at location = (pageno, sid) from the table.

    Bug fix: set_header was called with the wrong keyword (`sid=`);
    the LSN keyword used elsewhere (table_tuple_insert_one) is `lsn=`.
    """
    pageno, sid = location
    page = table_tuple_get_page(table_name, pageno)
    page.delete(sid)
    page.set_header(lsn=get_current_lsn())


def table_tuple_reorganize(table_name):
    # todo: compact the table by reclaiming the space of all tuples
    # whose state is DEAD; currently a no-op placeholder.
    pass


def get_index_filename(index_name):
    """Return the path of the index file, creating DATA_DIRECTORY if needed.

    Fix: use os.makedirs (as get_table_filename does) instead of
    os.mkdir, which fails when DATA_DIRECTORY has a missing parent.
    """
    if not os.path.exists(DATA_DIRECTORY):
        os.makedirs(DATA_DIRECTORY)
    return os.path.join(DATA_DIRECTORY, index_name + '.idx')


def index_tuple_create(index_name, table_name, columns):
    """Build a B+ tree index over `columns` of `table_name` and persist it."""
    table_columns = catalog_table.select(
        lambda r: r.table_name == table_name
    )[0].columns
    # Positions of the indexed columns within the full table tuple.
    key_positions = [table_columns.index(col) for col in columns]
    tree = BPlusTree(get_index_filename(index_name))
    for location in table_tuple_get_all_locations(table_name):
        tup = table_tuple_get_one(table_name, location)
        key = BPlusTreeTuple(tuple(tup[pos] for pos in key_positions))
        tree.insert(key, location)
    # Persist the whole tree in one pass.
    tree.serialize()


def range_compare(value, start, end):
    """Return True iff `value` lies strictly inside the open interval (start, end).

    A None bound means "unbounded" on that side; when BOTH bounds are
    None the function returns False (preserving the original contract).
    """
    if start is None:
        return False if end is None else value < end
    if end is None:
        return value > start
    return start < value < end


def index_tuple_get_range(index_name, start=None, end=None):
    """"
    start end 用来扫描索引中部分参数，如果不给这两个参数赋值，那么就默认拿这个索引中全部数据
    """
    results = catalog_index.select(lambda r: r.index_name == index_name)
    table_name = results[0].table_name
    for location in index_tuple_get_range_locations(index_name, start, end):
        # python支持元组比较 其他语言要自己实现
        # 这就是回表过程 从全量表中数据中获取location的部分
        yield table_tuple_get_one(table_name, location)


def index_tuple_get_range_locations(index_name, start=float('-inf'), end=float('inf')):
    """Yield heap locations for index keys between start and end.

    With the default bounds the whole index is scanned.
    """
    filename = get_index_filename(index_name)
    tree = BPlusTree(filename, load_root_node(filename))
    yield from tree.find_range(start, end)


def index_tuple_get_equal_value_locations(index_name, equal_value):
    """Yield the heap locations whose index key equals `equal_value`."""
    filename = get_index_filename(index_name)
    tree = BPlusTree(filename, load_root_node(filename))
    yield from tree.find(equal_value)


def index_tuple_get_equal_value(index_name, equal_value):
    """Yield the full table tuples whose index key equals `equal_value`."""
    entry = catalog_index.select(lambda r: r.index_name == index_name)[0]
    for location in index_tuple_get_equal_value_locations(index_name, equal_value):
        yield table_tuple_get_one(entry.table_name, location)


def covered_index_tuple_get_range(index_name, start=float('-inf'), end=float('inf')):
    """Index-only range scan: yield the key tuples without touching the heap."""
    filename = get_index_filename(index_name)
    tree = BPlusTree(filename, load_root_node(filename))
    yield from (key.tup for key in tree.find_range(start, end, return_keys=True))


def covered_index_tuple_get_equal_value(index_name, equal_value):
    """Index-only equality scan: yield `equal_value` once per matching entry."""
    for _ in index_tuple_get_equal_value_locations(index_name, equal_value):
        yield equal_value


def index_tuple_insert_one(index_name, key, value):
    """Insert a (key, value) pair into the named index and flush it to disk."""
    filename = get_index_filename(index_name)
    tree = BPlusTree(filename, load_root_node(filename))
    tree.insert(BPlusTreeTuple(key), value)
    # todo: attach an LSN; for now brute-force rewrite the whole tree.
    tree.serialize()


def index_tuple_delete_one(index_name, key, location=None):
    """Remove `key` (optionally restricted to one location) from the index and flush.

    NOTE(review): insert wraps keys in BPlusTreeTuple while delete passes
    the raw key — confirm BPlusTree.delete normalizes keys the same way.
    """
    filename = get_index_filename(index_name)
    tree = BPlusTree(filename, load_root_node(filename))
    tree.delete(key, location)
    # todo: attach an LSN; for now brute-force rewrite the whole tree.
    tree.serialize()


def index_tuple_update_one(index_name, key, old_value, value):
    """Update an index entry: drop the old (key, old_value) mapping, add the new one."""
    index_tuple_delete_one(index_name, key, old_value)
    index_tuple_insert_one(index_name, key, value)


def table_tuple_delete_multiple(table_name, locations):
    """Delete every tuple in `locations`, then trigger table reorganization."""
    for loc in locations:
        table_tuple_delete_one(table_name, loc)
    table_tuple_reorganize(table_name)
