import os
import time
import pickle

from ...sql.parser.ast import JoinType

from ...session_manager import get_current_session_id

from ...constant import TEMP_DIRECTORY
from ...errors import ExecutorCheckError
from ...storage.entry import *
from ...sql.logical_operator import Condition
from ...common.fabric import TableColumn
from ...catalog.entry import catalog_table, catalog_index, catalog_function

class PhysicalOperator:
    """Base class for every physical (executor) operator.

    Execution follows the volcano/iterator model: open() prepares the
    operator, next() yields rows one at a time, close() tears down whatever
    open() created. Subclasses must implement all three.
    """

    def __init__(self, name: str):
        # Human-readable operator name (e.g. 'TableScan').
        self.name = name

        # Estimated cost, only meaningful if CBO's built-in formulas were
        # used. NOTICE: the SQL handled by this project is simple enough
        # that RBO alone already produces good plans, so CBO is largely
        # redundant here and this value may never actually be used.
        self.cost = 0

        # Child operators, i.e. where this operator's input rows come from.
        self.children = []

        # Output schema, handed to the parent operator as its input schema.
        # TODO: decide later whether columns should be a list or a dict.
        self.columns = None

    def open(self):
        """Initialize the operator: pre-cache data, fetch internal database
        state, allocate working variables, and so on."""
        raise NotImplementedError()

    def next(self):
        """Yield one row (tuple/record) at a time.

        Query execution is an iterator traversal, so next() is the
        abstraction that models that iteration.
        """
        raise NotImplementedError()

    def close(self):
        """Clean up temporary data created by open(), release resources."""
        raise NotImplementedError()

    def add_child(self, operator):
        # Compared with poking the children field from outside, this wrapper
        # 1. lets callers program against the abstract interface rather than
        #    the concrete representation, and
        # 2. avoids shotgun surgery if the representation changes.
        assert isinstance(operator, PhysicalOperator)
        self.children.append(operator)

class TableScan(PhysicalOperator):
    """Full table scan, optionally filtering rows through a condition."""

    def __init__(self, table_name: str, condition=None):
        super().__init__('TableScan')
        self.table_name = table_name
        self.condition = condition
        self.columns = None

    def open(self):
        # Build the output schema; its order matches the element order of
        # every tuple this scan yields, position by position.
        meta = catalog_table.select(lambda r: r.table_name == self.table_name)[0]
        self.columns = [
            TableColumn(table_name=self.table_name, column_name=col)
            for col in meta.columns
        ]

    def next(self):
        for tup in table_tuple_get_all(table_name=self.table_name):
            if self.condition:
                # Example: for table t1(id, name) and tuple (1, 'xiaoming'),
                # values becomes {TableColumn(t1, id): 1,
                #                 TableColumn(t1, name): 'xiaoming'}
                # i.e. values = {k: tup[i] for i, k in enumerate(self.columns)}
                values = cast_tuple_pair_to_values(columns=self.columns, tup=tup)
                if not is_condition_true(values=values, condition=self.condition):
                    continue
            yield tup

    def close(self):
        pass


# Index scan, e.g. over index a of table t1, for queries such as
# select t1.a from t1 where t1.a = 10 (point lookup)
# select t1.a from t1 where t1.a < 10 (range query)
# select t1.a from t1 where t1.a > 10 (range query)
# where t1.a is a TableColumn and 10 is a Constant.
class IndexScan(PhysicalOperator):
    """Scan a table through an index, for point or one-sided range predicates."""

    def __init__(self, index_name: str, condition=None):
        super().__init__('IndexScan')
        self.index_name = index_name
        self.condition = condition

        self.table_name = None        # table the index belongs to
        self.condition_column = None  # the indexed column in the predicate
        self.constant = None          # the constant operand of the predicate
        # Index access callbacks; CoveredIndexScan swaps in covered variants.
        self.tuple_get_equal_value = index_tuple_get_equal_value
        self.tuple_get_range = index_tuple_get_range_locations

    def open(self):
        """Split the predicate into its column and constant operands.

        E.g. with index a on t1 and 'select t1.a from t1 where t1.a = 10',
        t1.a is the column operand and 10 the constant operand.
        """
        constants = []
        columns = []
        for node in (self.condition.left, self.condition.right):
            if isinstance(node, TableColumn):
                columns.append(node)
            else:
                constants.append(node)
        # The predicate must reference both a column and a constant.
        if not (constants and columns):
            raise ExecutorCheckError('IndexScan condition error.')
        # BUGFIX: was 'and', which for a two-operand condition could never be
        # true; either side holding more than one operand must be rejected.
        if len(columns) != 1 or len(constants) != 1:
            raise ExecutorCheckError('Only supported one condition/value column.')
        self.condition_column = columns[0]
        self.constant = constants[0]
        self.fill_in_columns()

    def fill_in_columns(self):
        # Build the schema describing the tuples fetched through the index.
        self.columns = []
        self.table_name = catalog_index.select(lambda r: r.index_name == self.index_name)[0].table_name
        for column in catalog_table.select(lambda r: r.table_name == self.table_name)[0].columns:
            self.columns.append(TableColumn(table_name=self.table_name, column_name=column))

    def next_location(self):
        """Yield the storage locations of the matching tuples."""
        if not self.condition:
            raise NotImplementedError()
        elif self.condition.sign == '=':
            for location in self.tuple_get_equal_value(index_name=self.index_name, value=(self.constant,)):
                yield location
        elif self.condition.sign == '>':
            # e.g. ... where t1.a > 100 is equivalent to ... where 100 < t1.a,
            # so which bound the constant fills depends on which side of the
            # sign the column sits on.
            start = (float('-inf'),)
            end = (float('inf'),)
            if isinstance(self.condition.left, TableColumn):
                start = (self.constant,)
            else:
                end = (self.constant,)
            for location in self.tuple_get_range(index_name=self.index_name, start=start, end=end):
                yield location
        elif self.condition.sign == '<':
            # e.g. ... where t1.a < 100 is equivalent to ... where 100 > t1.a.
            start = (float('-inf'),)
            end = (float('inf'),)
            if isinstance(self.condition.left, TableColumn):
                end = (self.constant,)
            else:
                start = (self.constant,)
            for location in self.tuple_get_range(index_name=self.index_name, start=start, end=end):
                yield location
        else:
            raise NotImplementedError(
                f'not supported operation {self.condition.sign} for {self.name}'
            )

    def next(self):
        # next_location() is like handing out pointers/references; next()
        # dereferences them (*p) into the concrete tuples.
        for location in self.next_location():
            yield table_tuple_get_one(table_name=self.table_name, location=location)

    def close(self):
        pass

class CoveredIndexScan(IndexScan):
    """Index scan whose index keys already contain every column needed."""

    def __init__(self, index_name: str, condition=None):
        super().__init__(index_name, condition)
        self.name = 'CoveredIndexScan'
        # Covered variants of the index accessors return keys, not locations.
        self.tuple_get_equal_value = covered_index_tuple_get_equal_value
        self.tuple_get_range = covered_index_tuple_get_range

    def next(self):
        # A covering index yields the key itself, which can be handed
        # straight to the parent operator — no extra table lookup.
        yield from self.next_location()

class Materialize(PhysicalOperator):
    """Base operator that buffers its child's entire output in memory."""

    def __init__(self, name):
        super().__init__(name)
        # Materialized rows: a list of tuples (a 2-D array, in effect).
        self.tuples = []

    def open(self):
        if len(self.children) != 1:
            raise ExecutorCheckError(f'{self.name} only supports one child')
        only_child = self.children[0]
        only_child.open()
        # NOTE: this shares the child's columns object (a reference, not a copy).
        self.columns = only_child.columns

    def materialize(self):
        # Drain every child into the in-memory buffer.
        for child in self.children:
            self.tuples.extend(child.next())

    def close(self):
        for child in self.children:
            child.close()

    def next(self):
        pass

class Sort(Materialize):
    """Single-column sort operator.

    Materializes the child's output, then sorts it either fully in memory
    (internal sort) or via a k-way merge of sorted on-disk chunks
    (external sort).
    """

    INTERNAL_SORT = 'internal sort'
    EXTERNAL_SORT = 'external sort'
    HEAP_SORT = 'heap sort'  # not implemented yet; would only swap the algorithm

    # TODO: only single-column sorting is implemented here.
    def __init__(self, sort_column: TableColumn, asc=True):
        super().__init__('Sort')
        self.columns = None
        assert isinstance(sort_column, TableColumn)
        self.sort_column = sort_column
        self.asc = asc
        self.method = self.INTERNAL_SORT  # in-memory sort by default
        self.sort_column_index = None  # tuple index of the sort column

    def open(self):
        super().open()

        # Position of the sort column inside each materialized tuple.
        self.sort_column_index = self.columns.index(self.sort_column)

    def _sort_key(self, tup):
        # Shared key extractor for both sort strategies.
        return tup[self.sort_column_index]

    def next(self):
        self.materialize()

        if self.method == self.INTERNAL_SORT:
            for tup in self.internal_sort():
                yield tup
        elif self.method == self.EXTERNAL_SORT:
            for tup in self.external_sort():
                yield tup
        else:
            raise NotImplementedError(f'not supported sort method {self.method}')

    def internal_sort(self):
        # Just use Python's built-in sort.
        self.tuples.sort(key=self._sort_key, reverse=(not self.asc))
        for tup in self.tuples:
            yield tup

    def external_sort(self):
        max_part_size = 2
        chunks = [self.tuples[i: i + max_part_size] for i in range(
            0, len(self.tuples), max_part_size)]

        # External sort: each chunk is sorted independently and the partial
        # results are spilled to disk.
        if not os.path.exists(TEMP_DIRECTORY):
            os.mkdir(TEMP_DIRECTORY)

        temp_files = []
        for i, chunk in enumerate(chunks):
            chunk.sort(key=self._sort_key, reverse=(not self.asc))
            temp_file = os.path.join(TEMP_DIRECTORY, f'temp_sort_{get_current_session_id()}_{i}')

            with open(temp_file, 'wb') as f:
                for item in chunk:
                    # BUGFIX: the old format wrote pickle.dumps(item) + b'\n'
                    # and read it back with readline(). Pickle bytes can
                    # themselves contain 0x0a, which would split a record in
                    # the middle. Pickle streams are self-delimiting, so
                    # back-to-back dump()/load() is the safe framing.
                    pickle.dump(item, f)
            temp_files.append(temp_file)

        # Merge phase.
        file_fds = [open(temp_file, 'rb') for temp_file in temp_files]

        def read_next(fd):
            # Next tuple from fd, or None when the chunk is exhausted.
            try:
                return pickle.load(fd)
            except EOFError:
                return None

        # Take the first element of each chunk, then keep them sorted.
        first_items = []
        # Compensation / reverse-lookup map ("inverted index"): for each
        # buffered item, the fd index(es) it came from, so that after
        # yielding it we can refill the buffer from the right chunk and keep
        # first_items populated from as many chunks as possible.
        file_fd_index = {}
        for i, file_fd in enumerate(file_fds):
            item = read_next(file_fd)
            if item is None:
                continue  # defensive: chunks are never empty here
            first_items.append(item)
            file_fd_index.setdefault(item, []).append(i)

        first_items.sort(key=self._sort_key, reverse=(not self.asc))

        # k-way merge: pop the smallest buffered item, then refill from the
        # chunk it originated in.
        while len(first_items) > 0:
            item = first_items.pop(0)
            fd_index = file_fd_index[item].pop(0)
            if len(file_fd_index[item]) == 0:
                del file_fd_index[item]  # GC-friendly, not strictly required
            yield item

            next_item = read_next(file_fds[fd_index])
            if next_item is None:
                continue

            first_items.append(next_item)
            # Re-sort the buffer; already-emitted items were popped out above.
            first_items.sort(key=self._sort_key, reverse=(not self.asc))

            file_fd_index.setdefault(next_item, []).append(fd_index)

        for file_fd in file_fds:
            # Close and remove the temporary spill files.
            file_fd.close()
            os.unlink(file_fd.name)
class HashAgg(Materialize):
    """Hash-based grouping and aggregation.

    For example, in
        select a, count(a) from t1 group by a;
    group_by_column is the column named in the GROUP BY clause and
    aggregate_column is the column inside the aggregate function.
    """

    def __init__(self, group_by_column: TableColumn, aggregate_function_name: str, aggregate_column: TableColumn):
        super().__init__('HashAgg')
        self.group_by_column = group_by_column
        self.aggregate_function_name = aggregate_function_name
        self.aggregate_column = aggregate_column

        # HAVING is not part of the supported grammar, so no filtering here.
        # If it were implemented, this would hold a Condition object:
        # self.having_condition = Condition()

        # Tuple positions of the group-by / aggregate columns, set in open().
        self.group_by_column_index = None
        self.aggregate_column_index = None

    def open(self):
        super().open()

        self.group_by_column_index = self.columns.index(self.group_by_column)
        self.aggregate_column_index = self.columns.index(self.aggregate_column)

        # Advertise the output schema so the parent operator knows what each
        # position of the yielded tuples means.
        self.columns = (self.group_by_column, self.aggregate_column)

    @staticmethod
    def _aggregate_function(name: str):
        """Look up the aggregate callback registered in the catalog."""
        results = catalog_function.select(lambda r: r.function_name == name and r.agg_function)
        if len(results) != 1:
            raise ExecutorCheckError(f'not supported aggregate method {name}')
        return results[0].callback

    def next(self):
        self.materialize()
        # 1. Hash phase: bucket aggregate values by group key.
        hash_table = {}
        for tup in self.tuples:
            key = tup[self.group_by_column_index]
            value = tup[self.aggregate_column_index]
            hash_table.setdefault(key, []).append(value)

        # PERF: the catalog lookup is loop-invariant — resolve the aggregate
        # callback once instead of once per group (this also surfaces an
        # unsupported function name before any row is emitted).
        func = self._aggregate_function(self.aggregate_function_name)

        # 2. Aggregate phase: fold each bucket's values into one result.
        for key, values in hash_table.items():
            yield (key, func(values))

class NestedLoopJoin(PhysicalOperator):
    """Two-table nested-loop join: inner, cross, left, right and full joins."""

    def __init__(self, join_type: str, left_table_name: str, right_table_name: str, join_condition: Condition):
        super().__init__('NestedLoopJoin')
        self.join_type = join_type
        self.left_table_name = left_table_name
        self.right_table_name = right_table_name
        self.join_condition = join_condition

    def open(self):
        assert len(self.children) == 2
        for child in self.children:
            child.open()

        # Output schema: left table's columns followed by the right's.
        self.columns = self.left_table.columns + self.right_table.columns

    def next(self):
        if self.join_type == JoinType.INNER_JOIN:
            generator = self.inner_join()
        elif self.join_type == JoinType.CROSS_JOIN:
            generator = self.cross_join()
        elif self.join_type == JoinType.LEFT_JOIN:
            generator = self.left_join()
        elif self.join_type == JoinType.RIGHT_JOIN:
            generator = self.right_join()
        elif self.join_type == JoinType.FULL_JOIN:
            generator = self.full_join()
        else:
            raise ExecutorCheckError(f'not supported join type {self.join_type}')

        for tup in generator:
            yield tup

    def close(self):
        for child in self.children:
            child.close()

    def cross_join(self):
        # Cartesian product: two plain, condition-free loops, ignoring any
        # other I/O cost. Which table is outer vs inner does not matter —
        # the result is the same either way.
        for left_tuple in self.left_table.next():
            for right_tuple in self.right_table.next():
                yield left_tuple + right_tuple

    def inner_join(self):
        for tup in self.cross_join():
            values = cast_tuple_pair_to_values(columns=self.columns, tup=tup)
            if is_condition_true(values, self.join_condition):
                yield tup

    @property
    def left_table(self):
        assert self.children[0].columns[0].table_name == self.left_table_name
        # Only two-table joins are supported and a join's children can only
        # be scans, so the assert above is equivalent to:
        # assert self.children[0].table_name == self.left_table_name
        return self.children[0]

    @property
    def right_table(self):
        assert self.children[1].columns[0].table_name == self.right_table_name
        return self.children[1]

    def outer_join(self, outer_table, inner_table, exchange=False):
        # LEFT and RIGHT joins are symmetric: t1 LEFT JOIN t2 is equivalent
        # to t2 RIGHT JOIN t1, so both share this routine.
        # None plays the role of SQL NULL: an unmatched outer row gets its
        # inner-side fields padded with None.
        # exchange=False: outer_table is the left table, inner_table the right.
        # exchange=True:  outer_table is the right table, inner_table the left.
        if not exchange:
            padding_nulls = tuple(None for _ in range(len(inner_table.columns)))
        else:
            padding_nulls = tuple(None for _ in range(len(outer_table.columns)))

        for outer_tuple in outer_table.next():
            matching_tuples = []
            for inner_tuple in inner_table.next():
                if not exchange:
                    joined_tuple = outer_tuple + inner_tuple
                else:
                    joined_tuple = inner_tuple + outer_tuple
                values = cast_tuple_pair_to_values(columns=self.columns, tup=joined_tuple)
                if is_condition_true(values, self.join_condition):
                    matching_tuples.append(joined_tuple)
            # No match: emit the outer row padded with NULLs.
            if len(matching_tuples) == 0:
                if not exchange:
                    matching_tuples.append(outer_tuple + padding_nulls)
                else:
                    matching_tuples.append(padding_nulls + outer_tuple)

            for tup in matching_tuples:
                yield tup

    def left_join(self):
        for tup in self.outer_join(self.left_table, self.right_table):
            yield tup

    def right_join(self):
        for tup in self.outer_join(self.right_table, self.left_table, exchange=True):
            yield tup

    def full_join(self):
        padding_nulls_left = tuple([None] * len(self.left_table.columns))
        padding_nulls_right = tuple([None] * len(self.right_table.columns))

        # Both sides are materialized here because each tuple is consumed
        # more than once, unlike the other join types where every tuple is
        # used exactly once.
        left_tuples = list(self.left_table.next())
        right_tuples = list(self.right_table.next())

        # Left-join phase: inner matches plus NULL-padded unmatched left rows.
        for left_tuple in left_tuples:
            matching_tuples = []
            for right_tuple in right_tuples:
                joined_tuple = left_tuple + right_tuple
                values = cast_tuple_pair_to_values(columns=self.columns, tup=joined_tuple)
                if is_condition_true(values, self.join_condition):
                    matching_tuples.append(joined_tuple)
            # At this point the inner-join part is done; the padding below
            # completes the left-join part.
            if len(matching_tuples) == 0:
                matching_tuples.append(left_tuple + padding_nulls_right)

            for tup in matching_tuples:
                yield tup

        # Right-join phase: emit only the right rows that matched no left
        # row, so the inner-join results are not returned a second time.
        # BUGFIX: this pass used to sit INSIDE the left-tuple loop (and its
        # inner loop shadowed left_tuple), so every unmatched right row was
        # yielded once per left row. It must run exactly once, after the
        # left pass completes.
        for right_tuple in right_tuples:
            not_matched = True
            for candidate_left in left_tuples:
                joined_tuple = candidate_left + right_tuple
                values = cast_tuple_pair_to_values(columns=self.columns, tup=joined_tuple)
                if is_condition_true(values, self.join_condition):
                    not_matched = False
                    break
            if not_matched:
                yield padding_nulls_left + right_tuple

class PhysicalQuery(PhysicalOperator):
    def __init__(self):
        """
        A dummy root operator — a small programming trick.
        It records execution-stage bookkeeping such as elapsed time.
        """
        super().__init__('Results')
        self.open_time = 0
        self.close_time = 0

        # Number of rows produced (affected rows).
        self.actual_rows = 0

        # Indexes, into the child's tuples, of the columns to project.
        self.projection_column_ids = []

    def open(self):
        # Monotonic timestamps ignore manual wall-clock changes on the OS.
        self.open_time = time.monotonic()
        for child in self.children:
            child.open()

        # For each requested output column, find its index in the child's
        # output schema.
        child_columns = self.children[0].columns  # all columns the child returns

        for target_column in self.columns:
            for j, child_column in enumerate(child_columns):
                if target_column == child_column and j not in self.projection_column_ids:
                    self.projection_column_ids.append(j)

    def next(self):
        for child in self.children:
            for tup in child.next():
                # BUGFIX: actual_rows used to be incremented once per child
                # instead of once per emitted row; count each row here.
                self.actual_rows += 1
                # Drop the unneeded columns, i.e. perform the projection.
                yield tuple(tup[i] for i in self.projection_column_ids)

    def close(self):
        for child in self.children:
            child.close()

        self.close_time = time.monotonic()

    @property
    def elapsed_time(self):
        # Total wall-clock execution time.
        return self.close_time - self.open_time

def is_condition_true(values: dict, condition: Condition):
    """Evaluate a binary condition against a row's column->value mapping.

    Operands that are TableColumns are resolved through `values`; any other
    operand is treated as a literal constant.
    """
    def resolve(operand):
        return values[operand] if isinstance(operand, TableColumn) else operand

    left = resolve(condition.left)
    right = resolve(condition.right)

    sign = condition.sign
    if sign == '=':
        return left == right
    if sign == '>':
        return left > right
    if sign == '<':
        return left < right
    if sign == '!=':
        return left != right
    raise NotImplementedError()

def cast_tuple_pair_to_values(columns, tup):
    """Zip column descriptors with a tuple's values into a dict.

    E.g. columns [TableColumn(t1, id), TableColumn(t1, name)] paired with
    tuple (1, 'xiaoming') yields {TableColumn(t1, id): 1,
                                  TableColumn(t1, name): 'xiaoming'}.
    """
    return dict(zip(columns, tup))

