from __future__ import with_statement

import functools

try:
    import cPickle as pickle
except ImportError: #pragma: no cover
    import pickle

from kineta.cmp import (cmp_rows, cmp_values, Largest, InvertKey, InternalRow,
                        HashableVal, dump_key, load_key)
import kineta.berkdb as berkdb
import kineta.utils as utils


class Strategy(object):
    """Common base for all query-execution strategies.

    Holds the database handle, limit configuration and join name that
    every concrete strategy needs, plus the shared iteration and
    query-plan plumbing.
    """

    def __init__(self, opts):
        self._db = opts.db
        self._dblimits = opts.dblimits
        self._cnt = 0
        self.joinname = opts.joinname

    def __iter__(self):
        # Concrete subclasses provide gen() as a generator factory.
        return self.gen()

    def get_plan(self):
        """Return this strategy's query plan as a dict."""
        plan = {}
        self.get_plan_detail(plan)
        return plan

    def check_dup_join(self, names):
        """Register our join name in *names*, failing on a duplicate."""
        # NOTE(review): KinetaError is not imported in this module, so a
        # duplicate join name would currently raise NameError instead --
        # confirm where KinetaError should be imported from.
        if self.joinname in names:
            raise KinetaError("Duplicate join name '%s'" % self.joinname)
        names.add(self.joinname)

    def close(self):
        """Release resources; overridden by strategies that hold cursors."""
        pass


class ScanStrategy(Strategy):
    """Base for strategies that scan rows (table scan, index scan, sort).

    Beyond the bookkeeping shared by all scans (rows examined vs.
    returned), this class tracks which WHERE columns -- and which
    permutations of the single-valued equality columns -- matched rows,
    so that index_update() can decide after the scan whether creating an
    index would have helped.
    """

    def __init__(self, idx, forcebkmark, opts):
        super(ScanStrategy, self).__init__(opts)
        self._tableid = opts.tableid
        self._colset = opts.colset
        self._whereobj = opts.whereobj
        self.ordercols = opts.ordercols
        self._op = opts.op
        self._idx = idx                    # index definition in use, or None
        self._forcebkmark = forcebkmark    # bookmark lookups cannot be avoided
        self._largesort = False
        self._returned = 0
        self._examined = 0
        self._bookmark = False
        # column (or tuple of columns) -> count of rows it matched
        self._colsreturned = {}
        if self._whereobj is not None:
            for col in sorted(self._whereobj):
                self._add_permutations(col, False)

    def _prune_row(self, row):
        """Strip columns not requested by a SELECT (mutates *row* in place)."""
        if self._op == "select":
            # copy the keys so we can delete while iterating
            for key in list(row.keys()):
                if key not in self._colset:
                    del row[key]

    def index_update(self):
        """After a scan, create or refresh an index if the stats warrant it."""
        index_flag = self._dblimits is not None and \
                     self._dblimits.index_where(self._examined, self._returned)
        # BUGFIX: check _dblimits before dereferencing it -- the original
        # read max_bookmark_lookup unconditionally and raised
        # AttributeError when no limits object was configured.
        bookmark_flag = self._bookmark and self._dblimits is not None and \
                        self._returned > self._dblimits.max_bookmark_lookup \
                        and not self._forcebkmark
        if self._largesort:
            # a large in-memory sort happened: index the ORDER BY columns
            ind = [oc for oc in self.ordercols if oc[0] is not None]
            self._add_missing_cols(ind)
            self._db.tables.index_create(self._tableid, ind, False)
        elif index_flag or bookmark_flag:

            def sortkey(col):
                # primary: ascending match count; secondary: longer
                # permutations first (mirrors the original cmp(...) * -1)
                length = len(col) if isinstance(col, tuple) else 1
                return (self._colsreturned[col], -length)

            colorder = sorted(self._colsreturned, key=sortkey)
            # BUGFIX: the original left `ind` unbound (NameError) when no
            # columns were tracked; only build an index when there is a
            # candidate column to build it on.
            if len(colorder) > 0:
                lowestcol = colorder[0]
                if isinstance(lowestcol, tuple):
                    ind = [(self._get_id(c), "a") for c in lowestcol]
                else:
                    ind = [(self._get_id(lowestcol), "a")]
                if bookmark_flag:
                    self._add_missing_cols(ind)
                self._db.tables.index_create(self._tableid, ind, False)
        elif self._idx is not None:
            self._db.tables.index_update_time(self._tableid, self._idx)

    def _get_id(self, col):
        """Map a column name to its dictionary id (non-strings pass through)."""
        if isinstance(col, (str, unicode)):
            col = self._db.dict.get_id(col)
        return col

    def _add_permutations(self, col, skipcheck):
        """Track *col* and, when it is a single-valued equality column,
        every permutation of it with the other single-valued columns.

        *skipcheck* is set on recursive calls, where the tuple built so
        far is already known to consist of single-valued columns.
        """
        self._colsreturned[col] = 0
        if skipcheck or self._whereobj[col].has_single_value():
            for nextcol in sorted(self._whereobj):
                if (isinstance(col, tuple) and nextcol not in col) or \
                   (not isinstance(col, tuple) and nextcol != col):
                    if self._whereobj[nextcol].has_single_value():
                        if isinstance(col, tuple):
                            perm = list(col)
                            perm.append(nextcol)
                            self._add_permutations(tuple(perm), True)
                        else:
                            self._add_permutations((col, nextcol), True)

    def _process_col_results(self, results):
        """Fold one row's per-column WHERE results into the match counters.

        A tuple permutation only counts when every member column matched.
        """
        for perm in self._colsreturned:
            if isinstance(perm, tuple):
                if all(results[col] for col in perm):
                    self._colsreturned[perm] += 1
            else:
                if results[perm]:
                    self._colsreturned[perm] += 1

    def _add_missing_cols(self, ind):
        """Append to *ind* any selected non-string columns it lacks,
        so the resulting index covers the query."""
        missing = set()
        for col in self._colset:
            if not isinstance(col, (str, unicode)):
                missing.add(self._get_id(col))
        for col, dir in ind:
            missing.discard(col)
        for col in missing:
            ind.append((self._get_id(col), 'a'))


class EmptyStrategy(Strategy):
    """Strategy that yields no rows (e.g. a provably-empty result set)."""

    def __init__(self, opts):
        super(EmptyStrategy, self).__init__(opts)

    def gen(self):
        # The strategy is its own (always-exhausted) iterator.
        return self

    def next(self):
        raise StopIteration()

    # Python 3 spells the iterator protocol __next__; the alias is
    # harmless under Python 2 and makes the class forward-compatible.
    __next__ = next

    def index_update(self):
        # Nothing was scanned, so there is nothing to index.
        pass

    def get_plan_detail(self, plan):
        plan['strategy'] = 'empty'


class FieldListStrategy(object):
    """Sequentially iterate the pickled rows of a single table.

    Opens the table read-only and walks every record with a cursor,
    unpickling each row payload as it goes.
    """

    def __init__(self, db, txn, lock, tableid):
        self._tblobj = db.tables.table_open(lock, tableid, txn, True)
        if self._tblobj is not None:
            self._cursor = self._tblobj.cursor(txn)
        else:
            self._cursor = None

    def __iter__(self):
        return self.gen()

    def gen(self):
        """Yield each unpickled row in cursor order."""
        if self._cursor is None: #pragma: no cover
            return
        record = self._cursor.first()
        while record is not None:
            _rowid, payload = record
            yield pickle.loads(payload)
            record = self._cursor.next()

    def close(self):
        """Close the cursor; safe to call more than once."""
        cursor, self._cursor = self._cursor, None
        if cursor is not None:
            cursor.close()


class TableScanStrategy(ScanStrategy):
    """Full scan over a table, testing every row against the WHERE clause."""

    def __init__(self, txn, lock, opts):
        super(TableScanStrategy, self).__init__(None, False, opts)
        self._tblobj = self._db.tables.table_open(lock, opts.tableid, txn,
                                                  True)
        self._cursor = None
        self.row_count_est = None
        if self._tblobj is None:
            self.row_count_est = 0
        else:
            self._cursor = self._tblobj.cursor(txn)
            if self._whereobj is not None:
                self.row_count_est = self._tblobj.num_records(txn)

    def update(self, rowid, data):
        # Overwrite the record the cursor is currently positioned on.
        self._cursor.put(0, data, berkdb.DB_CURRENT)

    def delete(self):
        # Remove the record under the cursor.
        self._cursor.delete()

    def update_commit(self):
        # Table scans never open their own transaction; nothing to commit.
        pass

    def get_plan_detail(self, plan):
        plan['strategy'] = 'table_scan'
        plan['rows_examined'] = self._examined
        plan['rows_returned'] = self._returned
        plan['table'] = self._db.dict.get_name(self._tableid)
        plan['estimated_row_count'] = self.row_count_est

    def gen(self):
        """Yield an InternalRow for every record matching the WHERE clause."""
        if self._cursor is None: #pragma: no cover
            return
        record = self._cursor.first()
        while record is not None:
            rowid, payload = record
            row = pickle.loads(payload)
            self._examined += 1
            matched = {}
            keep = (self._whereobj is None or
                    self._whereobj.test_row(row, matched))
            if matched:
                self._process_col_results(matched)
            if keep:
                self._returned += 1
                self._prune_row(row)
                yield InternalRow(self.joinname, row, rowid, len(payload))
            record = self._cursor.next()

    def close(self):
        """Close the cursor; safe to call more than once."""
        cursor, self._cursor = self._cursor, None
        if cursor is not None:
            cursor.close()


class IndexScanStrategy(ScanStrategy):
    """Scan rows through a secondary index.

    For selects that only need indexed columns the rows are decoded
    straight from the index keys ("detached" index, no table access);
    otherwise each index hit is followed back to the table row
    ("bookmark lookup").  Supports equality prefixes, range scans and
    reverse scans.
    """

    def __init__(self, txn, lock, idx, opts):
        super(IndexScanStrategy, self).__init__(idx, opts.op != "select", opts)
        # set bookmark = True if we cant pull results directly from the index
        # keys, so we have to go to the table instead
        self._bookmark = opts.op != "select" or \
                         (len(utils.no_strings(self._colset) -
                          set(col for col, dir in self._idx)) > 0)
        self._autotxn = False    # did we open the transaction ourselves?
        self._committxn = False  # commit (vs abort) the auto-txn on close

        # updates must be done in a transaction to avoid a deadlock
        if txn is None and opts.op == "update":
            txn = self._db.txn_begin(berkdb.DB_READ_COMMITTED, None)
            self._autotxn = True
        self._txn = txn
        self._tblobj = self._db.tables.table_open(lock, opts.tableid, txn,
                                                  True)
        self._idxobj = None
        self._cursor = None
        self.row_count_est = None
        if self._tblobj is not None:
            # open detached when no bookmark lookups will be needed
            self._idxobj = self._db.tables.index_open(lock, self._tblobj,
                                                 opts.tableid, idx, txn,
                                                 detached=(not self._bookmark))
            self._cursor = self._idxobj.cursor(txn)
            if self._whereobj is None:
                self.row_count_est = self._tblobj.num_records(txn)
        else:
            self.row_count_est = 0

    def get_plan_detail(self, plan):
        """Fill *plan* with this scan's strategy, counters and index info."""
        plan['strategy'] = 'index_scan'
        plan['rows_examined'] = self._examined
        plan['rows_returned'] = self._returned
        plan['bookmark_lookup'] = self._bookmark
        plan['table'] = self._db.dict.get_name(self._tableid)
        plan['index'] = utils.get_index_names(self._db, self._idx)
        plan['estimated_row_count'] = self.row_count_est

    def update(self, rowid, data):
        """Rewrite table row *rowid* inside the scan's transaction."""
        self._tblobj.update(rowid, data, self._txn)

    def update_commit(self):
        # The actual commit of an auto-opened transaction happens in
        # close(); here we only record that it should commit, not abort.
        self._committxn = True

    def delete(self):
        # Deleting through the index cursor removes the underlying record.
        self._cursor.delete()

    def gen(self):
        """Yield InternalRows matching the WHERE clause via the index."""
        if self._cursor is None: #pragma: no cover
            return

        # decide whether to scan multiple columns with the == operator
        # or whether to do a range scan on the first column
        singlevalues = {}
        firstcol = None
        needslargest = None
        num = 0 # the number of elements in the key
        for col, dir in self._idx:
            if self._whereobj is None:
                firstcol = col
                num = 1
                break
            expr = self._whereobj.get(col, None)
            if (expr is not None) and expr.has_single_value() and \
               (expr.value is not None):
                singlevalues[col] = expr.value
                num += 1
            else:
                # first non-equality column: it becomes the range column
                # unless an equality prefix was already collected
                if len(singlevalues) == 0:
                    firstcol = col
                    num = 1
                break

        # scan in the opposite direction?
        reverse = (self.ordercols is not None) and \
                  (self._idx[0][1] != self.ordercols[0][1])

        # set keys properly if next column is descending
        if num < len(self._idx) and self._idx[num][1] == 'd' and not reverse:
            needslargest = self._idx[num][0]

        # set variables for bdb based on bookmark/reverse flags
        if self._bookmark:
            # pget records are (index key, record id, table data)
            idindex = 1
            valindex = 2
            method = getattr(self._cursor, "pget")
            loadmethod = pickle.loads
        else:
            # get records are (index key, data): the row's values are
            # decoded from the index key itself; rec[1] presumably holds
            # the record id -- confirm against the berkdb wrapper
            valindex = 0
            idindex = 1
            method = getattr(self._cursor, "get")
            loadmethod = lambda k: load_key(self._idx, k)
        if not reverse:
            nextflag = berkdb.DB_NEXT
            firstflag = berkdb.DB_FIRST
            lastflag = berkdb.DB_LAST
        else:
            nextflag = berkdb.DB_PREV
            firstflag = berkdb.DB_LAST
            lastflag = berkdb.DB_FIRST

        if len(singlevalues) > 0:
            # if scanning in reverse, jump to last matching item
            if reverse:
                singlevalues = InvertKey(singlevalues)
            # if next column is descending, we can't leave its value as None
            if needslargest is not None:
                singlevalues[needslargest] = Largest()

            rec = method(dump_key(self._idx, singlevalues),
                         berkdb.DB_SET_RANGE)
            # reverse scans always land on the row after the one we want
            if reverse:
                if rec is None:
                    rec = method(lastflag)
                else:
                    rec = method(nextflag)
            while rec is not None:
                size = len(rec[valindex])
                row = loadmethod(rec[valindex])
                self._examined += 1
                testresults = {}
                rowok = self._whereobj is None or \
                        self._whereobj.test_row(row, testresults)
                if len(testresults) > 0:
                    self._process_col_results(testresults)
                if rowok:
                    self._returned += 1
                    self._prune_row(row)
                    yield InternalRow(self.joinname, row, rec[idindex], size)
                else:
                    # rows sharing the equality prefix are clustered, so
                    # the first non-matching row ends the scan
                    break
                rec = method(nextflag)
        else:
            # get ranges to scan and reverse them if neccessary
            if self._whereobj is not None and firstcol in self._whereobj:
                ranges = self._whereobj[firstcol].get_ranges()
                rangelist = ranges['ranges']
                inclusivestart = ranges['start_inclusive']
                inclusiveend = ranges['end_inclusive']
            else:
                # no constraint on the range column: scan everything
                rangelist = [{}]
                inclusivestart = True
                inclusiveend = True
            # reverse exactly once if either the scan direction or the
            # index's own direction is descending (both would cancel out)
            revcount = 0
            if reverse:
                revcount += 1
            elif self._idx[0][1] == "d":
                revcount += 1
            if revcount == 1:
                rangelist.reverse()
                inclusivestart, inclusiveend = inclusiveend, inclusivestart
            # NOTE(review): the loop variable shadows the builtin range()
            for range in rangelist:
                if revcount == 1:
                    range = IndexScanStrategy._reverse(range)
                if 'begin' in range:
                    key = {firstcol: range['begin']}
                    # if doing a reverse scan jump to the last item
                    if (inclusivestart and reverse) or \
                       (not inclusivestart and not reverse):
                        key = InvertKey(key)
                    # if next column is descending, we can't leave its value
                    # as None
                    if needslargest is not None:
                        key[needslargest] = Largest()

                    rec = method(dump_key(self._idx, key), berkdb.DB_SET_RANGE)
                else:
                    # open-ended start: begin at the first record
                    rec = method(firstflag)

                # reverse scans always land on the row after the one we want
                if reverse:
                    if rec is None:
                        rec = method(lastflag)
                    else:
                        rec = method(nextflag)

                # we're positioned at (or before) the start, read rows until
                # we pass the end
                while rec is not None:
                    size = len(rec[valindex])
                    row = loadmethod(rec[valindex])

                    self._examined += 1
                    testresults = {}
                    rowok = self._whereobj is None or \
                            self._whereobj.test_row(row, testresults)
                    if len(testresults) > 0:
                        self._process_col_results(testresults)

                    if 'end' in range:
                        # stop once the range column passes the end bound
                        # (comparison is flipped for reversed scans)
                        r = cmp_values(range['end'], row.get(firstcol, None))
                        if revcount == 1:
                            r *= -1
                        if inclusiveend:
                            if r < 0:
                                break
                        else:
                            if r <= 0:
                                break

                    if rowok:
                        self._returned += 1
                        self._prune_row(row)
                        yield InternalRow(self.joinname, row, rec[idindex],
                                          size)

                    rec = method(nextflag)

                # since the ranges are supposed to be in sequential order,
                # we can skip the rest of the range scans if one of them hits
                # the end
                if rec is None:
                    break

    @staticmethod
    def _reverse(range):
        """Swap a range's begin/end bounds for reverse iteration."""
        # NOTE(review): the local shadows the builtin reversed()
        reversed = {}
        if 'begin' in range:
            reversed['end'] = range['begin']
        if 'end' in range:
            reversed['begin'] = range['end']
        return reversed

    def close(self):
        """Close cursor and index, then settle any auto-opened transaction."""
        if self._cursor is not None:
            self._cursor.close()
            self._cursor = None
        if self._idxobj is not None:
            self._idxobj.close()
            self._idxobj = None
        if self._autotxn and self._txn is not None:
            # commit only if update_commit() was called; otherwise abort
            if self._committxn:
                self._txn.commit()
            else: #pragma: no cover
                self._txn.abort()
            self._txn = None


class SortStrategy(ScanStrategy):
    """Materializes a sub-strategy's rows and yields them sorted.

    If the result set exceeds the configured limit, the sort is flagged
    as "large" so index_update() will build a supporting index for the
    next run instead of sorting in memory again.
    """

    def __init__(self, opts, create_index, substrategy):
        super(SortStrategy, self).__init__(None, False, opts)
        self._sub = substrategy
        self._iter = iter(substrategy)
        self._closed = False
        self._create_index = create_index
        # propagate the estimate so join planning above us can use it
        if hasattr(self._sub, 'row_count_est'):
            self.row_count_est = self._sub.row_count_est

    def check_dup_join(self, names):
        # Sorting adds no join name of its own; delegate downward.
        self._sub.check_dup_join(names)

    def get_plan_detail(self, plan):
        plan['strategy'] = 'sort'
        plan['in_memory'] = True
        plan['sub_plan'] = self._sub.get_plan()

    def index_update(self):
        if self._largesort and self._create_index:
            # build an index on the sort columns (ScanStrategy does it)
            super(SortStrategy, self).index_update()
        else:
            self._sub.index_update()

    def gen(self):
        """Drain the sub-strategy, sort in memory, then yield in order."""
        # Normalize two-element order specs to (joinname, col, dir).
        joinordercols = []
        for oc in self.ordercols:
            if len(oc) == 2:
                joinordercols.append((self._sub.joinname, oc[0], oc[1]))
            else:
                joinordercols.append(oc)

        results = []
        # BUGFIX: _dblimits may be None (index_update guards for this);
        # treat that as "no limit" instead of raising AttributeError.
        limit = None
        if self._dblimits is not None:
            limit = self._dblimits.max_sort_create_index
        cnt = 0
        try:
            while True:
                results.append(next(self._iter))
                cnt += 1
                if limit is not None and cnt >= limit:
                    self._largesort = True
        except StopIteration:
            # cmp_to_key keeps the comparator semantics while remaining
            # compatible with Python 3's key-only sort API; sort is stable
            # either way.
            results.sort(key=functools.cmp_to_key(
                lambda x, y: cmp_rows(joinordercols, x, y)))
            for r in results:
                yield r

    def close(self):
        if not self._closed:
            self._sub.close()
            self._closed = True


class MergeJoinStrategy(Strategy):
    """Merge join of two strategies whose outputs share a sort order."""

    def __init__(self, txn, lock, opts, parent, child, childname, childopts):
        super(MergeJoinStrategy, self).__init__(opts)
        self._parent = parent
        self._child = child
        self._childiter = iter(child)
        self._childname = childname
        # map child join column -> parent join column
        self._ondict = dict((c, p) for p, c in childopts['on'])
        self._childopts = childopts

    def check_dup_join(self, names):
        self._parent.check_dup_join(names)
        self._child.check_dup_join(names)

    def index_update(self):
        self._parent.index_update()
        self._child.index_update()

    def get_plan_detail(self, plan):
        plan['strategy'] = 'merge_join'
        plan['left'] = self._parent.get_plan()
        plan['right'] = self._child.get_plan()

    def gen(self):
        """Walk both sorted streams, combining rows with equal join keys."""
        childrow, childkey = self._next_childrow()
        for parentrow in self._parent:
            parentkey = parentrow.get_tuple(self._parent.joinname)[0]
            matched = False
            while childrow is not None:
                order = cmp_rows(self._parent.ordercols, parentkey, childkey)
                if order < 0:
                    # child stream is already past this parent row
                    break
                if order == 0:
                    joined = parentrow.clone()
                    joined.combine(childrow)
                    yield joined
                    matched = True
                childrow, childkey = self._next_childrow()

            if not matched and self._childopts['outer']:
                # outer join: emit parent rows that found no partner
                yield parentrow

    def _next_childrow(self):
        """Advance the child stream.

        Returns (row, join-key dict keyed by parent column) or
        (None, None) when the stream is exhausted.
        """
        try:
            childrow = self._childiter.next()
        except StopIteration:
            return None, None
        keyvals = {}
        (row, _rowid) = childrow.get_tuple(self._childname)
        for col in row:
            if col in self._ondict:
                keyvals[self._ondict[col]] = row[col]
        return childrow, keyvals


class HashJoinStrategy(Strategy):
    """Hash join: hash one input's join-key values, probe with the other.

    The side with the smaller row-count estimate is hashed ("first"),
    except for outer joins, where the parent side must be hashed so
    unmatched parent rows can still be emitted.
    """

    def __init__(self, txn, lock, opts, parent, child, childname, childopts):
        super(HashJoinStrategy, self).__init__(opts)
        self._parent = parent
        self._child = child
        # default: hash the parent side, probe with the child
        self._first = parent
        self._firstname = parent.joinname
        self._firstfldidx = 0
        self._second = child
        self._secondname = childname
        self._secondfldidx = 1
        self._childopts = childopts
        self.row_count_est = None

        parentcount = getattr(parent, 'row_count_est', None)
        childcount = getattr(child, 'row_count_est', None)
        if parentcount is not None and childcount is not None:
            # worst-case (cross product) estimate for planners above us
            self.row_count_est = parentcount * childcount
            if parentcount > childcount and not self._childopts['outer']:
                # child side is smaller: hash it and probe with the parent
                self._first = child
                self._firstname = childname
                self._firstfldidx = 1
                self._second = parent
                # BUGFIX: was the undefined name `parentname`, which
                # raised NameError whenever this branch was taken.
                self._secondname = parent.joinname
                self._secondfldidx = 0

    def check_dup_join(self, names):
        self._parent.check_dup_join(names)
        self._child.check_dup_join(names)

    def index_update(self):
        # XXX
        self._parent.index_update()
        self._child.index_update()

    def get_plan_detail(self, plan):
        plan['strategy'] = 'hash_join'
        plan['left'] = self._parent.get_plan()
        plan['right'] = self._child.get_plan()

    class RowHash(object):
        """Join-key -> ([build-side rows], [probe-side rows]) hash table."""

        def __init__(self):
            self._d = {}

        def set_parent(self, fldvals, row):
            """Record a build-side row under its key values."""
            if fldvals not in self._d:
                lists = ([], [])
                self._d[fldvals] = lists
            else:
                lists = self._d[fldvals]
            lists[0].append(row)

        def set_child(self, fldvals, row):
            """Record a probe-side row, but only under keys seen on the
            build side (others can never join)."""
            lists = self._d.get(fldvals, None)
            if lists is not None:
                lists[1].append(row)

        def rows(self, outer):
            """Yield combined rows; with *outer*, also unmatched parents."""
            # .values() works on Python 2 and 3 alike (was itervalues());
            # the dict itself is not resized during iteration.
            for parents, children in self._d.values():
                for parent in parents:
                    if len(children) == 0:
                        if outer:
                            yield parent
                    else:
                        for child in children:
                            r = parent.clone()
                            r.combine(child)
                            yield r
                # drop row references as soon as a bucket is drained
                children[:] = []
                parents[:] = []

    def gen(self):
        """Build the hash from self._first, probe with self._second."""
        rowhash = HashJoinStrategy.RowHash()

        for internalrow in self._first:
            fldvals = HashableVal(
                        tuple(internalrow.get_value(self._firstname,
                                                    flds[self._firstfldidx])
                              for flds in self._childopts['on']))
            rowhash.set_parent(fldvals, internalrow)

        for internalrow in self._second:
            fldvals = HashableVal(
                        tuple(internalrow.get_value(self._secondname,
                                                    flds[self._secondfldidx])
                              for flds in self._childopts['on']))
            rowhash.set_child(fldvals, internalrow)

        return rowhash.rows(self._childopts['outer'])
