#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# # author : cypro666
# # date   : 2015-09-01
# # Simple Python implementation of the Apriori Algorithm
import argparse
import sys
from collections import defaultdict
from itertools import chain, combinations
from optparse import OptionParser
from string import Template

from magic3.logger import Logger
from magic3.filesystem import *
add_sys_path(grand_dir(__file__))

from skt.utils import load_data


class Apriori(object):
    ''' Simple Apriori algorithm implementation, not fast but easy to use.

        Mines frequent itemsets and association rules from a transaction
        data file, filtered by minimum support and minimum confidence.
    '''

    def __init__(self, min_support, min_confidence, logfile):
        ''' min_support and min_confidence must both be > 1e-3;
            logfile is handed to the project Logger '''
        if min_support <= 1e-3 or min_confidence <= 1e-3:
            raise ValueError('invalid parameters!')

        self._mins = min_support      # minimum support threshold
        self._minc = min_confidence   # minimum confidence threshold
        self._fmt = Template('${left} => ${right} confidence: ${value}\n')
        self._log = Logger(logfile)

    @property
    def name(self):
        ''' algorithm name '''
        return 'Apriori'

    def join_set(self, item_set, length):
        ''' join frequent itemsets pairwise, keeping only unions of exactly `length` items '''
        return set(i.union(j) for i in item_set for j in item_set if len(i.union(j)) == length)

    def sub_sets(self, arr):
        ''' return an iterator over every non-empty subset (as tuples) of `arr` '''
        # idiomatic replacement of chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])
        return chain.from_iterable(combinations(arr, size) for size in range(1, len(arr) + 1))

    def items_with_min_support(self, item_set, trans_list, min_support, freqset):
        ''' Return the subset of `item_set` whose support in `trans_list` is
            >= `min_support`.  Raw occurrence counts are also accumulated into
            the shared `freqset` counter (used later for support lookups). '''
        local_count = defaultdict(int)

        for item in item_set:
            for transaction in trans_list:
                if item.issubset(transaction):
                    freqset[item] += 1
                    local_count[item] += 1

        num_trans = len(trans_list)
        # local_count is empty when trans_list is empty, so no division by zero here
        return set(item for item, count in local_count.items()
                   if count / num_trans >= min_support)

    def get_trans_list(self, data_iterator):
        ''' Build the transaction list (as frozensets) and the set of
            candidate 1-itemsets from an iterable of records. '''
        trans_list = []
        item_set = set()

        for record in data_iterator:
            transaction = frozenset(record)
            trans_list.append(transaction)
            for item in transaction:
                item_set.add(frozenset([item]))  # generate 1-itemsets

        return item_set, trans_list

    def run(self, datafile):
        ''' Run the apriori algorithm on `datafile` (loaded via load_data).
            return:
             - items: list of (itemset_tuple, support)
             - rules: list of ((antecedent_tuple, consequent_tuple), confidence)
        '''
        item_set, trans_list = self.get_trans_list(load_data(datafile))

        if not trans_list:
            # empty dataset: nothing to mine (previously raised ZeroDivisionError)
            return [], []

        freqset = defaultdict(int)
        large_set = {}  # itemset size -> set of frequent itemsets of that size

        cur_set = self.items_with_min_support(item_set, trans_list, self._mins, freqset)
        size = 2

        # level-wise search: grow itemsets until no candidate survives the support filter
        while cur_set:
            large_set[size - 1] = cur_set
            candidates = self.join_set(cur_set, size)
            cur_set = self.items_with_min_support(candidates, trans_list, self._mins, freqset)
            size += 1

        num_trans = len(trans_list)

        def _support(item):
            ''' support of a frequent itemset, from the accumulated counts '''
            return freqset[item] / num_trans

        ret_items = [(tuple(item), _support(item))
                     for value in large_set.values() for item in value]

        ret_rules = []

        # rules come only from itemsets of size >= 2 (skip the 1-itemset level)
        for key, value in tuple(large_set.items())[1:]:
            for item in value:
                for element in map(frozenset, self.sub_sets(item)):
                    remain = item.difference(element)
                    if remain:  # skip element == item (empty consequent)
                        confidence = _support(item) / _support(element)
                        if confidence >= self._minc:
                            ret_rules.append(((tuple(element), tuple(remain)), confidence))

        return ret_items, ret_rules

    # TODO: rebuild this part later?
    def output(self, items, rules, outputfile):
        ''' Write frequent itemsets and association rules to `outputfile` as
            plain text, both sorted ascending by their metric. '''
        # `with` guarantees the file is closed even if a write fails
        with open(outputfile, 'w') as csv:
            csv.write('sets with support >= %.3f\n' % self._mins)

            for item, support in sorted(items, key=lambda pair: pair[1]):
                if len(item) < 2 or support < self._mins:
                    continue
                csv.write(' '.join(str(i) for i in item))
                csv.write(' support: %.4f\n' % support)

            csv.write('\nrules\n')

            for rule, confidence in sorted(rules, key=lambda pair: pair[1]):
                pre, post = rule
                csv.write(self._fmt.substitute(left=' '.join(str(s) for s in pre),
                                               right=' '.join(str(s) for s in post),
                                               value=round(confidence, 5)))


def parse_args(argv=None):
    ''' Parse command-line options (argparse replaces the deprecated optparse).

        Option names and defaults are unchanged; --train_file and
        --results_file are now enforced by the parser itself instead of
        a manual post-parse check.
    '''
    parser = argparse.ArgumentParser(
        description='Simple Python implementation of the Apriori Algorithm')
    parser.add_argument('--train_file', required=True,
                        help='csv file name of train data')
    parser.add_argument('--log_file',
                        help='log file name with full path')
    parser.add_argument('--min_support', type=float, default=0.15,
                        help='minimum support value')
    parser.add_argument('--min_confidence', type=float, default=0.6,
                        help='minimum confidence value')
    parser.add_argument('--results_file', required=True,
                        help='file name to store results')
    return parser.parse_args(argv)


def main():
    ''' script entry point: mine itemsets/rules and write them to the results file '''
    options = parse_args()
    apriori = Apriori(options.min_support, options.min_confidence, options.log_file)
    items, rules = apriori.run(options.train_file)
    apriori.output(items, rules, options.results_file)


if __name__ == "__main__":
    main()




