# -*- coding: utf-8 -*-
__author__ = 'jiangchanghao'

from FavProcess import FavProcess
from FavUtil import NewDict

COMMON_TUPLE_INVOLVE_SCORE = 0.1
USER_COMMON_TUPLE_COUNT = 4
#########################################
# 1. init filter information from file -> only those tuple with score > COMMON_TUPLE_INVOLVE_SCORE will be added
#    * URLs with no folder and no GET parameter SHOULD perhaps be removed from the filter list; this is still under consideration.
# 2. process each line of user to check the tuples, only those with common tuple
#    more than USER_COMMON_TUPLE_COUNT will start the filter-out process
# 3. filter-out process: remove all the common tuple from the candidates
#########################################

class FavProcessWithFilter(FavProcess):
    """FavProcess variant that drops "common" (url, title, tag) tuples.

    A tuple is considered common when it appears in the standard-common
    file with a score above COMMON_TUPLE_INVOLVE_SCORE.  A user's record
    only triggers the filter-out step when it contains more than
    USER_COMMON_TUPLE_COUNT common tuples (see the module header).
    """

    def __init__(self, filename=''):
        FavProcess.__init__(self, filename)
        # Maps (url, title, tag) -> score for tuples passing the threshold.
        self.standard_common_tuple = NewDict()
        # Running counters: tuples filtered out / tuples examined.
        self.filter_num = 0
        self.tuple_num = 0
        self.init_filter()

    def init_filter(self, filename='info/standard_common.txt'):
        """Load standard-common tuples whose score passes the threshold.

        Each line of *filename* is tab-separated: url, title, tag, score.
        Only entries with score > COMMON_TUPLE_INVOLVE_SCORE are kept.
        """
        print('initializing filter common info ...')
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(filename, 'r') as f_in:
            for line in f_in:
                ss = line.rstrip().split('\t')
                key = (ss[0], ss[1], ss[2])  # renamed: don't shadow builtin 'tuple'
                score = float(ss[3])
                if score > COMMON_TUPLE_INVOLVE_SCORE:
                    self.standard_common_tuple.add_val(key, score)
        print('finished filter common info initialization')

    def process_line(self, line):
        """Parse one user line, filter common tuples, record the rest.

        Expected layout (tab-separated): user_id, then repeated
        (url, title, tag) triples, then a trailing timestamp field.
        """
        ss = line.split('\t')
        num = len(ss)
        assert num > 0 and (num - 2) % 3 == 0, "error for taking line syntax"
        user_id = ss[0]
        tm = ss[-1]
        idx = 1
        user_triple_data = []
        while idx < num - 1:
            url, title, tag = ss[idx:idx + 3]
            url = self.url_short(url)  # refactor the url first
            idx += 3
            # 1. filter on tag (there could also be a filter or pre-process
            #    on the url itself).
            if tag == '-':
                # When the user put no tag on this url we can fall back to
                # the title; if the title is too long, skip the triple.
                if len(title) < 30:  # ~30 chinese characters
                    tag = title
                else:
                    continue
            # 2. record them
            user_triple_data.append((url, tag, title))
        # filter out the common-tuple information (mutates the list in place)
        self.filter_out_tuple(user_triple_data)
        # finally record all the remaining information
        self.record_remain_info(user_id, user_triple_data, tm)

    def record_remain_info(self, user_id, user_triple_data, tm):
        """Append the surviving triples to the iteration data structures."""
        for (url, tag, title) in user_triple_data:
            triple = (user_id, url, tag)
            self.triple_data.append(triple)
            triple_idx = len(self.triple_data) - 1
            self.add_list_item_in_dict(self.url_triple_idx, url, triple_idx)
            self.add_list_item_in_dict(self.user_triple_idx, user_id, triple_idx)
            # 3. give initial value for F and w
            self.F.insert(url, '-')
            self.w.insert(user_id, 0.5)
            # 4. process trace system
            if self.need_url_trace:
                if url in self.trace_url_list:
                    # setdefault replaces the has_key/create dance on the
                    # plain-dict trace structure: ([], {tag: []}).
                    entry = self.trace_system.setdefault(url, ([], {}))
                    entry[1].setdefault(tag, [])

    def filter_out_tuple(self, user_triple_data):
        """Remove common tuples from *user_triple_data* in place.

        Removal only happens when more than USER_COMMON_TUPLE_COUNT of the
        user's tuples are common; counters are updated either way.
        """
        self.tuple_num += len(user_triple_data)
        # NOTE(review): keeping .has_key here — NewDict is a project type
        # and may not support the 'in' protocol; confirm before changing.
        common = [t for t in user_triple_data
                  if self.standard_common_tuple.has_key(t)]
        if len(common) > USER_COMMON_TUPLE_COUNT:
            self.filter_num += len(common)
            # Every occurrence of a common tuple is in 'common', so an O(n)
            # set-based rebuild is equivalent to the old O(n^2) remove loop.
            drop = set(common)
            user_triple_data[:] = [t for t in user_triple_data
                                   if t not in drop]

class GenNewData(FavProcessWithFilter):
    """Run the common-tuple filter and dump the surviving data to a file.

    Instead of feeding the iteration machinery, this subclass rewrites
    each user's filtered record into '<input filename>-new'.
    """

    def record_remain_info(self, user_id, user_triple_data, tm):
        # One output line per user: id, url/title/tag per triple, timestamp.
        fields = [user_id]
        for (url, tag, title) in user_triple_data:
            fields.extend([url, title, tag])
        fields.append(tm)
        self.m_newfile.write('\t'.join(fields) + '\n')

    def init_out_file(self):
        # Output sits next to the input file, with a '-new' suffix.
        out_name = self.m_current_parsing_filename + '-new'
        self.m_newfile = open(out_name, 'w')

    def close_file(self):
        self.m_newfile.close()

# utilize this to run the iteration
if __name__ == '__main__':
    # Example of the full filtered-iteration run (kept for reference):
    #   test = FavProcessWithFilter('data/part-00000')
    #   test.set_base_dir('')       # change the output file location
    #   test.trace_from_list()      # trace url in 'url_trace_list.txt'
    #   test.init()
    #   test.update_iteration(10)
    #   test.print_trace_result('res/trace_with_filter_1.txt', True, -1)
    #       # print trace info into filename, whether to print detail,
    #       # and the max limit of output tags
    #   test.clear()                # clear iteration data, allows reconfig
    runner = GenNewData('data/part-00000')
    runner.init()
    runner.close_file()