# -*- coding: utf-8 -*-
from FavUtil import NewDict

# ############################################ #
# FAV_PROCESS: root class for the whole process
# to-do list:
#   add configuration system
#   1. each update, print compare result (diff) from previous version
#   2. each url result, print original most common found url list and the result
#   3. implement different compare function for two words
# ############################################ #

# When True, url_short()/url_refactor() strip the scheme, a leading 'www.'
# and a trailing '/' before urls are compared or indexed.
USING_SHORT_URL = False
# Extra debug printing (e.g. the resolved input file list in __init__).
DEBUG_MODE=False

class FavProcess:
    """Root driver: alternately estimates F[url] (best tag for each url) and
    w[user] (a per-user reliability weight) from (user, url, tag) triples."""
    # NOTE(review): these are CLASS-level mutable attributes. Unless an
    # instance shadows them with its own copies, they are shared by every
    # FavProcess instance -- a classic Python pitfall.
    triple_data = [] # contains triple (user, url, tag)
    url_triple_idx = {} # url -> [triple_idx]
    user_triple_idx = {} # user id -> [triple_idx]

    def __init__(self, filename='', user_num=-1):
        self.F = NewDict() # key is url
        self.w = NewDict() # key is user id
        if filename != '':
            self.m_filename = [filename]
        else:
            self.m_filename = []
            import ConfigParser
            cf = ConfigParser.ConfigParser()
            cf.read('run.conf')
            folder_name = cf.get('data','folder')
            prefix = cf.get('data','prefix')
            import os
            files = os.listdir(folder_name)
            for i_file_name in files:
                if i_file_name.startswith(prefix):
                    self.m_filename.append(folder_name + '/' + i_file_name)
            if DEBUG_MODE: print self.m_filename
        self.m_user_limit = user_num
        self.update_time = 0
        self.base_dir = './'
        # trace some significant url and output there results during updating
        self.trace_file = 'url_trace_list.txt'
        self.need_url_trace = False
        self.trace_system = {}   # trace_system[url] = ( [best tag of each iteration] , {tag:[score of different iteration]})
        self.trace_url_list = []
        # trace end
        import time
        self.time_str=time.strftime('%Y%m%d-%H%M',time.localtime(time.time()))

    def check_folder(self):
        f_n = self.time_str
        import os
        if os.path.exists(f_n):
            return True
        return os.mkdir(f_n)

    def message(self, msg=''):
        _fOut = open(self.time_str+'/read_me.txt','w')
        _fOut.write('MESSAGE: \t'+ msg + '\n\n')
        _fOut.write('input file name: \t' + self.m_filename + '\n')
        _fOut.write('input triple number: \t' + str(len(self.triple_data)) + '\n')
        _fOut.write('input user number: \t' + str(len(self.user_triple_idx.keys())) + '\n')
        _fOut.write('input url number: \t' + str(len(self.url_triple_idx.keys())) + '\n')

    def update_iteration(self, time):
        self.check_folder()
        for i in range(0, time):
            print ' updating w and F: iteration ', self.update_time
            self.update_time += 1
            self.update_F()
            self.update_w()
            self.w.print_file(self.base_dir + self.time_str + '/w_'+str(self.update_time) + '.txt')
            self.F.print_file(self.base_dir + self.time_str + '/F_'+str(self.update_time) + '.txt')

    # Clear iteration results.
    def clear(self):
        """Reset per-iteration state.

        w goes back to 0.5 everywhere (F is overwritten by the next
        update_F() call anyway); the per-url trace histories are emptied
        while the url/tag skeleton built by init() is kept.
        """
        self.w.fill_val(0.5)
        if not self.need_url_trace:
            return
        for entry in self.trace_system.values():
            best_tags, tag_scores = entry
            del best_tags[:]
            for score_list in tag_scores.values():
                del score_list[:]

    def print_trace_result(self, filename, full_information=False, max_lim='-1'):
        fout = open(filename,'w')
        if full_information:
            for url in self.trace_system.keys():
                tag_info = sorted(self.trace_system[url][1].items(), key=lambda e:e[1][0], reverse=True)
                fout.write('===========\n'+url+'\n')
                p_count = 0
                for line in tag_info:
                    p_count += 1
                    if p_count > max_lim > 0:
                        break
                    fout.write(line[0]) # tag printed
                    for num in line[1]:
                        fout.write('\t' + str(num))
                    fout.write('\n')
        else:
            for url in self.trace_system.keys():
                tag_results = self.trace_system[url][0]
                fout.write(url)
                for tag in tag_results:
                    fout.write('\t' + tag)
                fout.write('\n')
        fout.close()

    def init(self, count_p = -1):
        for filename in self.m_filename:
            print 'parsing ' + filename +' ... '
            self.m_current_parsing_filename = filename
            self.init_out_file()
            _fin = open(filename, 'r')
            content = _fin.readline()
            line_num = 0
            while content:
                line = content.strip()
                while line and len(line) == 0:
                    content = _fin.readline()
                self.process_line(line)
                content = _fin.readline()
                line_num += 1
                if line_num > self.m_user_limit > 0:
                    break
                if count_p > 0 and line_num % count_p == 0:
                    print 'processing : [' + str(line_num) + ']'
            _fin.close()
            #self.test_init()

    def trace_from_list(self, filename=''):
        """Load the url trace list (one url per line) and switch tracing on.

        filename -- optional override for self.trace_file.
        """
        if filename:
            self.trace_file = filename
        self.trace_url_list = []
        for raw_line in open(self.trace_file, 'r').readlines():
            url = raw_line.strip()
            if url:
                self.trace_url_list.append(self.url_short(url))
        self.need_url_trace = True


    def process_line(self, line):
        """Parse one input line and record its (user, url, tag) triples.

        Expected layout: user_id \t (url \t title \t tag)* \t time.
        """
        fields = line.split('\t')
        field_count = len(fields)
        assert field_count > 0 and (field_count - 2) % 3 == 0, "error for taking line syntax"
        user_id = fields[0]
        time_mod = fields[-1]  # trailing timestamp field, currently unused
        for pos in range(1, field_count - 1, 3):
            url, title, tag = fields[pos:pos + 3]
            url = self.url_short(url)  # normalize the url first
            # 1. filter on tag (there can also be a filter / pre-process on the url itself)
            if tag == '-':
                # Untagged bookmark: fall back to a short title; a long title
                # (>= 30 chars, i.e. ~30 Chinese characters) is skipped.
                if len(title) >= 30:
                    continue
                tag = title
            # 2. record the triple and index it by url and by user
            self.triple_data.append((user_id, url, tag))
            triple_idx = len(self.triple_data) - 1
            self.add_list_item_in_dict(self.url_triple_idx, url, triple_idx)
            self.add_list_item_in_dict(self.user_triple_idx, user_id, triple_idx)
            # 3. seed initial values for F and w
            self.F.insert(url, '-')
            self.w.insert(user_id, 0.5)
            # 4. register the url/tag in the trace system when traced
            if self.need_url_trace and url in self.trace_url_list:
                if url not in self.trace_system:
                    self.trace_system[url] = ([], {})
                if tag not in self.trace_system[url][1]:
                    self.trace_system[url][1][tag] = []

    def update_F(self):
        """Recompute F[url] as the tag whose supporters carry the highest
        total user weight, and append this round's data to the trace system
        for traced urls."""
        for url in self.F.keys():
            votes = NewDict()  # tag -> accumulated user weight
            for triple_idx in self.url_triple_idx[url]:
                user_id, _unused_url, tag = self.triple_data[triple_idx]
                votes.add_val(tag, self.w.get_val(user_id))
            winner = votes.get_highest_key()
            self.F[url] = winner
            if self.need_url_trace and url in self.trace_url_list:
                history = self.trace_system[url]
                for tag in votes.keys():
                    history[1][tag].append(votes[tag])
                history[0].append(winner)

    def update_w(self):
        for user_id in self.w.keys():
            # user_id, score
            score = 0
            triple_idx_list = self.user_triple_idx[user_id]
            for triple_idx in triple_idx_list:
                (_uid, url, tag) = self.triple_data[triple_idx]
                score += self.get_tag_similarity(tag, self.F[url])
            self.w[user_id] = 1.0 * score / len(triple_idx_list)

    def test_init(self):
        """Debug helper: dump the current F and w tables to fixed files in
        the working directory."""
        self.F.print_file('F_info.txt')
        self.w.print_file('w_info.txt')

    def set_base_dir(self, dir='./'):
        """Set the directory prefix prepended to iteration output paths.

        NOTE(review): the parameter name shadows the builtin `dir`; kept
        unchanged for keyword-argument compatibility with existing callers.
        """
        self.base_dir = dir

    @staticmethod
    def add_list_item_in_dict(target_dict, key, item):
        if key in target_dict:
            target_dict[key].append(item)
        else:
            target_dict[key] = [item]

    @staticmethod
    def get_tag_similarity(word1, word2):
        if word1 == word2:
            return 1
        else:
            return 0

    @staticmethod
    def url_refactor(url):
        """Normalize a url in short-url mode: drop a single trailing '/'.

        Returns the url unchanged when USING_SHORT_URL is False.
        Fix: `url[-1]` raised IndexError for an empty url; endswith() also
        guards that case.
        """
        if not USING_SHORT_URL:
            return url
        if url.endswith('/'):
            url = url[:-1]
        return url

    @staticmethod
    def url_short(url):
        """Canonicalize a url for comparison in short-url mode: strip the
        trailing '/', the scheme, and a leading 'www.'. No-op when
        USING_SHORT_URL is False."""
        if not USING_SHORT_URL:
            return url
        url = FavProcess.url_refactor(url)
        # Same order as before: http, then https, then www -- each at most once.
        for prefix in ('http://', 'https://', 'www.'):
            if url.startswith(prefix):
                url = url[len(prefix):]
        return url

    def init_out_file(self):
        return


if __name__ == '__main__':
    # Demo driver. Expects run.conf (for the input file list) and
    # 'url_trace_list.txt' to exist in the working directory.
    # NOTE(review): print_trace_result writes into 'res/' -- presumably that
    # directory must already exist; verify before running.
    test = FavProcess()
    test.set_base_dir('')       # change the output file location
    test.trace_from_list()      # trace url in 'url_trace_list.txt'
    test.init()
    test.update_iteration(10)
    #test.print_trace_result('res/trace_4.txt')  # print trace information into filename, whether to print detail and max limit of output tags
    test.print_trace_result('res/trace_4.txt', True, -1)  # print trace information into filename, whether to print detail and max limit of output tags
    test.clear()                # clear data from iteration, and we can reset some configuration.


