# coding: utf8
__author__ = 'jiangchanghao'

##############################
# 1. USER candidates: users whose entry count >= THRESHOLD_USER_URL_MINIMUM_NUM
# 2. if two users share more than THRESHOLD_COMMON_INFOSTRING_NUM exact common tuples, those tuples are put into the common list
# 3. these tuples are scored by \sum_t a / max(u1, u2), where a is the total common number for that pair
# 4. common_string is a dict: {(url, folder, title): score}
###############################
from FavStat import FavStat_User
from FavUtil import NewDict
import math

USING_SHORT_URL = False  # presumably makes url_short() truncate URLs before comparison — defined in FavStat; TODO confirm
DEBUG_MODE = True        # enables progress prints and the extra cluster-size statistics dump
PART_FEATURE = True      # only process the first THRESHOLD_PART_FEATURE_USER_NUM users (sampling for speed)

class COMPARE_TYPE:
    """Enumeration of the supported user-similarity comparison strategies."""
    LCS_STRING = 1  # longest common subsequence over one serialized info string per user
    SET = 2         # exact-match intersection over (url, folder, title) tuples

THRESHOLD_COMMON_INFOSTRING_NUM = 3     # minimum number of shared tuples before a pair contributes a score
THRESHOLD_USER_URL_MINIMUM_NUM = 8      # users with fewer entries than this are ignored as candidates
THRESHOLD_PART_FEATURE_USER_NUM = 1000  # user cap applied when PART_FEATURE is True

class FavFilter(FavStat_User):
    user_string = {}
    common_string = NewDict()
    COMPARE_MODE=COMPARE_TYPE.SET
    # make sure no output on statistic results on the file
    def process_line(self, line):
        FavStat_User.process_line(self, line, False)

    def gen_lines(self):
        print 'generating user info lines...'
        if DEBUG_MODE: self.print_time = 0
        count = 0
        for user in self.user_info.keys():
            user_tmp_string = self.gen_line(self.user_info[user], user)
            if len(user_tmp_string) >= THRESHOLD_USER_URL_MINIMUM_NUM:
                self.user_string[user] = user_tmp_string
            if PART_FEATURE and count >= THRESHOLD_PART_FEATURE_USER_NUM:
                break
            count += 1

    def gen_line(self, t_user_info, user = ''):
        user_infos = sorted(t_user_info.items(), key=lambda a:a[0], reverse=True) # x = ( folder name, ( url_total_number, [(title, url)] ))
        if self.COMPARE_MODE == COMPARE_TYPE.LCS_STRING:
            user_str = ''
            for info in user_infos:
                user_str += '[' + info[0] + ']'
                user_str += str(info[1][0])
                for (title, url) in info[1][1]:
                    user_str += '@' + title+ ':' + url
                user_str += '\n'
            if DEBUG_MODE and self.out and self.print_time < 100:
                self.f_out.write('-------' + user +'\n')
                self.f_out.write(user_str)
                self.print_time += 1
            return user_str
        if self.COMPARE_MODE == COMPARE_TYPE.SET:
            user_str = []
            for info in user_infos:
                folder_name = info[0]
                for (title, url) in info[1][1]:
                    str_i = (self.url_short(url), folder_name, title)
                    user_str.append( str_i )
            return user_str
        raise Exception('WRONG COMPARE_MODE!')

    def cluster(self):
        print 'clustering users'
        from hcluster import linkage, fcluster
        print 'start calculating distance...'
        user_distance = FavFilter.get_string_dist(self.user_string.values())
        print 'start calculating cluster...'
        cluster_info = linkage(user_distance)
        print 'cluster finished!'
        #for cluster_item in cluster_info:
        #    self.f_out.write(str(cluster_item) + '\n')
        self.cluster_vector = fcluster(cluster_info, 0.5)

    def loop_and_calc(self, filename='info/standard_common.txt'):
        print 'loop and calc all the pairs ...'
        FavFilter.get_string_dist(self.user_string.values(), True)
        print 'loop end'
        self.print_standard_common_info(filename)

    def print_cluster_results(self):
        group = NewDict()
        for i in range(0, len(self.cluster_vector)):
            val = self.cluster_vector[i]
            if not group.has_key(val):
                group[val] = []
            group[val].append(i)
        group.print_file('res/cluster_user.txt')
        # statistic on group number distribution
        if DEBUG_MODE:
            group_stat = NewDict()
            for i in group.keys():
                val = len(group[i])
                group_stat.add_val(val, 1)
            group_stat.print_file('res/cluster_stat.txt')
        userlist = self.user_string.keys()
        stringlist = self.user_string.values()
        if self.f_out:
            for val in group.keys():
                self.f_out.write('---CLUSTER:' + str(val) + '----\n')
                for index in group[val]:
                    self.f_out.write(userlist[index] + '\t' + self.cont_string_list(stringlist[index]) + '\n')

    # just_loop=True: just loop over all the distribution but no need to record the distance vector
    @staticmethod
    def get_string_dist(stringlist, just_loop = False):
        list_sz = len(stringlist)
        dist_sz = list_sz * (list_sz - 1) / 2
        print 'we have ' + str(list_sz) + ' users, that is '+str(dist_sz) + ' pairs to calculate.'
        if not just_loop:
            res = [0] * dist_sz
        index = 0
        for i in range(0, list_sz):
            for j in range(i + 1, list_sz):
                dist = 100
                if FavFilter.COMPARE_MODE == COMPARE_TYPE.LCS_STRING:
                    dist = len(FavFilter.find_lcs(stringlist[i], stringlist[j]))
                    #try:
                    #    dist = len(FavFilter.find_lcs(stringlist[i], stringlist[j]))
                    #except Exception:
                    #    print sys.exc_info();
                    #    print 'fail getting string of ', stringlist[i], stringlist[j]
                if FavFilter.COMPARE_MODE == COMPARE_TYPE.SET:
                    dist = FavFilter.find_set_common_ele(stringlist[i], stringlist[j])
                if not just_loop:
                    res[index] = dist
                if DEBUG_MODE:
                    if index % 1000 == 0:
                        print ' calculating distance for instance: ', index
                index += 1
        if not just_loop:
            return res
        else:
            return None

    #used for further filtering
    def print_standard_common_info(self, filename):
        print 'normalizing common string ...'
        max_score = max(self.common_string.values())
        for item in self.common_string.keys():
            self.common_string[item] /= max_score
        print 'printing common string: ', len(self.common_string.keys())
        infos = sorted(self.common_string.items(), key=lambda e:e[1], reverse=True)
        std_out = open(filename,'w')
        for ((url, folder, title), num) in infos:
            std_out.write(url + '\t' + folder + '\t' + title + '\t' + str(num) + '\n')
        std_out.close()

    @staticmethod
    def find_set_common_ele(s1, s2):
        if len(s1) > len(s2): # make sure s1 is the short one
            tmp = s2
            s2 = s1
            s1 = tmp
        count = 0
        s2 = {s2[i]: True for i in range(0, len(s2))}
        for item in s1:
            if s2.has_key(item):
                count += 1
        if count >= THRESHOLD_COMMON_INFOSTRING_NUM:
            score = 1.0 * count / len(s2) # 1: standard_common_1.txt
            #score = 1.0 * count / math.sqrt(len(s2)) # 1: standard_common_2.txt
            for item in s1:
                FavFilter.common_string.add_val(item, score)
        return 1 - 1.0 * count / len(s1)

    @staticmethod
    def cont_string_list(strlist):
        res = ''
        for str in strlist:
            res += str + '||'
        return res

    @staticmethod
    def find_lcs(s1_t, s2_t):
        s1 = s1_t.decode('utf8')
        s2 = s2_t.decode('utf8')
        ls1 = len(s1)
        ls2 = len(s2)
        # length table: every element is set to zero.
        m = [ [ 0 ] * ls2 ] * ls1
        # direction table: 1st bit for p1, 2nd bit for p2.
        d = [ [ None ] * ls2 ] * ls1
        # we don't have to care about the boundery check.
        # a negative index always gives an intact zero.
        for p1 in range(len(s1)):
            for p2 in range(len(s2)):
                if s1[p1] == s2[p2]:
                    if p1 == 0 or p2 == 0:
                        m[p1][p2] = 1
                    else:
                        m[p1][p2] = m[p1-1][p2-1]+1
                    d[p1][p2] = 3                                     # 11: decr. p1 and p2
                elif m[p1-1][p2] < m[p1][p2-1]:
                    m[p1][p2] = m[p1][p2-1]
                    d[p1][p2] = 2                                     # 10: decr. p2 only
                else:                                                         # m[p1][p2-1] < m[p1-1][p2]
                    m[p1][p2] = m[p1-1][p2]
                    d[p1][p2] = 1                                     # 01: decr. p1 only
        (p1, p2) = (len(s1)-1, len(s2)-1)
        # now we traverse the table in reverse order.
        s = []
        while 1:
            #print p1,p2
            c = d[p1][p2]
            if c == 3: s.append(s1[p1])
            if not ((p1 or p2) and m[p1][p2]): break
            if c & 2: p2 -= 1
            if c & 1: p1 -= 1
        s.reverse()
        res = (''.join(s)).encode('utf8')
        if len(res) > 10:
            FavFilter.common_string.add_val(res, 1)
        return res







if __name__ == '__main__':
    # Entry point: load the raw favourites dump, build per-user strings,
    # then either cluster users or (current toggle) score all pairs and
    # dump the normalized common-tuple list.
    test = FavFilter('data/part-00000')
    test.set_outfile_name('res/filter_fraud_2.txt')
    test.init()
    test.gen_lines()
    if False: # need return cluster result
        # hierarchical-clustering path, disabled by the toggle above
        test.cluster()
        test.print_cluster_results()
    else:  # calc distances and print
        test.loop_and_calc()
    test.close_file()
