# -*- coding: UTF-8 -*-

import scipy.cluster.hierarchy as sch
import matplotlib.pylab as plt
import numpy as np
from readData_1 import *
from model import *
import random
from PIL import Image
import jieba.analyse

# Fold caselist into shot objects: one shot per distinct screenshot name,
# tagged with the keyword labels of the first case that references it.
shotsname = []
shotslist = []

for case in caselist:
    for screenshot in case.screenshotslist:
        # First occurrence of this screenshot: create a shot for it and
        # copy over the owning case's keyword list.
        if screenshot not in shotsname:
            shotsname.append(screenshot)
            fresh_shot = shot(screenshot)
            fresh_shot.keywords = case.keywordslist
            shotslist.append(fresh_shot)


# 读取相似性矩阵
matchscores = np.loadtxt('matchscores.txt')
matchscores = matchscores
print 'Similarity Matrix:\n',matchscores
# hierarchy cluster
Z = sch.linkage(matchscores, method='complete')
P = sch.dendrogram(Z)
cluster_index = sch.fcluster(Z, t = 0.8, criterion='inconsistent')
print "Original cluster by hierarchy clustering:\n", cluster_index
# plt.show()

cluster_num = max(cluster_index)
cluster_count = []
print 'Clusters number:', cluster_num
for i in range(cluster_num):
    cluster_count.append(0)
for i in cluster_index:
    cluster_count[i-1] += 1
print 'Cluster max count:', max(cluster_count), '\n'


def keywords_of_clusters(P, K, cluster_index):
    """Split caselist into train/test sets and build per-cluster keyword lists.

    P -- fraction of caselist used as the training set (0..1)
    K -- fraction of the TF-IDF-ranked keywords kept per cluster (0..1)
    cluster_index -- per-screenshot cluster labels (1-based), aligned with
                     shotslist / the similarity matrix

    Returns (train_set, test_set, cluster_list).
    """

    # Random train/test split of the bug cases.
    train_set = random.sample(caselist, int(len(caselist) * P))
    test_set = list(set(caselist) - set(train_set))

    # Row index of a screenshot name in the similarity matrix / shotslist.
    def get_shot_index(shotname):
        for index, s in enumerate(shotslist):
            if shotname == s.shotname:
                return index
        return None  # not expected: every case screenshot is in shotslist

    # IDs of every case whose screenshot list contains shotname.
    def get_caseID(shotname):
        caseID = set()
        for c in caselist:
            if shotname in c.screenshotslist:
                caseID.add(c.caseid)
        return caseID

    # Collect the screenshot names and matrix indices of the training cases.
    train_shotsname = set()
    train_shotsindex = set()
    for case in train_set:
        for name in case.screenshotslist:
            train_shotsname.add(name)
            train_shotsindex.add(int(get_shot_index(name)))

    # One cluster object per flat cluster (cluster IDs are 1-based).
    cluster_list = [cluster(i + 1) for i in range(cluster_num)]

    # shotindex: assign each training screenshot index to its cluster.
    for idx in train_shotsindex:
        cluster_list[cluster_index[idx] - 1].shotsindex.append(idx)
    # shotname: resolve indices back to screenshot names.
    for cl in cluster_list:
        for idx in cl.shotsindex:
            cl.shotsname.append(shotslist[idx].shotname)
    # reportindex: bug-report IDs touching each cluster.
    for cl in cluster_list:
        for name in cl.shotsname:
            cl.reportsindex.update(get_caseID(name))
    # description: concatenate the bug details of those reports.
    for cl in cluster_list:
        for rid in cl.reportsindex:
            cl.description += caselist[rid - 1].bugdetails

    # keywords: TF-IDF extraction, keeping the top K fraction per cluster.
    for cl in cluster_list:
        tags = jieba.analyse.extract_tags(cl.description, topK=1000,
                                          withWeight=True)
        all_keywords = [tag[0] for tag in tags]
        for j in range(int(K * len(all_keywords))):
            cl.keywords.append(all_keywords[j])

    return train_set, test_set, cluster_list


def bag_of_words(case, train_set, cluster_list):
    """Predict a keyword bag for a test case from its screenshots.

    For every screenshot of `case`, find the most similar training
    screenshot, take the cluster that screenshot belongs to, and pool an
    equal (1/N) share of each matched cluster's keywords, where N is the
    number of distinct clusters hit.

    case -- a test case exposing .screenshotslist
    train_set -- list of training cases (from keywords_of_clusters)
    cluster_list -- cluster objects (from keywords_of_clusters)

    Returns a set of predicted keywords.
    """

    # Row index of a screenshot in the similarity matrix.
    # NOTE(review): falls through to len(shotlist) when the name is not
    # found, which would index past the matrix -- confirm all names exist.
    def _shot_index(shotname, shotlist):
        for idx, s in enumerate(shotlist):
            if shotname == s.shotname:
                return idx
        return len(shotlist)

    # Index of the training screenshot most similar to `shotname`
    # (excluding itself by construction of the scan).
    # NOTE(review): only the first len(train_set) columns of the similarity
    # row are scanned, but matchscores columns are indexed by shot, not by
    # training case -- confirm this range is intended.
    def _most_similar(shotname):
        row = _shot_index(shotname, shotslist)
        scan = len(train_set)
        scored = [(matchscores[row][i], i) for i in range(scan)]
        scored.sort(reverse=True, key=lambda pair: pair[0])
        train_indices = set()
        for cl in cluster_list:
            train_indices.update(cl.shotsindex)
        for score, idx in scored:
            if idx in train_indices:
                return idx
        return None  # no training shot among the scanned columns

    # Cluster ID containing a given training-shot index.
    # (Renamed from `cluster_index`, which shadowed the module-level array.)
    def _cluster_id(shotindex):
        for cl in cluster_list:
            if shotindex in cl.shotsindex:
                return cl.clusterID
        return None

    # Map each screenshot of the test case to the (0-based) cluster slot
    # of its nearest training screenshot.
    hit_clusters = set()
    for name in case.screenshotslist:
        nearest = _most_similar(name)
        hit_clusters.add(_cluster_id(nearest) - 1)
    hit_list = list(hit_clusters)

    # Pool an equal share of keywords from each matched cluster.
    # (Renamed local from `bag_of_words`, which shadowed this function.)
    words = set()
    N = len(hit_list)
    for ci in hit_list:
        kw = cluster_list[ci].keywords
        # '//' makes the Python 2 integer-division intent explicit.
        for j in range(len(kw) // N):
            words.add(kw[j])

    return words


count = 1
for time in range(30):
    sum_F = 0
    sum_J = 0

    train_set = []
    test_set = []
    cluster_list = []
    train_set, test_set, cluster_list = keywords_of_clusters(0.70, 0.3, cluster_index)

    for i in test_set:
        tags = jieba.analyse.extract_tags(i.bugdetails, topK=500, withWeight=True)
        true_words = set()
        for j in tags:
            true_words.add(j[0])
        tags = set()

        bagofwords = bag_of_words(i,train_set,cluster_list)

        # Jaccard
        Jaccard = (float(len(true_words&bagofwords)) / len(true_words|bagofwords))

        words = bagofwords | true_words
        # F-measure
        TP = 0
        FP = 0
        FN = 0
        TN = 0
        for word in words:
            if word in bagofwords:
                if word in true_words:
                    TP += 1
                else:
                    FP += 1
            if word not in bagofwords:
                if word in true_words:
                    FN += 1
                else:
                    TN += 1
        if (TP + FP) == 0:
            precision = 0
        if (TP + FN) == 0:
            recall = 0
        else:
            precision = float(TP) / (TP + FP)
            recall = float(TP) / (TP + FN)
        if (precision + recall) == 0:
            F = 0
        else:
            F = (2*precision*recall) / (precision + recall)

        # print 'test case num:' + str(count)
        # print  'description:' + str(i.bugdetails)
        # print 'true words:',
        # for l in true_words:
        #     print '%s' % l,
        # print '\nbag of words:',
        # for k in bagofwords:
        #     print '%s' % k,
        # print '\nJaccard:' + str(Jaccard)
        # print 'F-measure:' + str(F)
        # print '\n'

        count += 1
        sum_F += F
        sum_J += Jaccard

    print 'time:' + str(time)
    print 'average F-measure:' + str(sum_F / len(test_set))
    print 'average Jaccard:' + str(sum_J / len(test_set))


# clusters view
# for num in range(cluster_num):
#     cluster_index = []
#     index = 0
#     for i in cluster:
#         if i  == num + 1:
#             cluster_index.append(index)
#         index += 1
#     print 'Cluster:' + str(num) + '   Index:' + str(cluster_index)
#
#     fig_name = 'Cluster' + str(num + 1)
#     fig = plt.figure(fig_name)
#     picnum = 1
#
#     for i in cluster_index:
#         if len(cluster_index) <= 30:
#             shotname = shotslist[i].shotname
#             shotlink = './screenshots/' + shotname
#             img = Image.open(shotlink)
#             row = int(len(cluster_index)/3) + 1
#             ax = fig.add_subplot(row, 3, picnum)
#             ax.imshow(img)
#             plt.axis("off")
#             picnum += 1
#         else:
#             shotname = shotslist[i].shotname
#             shotlink = './screenshots/' + shotname
#             img = Image.open(shotlink)
#             row = int(len(cluster_index) / 5) + 1
#             ax = fig.add_subplot(row, 5, picnum)
#             ax.imshow(img)
#             plt.axis("off")
#             picnum += 1
# plt.show()



