# -*- coding: UTF-8 -*-
from readData_1 import *
from model import *
from PCV.tools import imtools, pca
from PIL import Image, ImageDraw
from PCV.localdescriptors import sift
from pylab import *
import glob
import os
from scipy.cluster.vq import *
import numpy as np
import sys

# Python 2 hack: re-expose setdefaultencoding (removed by site.py) so that
# implicit str<->unicode conversions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('utf8')

# Training set, test set (and an unused result set) containers.
train_set = []
test_set = []
# result_set = []
# List of shot objects aggregated from the cases.
shotslist = []
# Shot names already seen, used to de-duplicate screenshots.
shotsname = []
# k for the k-nearest-neighbour (ML-KNN) classifier.
k_neighbour = 20

# 将caselist中的case整合成 一张截图对应多个关键词标签的shots对象
for i in caselist:
    # 遍历所有的screenshots，提取出不同名的shots
    for j in i.screenshotslist:
        # 如该shot是第一次出现 则将它加入shots队列
        if j not in shotsname:
            shotsname.append(j)
            newshot = shot(j)
            newshot.keywords = i.keywordslist
            shotslist.append(newshot)
        # 如果此shot已经出现过，则检索它，并合并关键词
        if j in shotsname:
            indexshot = shotsname.index(j)
            shotslist[indexshot].keywords = list(set(shotslist[indexshot].keywords + i.keywordslist))

# 将不重复的截图写入下载地址的txt中，便于下载
# f1 = open('./address.txt', 'w')
# for i in shotsname:
#     f1.write('http://bj.bcebos.com/json-api/v1/crowdtest-online/upload/' + i + '\n')
print '不重复图片：' + str(len(shotsname))

# Collect all screenshot image paths; the SIFT features were pre-extracted to
# .sift files by the (now commented-out) loop below.
imlist = imtools.get_imlist('G:/TestResportsGeneration/screenshots/')
nbr_images = len(imlist)
print '共读取到' + str(nbr_images) + '张图片...'
# count1 = 0
# for i in range(nbr_images):
#     im = Image.open(imlist[i]).convert('L')
#     im.save('G:/TestResportsGeneration/screenshots/' + 'tmp' + '.pgm')
#     imagename = imlist[i].replace('G:/TestResportsGeneration/screenshots/', '')
#     imagename = imagename.replace('.jpg', '')
#     count1 += 1
#     cmmd = str("C:/Python27/Lib/sift.exe G:/TestResportsGeneration/screenshots/" + "tmp.pgm --output=G:/TestResportsGeneration/screenshots/" + imagename + ".sift --edge-thresh 10 --peak-thresh 5")
#     os.system(cmmd)
#     print '正在提取特征:' + '第' + str(count1) + '/' + str(nbr_images) + '张  ' + str(imagename) + '.sift'

# 选取训练集(70%)和测试集
train_set_len = int(len(shotslist) * 0.7)
test_set_len = len(shotslist) - train_set_len
all_set_len = len(shotslist)
for i in range(train_set_len):
    train_set.append(shotslist[i])
for i in range(train_set_len, all_set_len):
    test_set.append(shotslist[i])
print '测试集长度为:' + str(test_set_len)
print '训练集长度为:' + str(train_set_len)


# Give every shot a topK-long 0/1 label vector: initialise all labels to 0,
# then set position t to 1 when the shot carries the keyword taglist[t].
for item in shotslist:
    item.keylabels.extend([0] * topK)
for item in shotslist:
    for keyword in item.keywords:
        if keyword in taglist:
            item.keylabels[taglist.index(keyword)] = 1

# Per-shot train_result initialisation (superseded by the evaluation loop below).
# for i in shotslist:
#     newtrain_result = train_result(i.shotname)
#     for k in range(topK):
#         newtrain_result.true_label.append(i.keylabels[k])
#         newtrain_result.train_label.append(0)
#     result_set.append(newtrain_result)

# Load the precomputed pairwise image-similarity matrix; rows/columns are
# indexed in the same order as shotslist.
matchscores = np.loadtxt('matchscores.txt')
print matchscores


def find_shot(shotname, shotlist):
    """Return the index of the first shot in shotlist whose shotname matches.

    Falls through to len(shotlist) when no shot has that name (same as the
    original counter-based implementation).
    """
    for idx, candidate in enumerate(shotlist):
        if candidate.shotname == shotname:
            return idx
    return len(shotlist)


# Compare against the training set and return the indices (into train_set) of
# the k most similar training shots, best match first.
def find_neighbour_withtrain(testshot, k):
    """Return train_set indices of testshot's k nearest neighbours by score."""
    row = matchscores[find_shot(testshot.shotname, shotslist)]
    scored = [(row[i], i) for i in range(train_set_len)]
    # Stable sort on score only, descending — ties keep ascending index order,
    # exactly as the original sort(reverse=True, key=score).
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [index for _, index in scored[:k]]



# Prior probability P(H_l): Laplace-smoothed frequency of label l over the
# training set (count of shots carrying l, divided by the training-set size).
prior_prob_true = []
prior_prob_false = []
xianyan = []
for label in range(topK):
    positives = 0
    for train_shot in train_set:
        if train_shot.keylabels[label] == 1:
            positives += 1
    # +1 / +2 Laplace smoothing keeps both priors strictly positive.
    p_true = float(1 + positives) / float(2 + train_set_len)
    p_false = 1 - p_true
    xianyan.append(1 if p_true > p_false else 0)
    prior_prob_true.append(p_true)
    prior_prob_false.append(p_false)

# Posterior step of ML-KNN: estimate P(E_j | H_l) and P(E_j | !H_l), i.e. the
# probability that j of a shot's k nearest neighbours carry label l, given
# that the shot itself does (c) or does not (cn) carry l.
def compute_posterior(l, testshot, k):
    """Predict label l (0/1) for testshot using ML-KNN with k neighbours.

    Returns 1 when prior*posterior for "has label l" exceeds the same
    product for "does not have label l", else 0.
    """
    # Fresh counters every call: c[d] / cn[d] = number of training shots
    # with / without label l whose k nearest neighbours contain exactly d
    # shots carrying label l.  (BUG FIX: the original kept c/cn as
    # never-reset module globals, so counts accumulated across calls and
    # corrupted every posterior after the first.)
    c = [0] * (k + 2)
    cn = [0] * (k + 2)
    for train_shot in train_set:
        # Ask for k+1 neighbours and drop the first: a training shot's best
        # match is assumed to be itself.
        neighbour = find_neighbour_withtrain(train_shot, k + 1)
        del neighbour[0]
        delta = 0
        for index in neighbour:
            if train_set[index].keylabels[l] == 1:
                delta += 1
        if train_shot.keylabels[l] == 1:
            c[delta] += 1
        else:
            cn[delta] += 1

    # j = how many of testshot's k nearest training neighbours carry label l.
    j = 0
    for index in find_neighbour_withtrain(testshot, k):
        if train_set[index].keylabels[l] == 1:
            j += 1

    # Laplace-smoothed ML-KNN posteriors.
    posterior_prob_true = float(1 + c[j]) / float(k + 1 + sum(c))
    # BUG FIX: the original used c[j] here; the false-branch posterior must
    # count shots WITHOUT label l, i.e. cn[j].
    posterior_prob_false = float(1 + cn[j]) / float(k + 1 + sum(cn))

    if (prior_prob_true[l] * posterior_prob_true) > (prior_prob_false[l] * posterior_prob_false):
        return 1
    else:
        return 0
# 创建结果队列
result_set = []
sum_precision = 0
sum_recall = 0
shotnum = 0
for i in test_set:
    shotnum += 1
    print 'shotnum: ' + str(shotnum)
    new_train_result = single_img_train_result(i.shotname)
    for l in range(topK):
        new_train_result.true_label.append(i.keylabels[l])
        new_train_result.train_label.append(compute_posterior(l, i, k_neighbour))
    result_set.append(new_train_result)
    precision, recall = new_train_result.accuracy()
    sum_precision += precision
    sum_recall += recall

ave_precision = float(sum_precision) / float(test_set_len)
ave_recall = float(sum_recall) / float(test_set_len)
print 'k取:' + str(k_neighbour)
print 'ave_precision: ' + str(ave_precision)
print 'ave_recall: ' + str(ave_recall)
print 'F-Measure: ' + str(2 * ave_precision * ave_recall / (ave_precision + ave_recall))



# def label_index(word):
#     return taglist.index(word)
#
# shotnum = 0
# for i in test_set:
#     print i.shotname
#     print 'num: ' + str(shotnum)
#     shotnum += 1
#     k3 = find_neighbour_withtrain(i, 3)
#     set3 = set()
#     for j in k3:
#         for k in train_set[j].keywords:
#             set3.add(label_index(k))
#     set3_ = list(set3)
#     set3_.sort()
#
#     k7 = find_neighbour_withtrain(i, 7)
#     set7 = set()
#     for j in k7:
#         for k in train_set[j].keywords:
#             set7.add(label_index(k))
#     set7_ = list(set7)
#     set7_.sort()
#
#     k11 = find_neighbour_withtrain(i, 11)
#     set11 = set()
#     for j in k11:
#         for k in train_set[j].keywords:
#             set11.add(label_index(k))
#     set11_ = list(set11)
#     set11_.sort()
#
#     k15 = find_neighbour_withtrain(i, 15)
#     set15 = set()
#     for j in k15:
#         for k in train_set[j].keywords:
#             set15.add(label_index(k))
#     set15_ = list(set11)
#     set15_.sort()
#
#     print 'k=3 len=' + str(len(list(set3)))
#     print str(set3_)
#     print 'k=7: len=' + str(len(list(set7)))
#     print str(set7_)
#     print 'k=11: len=' + str(len(list(set11)))
#     print str(set11_)
#     print 'k=15: len=' + str(len(list(set15)))
#     print str(set15_)
#     print ''
#
# nlist = find_neighbour_withtrain(test_set[13], 15)
#
# for i in nlist:
#     print train_set[i].shotname