# -*- coding: utf-8 -*-
# @Time    : 2020/10/20 09:32
# @Author  : luyvlei
# @File    : main.py

import os
import numpy as np
import SIFT as sf
import matplotlib.pyplot as plt
from PIL import Image
from KMeans import KMeans

# Read the data into `dataset`: a list of (label, np.ndarray) tuples.
def load_dataset(data_path="./dataset/", size=(150, 100)):
    """Load every image under ``data_path`` as a grayscale array.

    Each immediate sub-directory of ``data_path`` is treated as one class;
    hidden entries (names starting with ".") are skipped at both levels.

    Args:
        data_path: Root directory containing one sub-directory per class.
        size: (width, height) to resize each image to, or None to keep the
            original size.

    Returns:
        List of (class_name, gray_image) tuples, where gray_image is a 2-D
        ndarray (per-pixel mean over the color channels).
    """
    dataset = []
    for class_name in os.listdir(data_path):
        if class_name.startswith("."):
            continue
        class_dir = os.path.join(data_path, class_name)

        for pic_name in os.listdir(class_dir):
            if pic_name.startswith("."):
                continue
            pic = Image.open(os.path.join(class_dir, pic_name))
            if size is not None:
                pic = pic.resize(size)
            arr = np.array(pic)
            # Collapse the channel axis to gray only for color images.
            # BUG FIX: the original unconditionally averaged axis -1, which
            # turned an already-grayscale (2-D) image into a 1-D column mean.
            pic_nparr = arr.mean(axis=-1) if arr.ndim == 3 else arr
            dataset.append((class_name, pic_nparr))

    return dataset

def train(dataset, n_clusters=80):
    """Build a bag-of-visual-words model from the training set.

    Extracts SIFT descriptors from every image, clusters them into
    ``n_clusters`` visual words, and encodes each image as an idf-weighted,
    sum-normalized word histogram.

    Args:
        dataset: List of (label, 2-D gray ndarray) tuples (see load_dataset).
        n_clusters: Size of the visual vocabulary.

    Returns:
        Tuple (pic_vec, cluster, idf_weight) where pic_vec is a list of
        (label, tf-idf vector, image array) tuples, cluster is the fitted
        KMeans model, and idf_weight is the per-word idf vector of length
        ``n_clusters``.
    """
    print("training")
    # SIFT per image: (label, keypoint positions, descriptors, image array).
    points_feature = []
    for idx, (label, img_arr) in enumerate(dataset):
        print(label, idx, ",done")
        points, features = sf.SIFT(img_arr)
        points_feature.append((label, points, features, img_arr))

    # Pool every descriptor from every image and cluster into the vocabulary.
    all_features = []
    for p in points_feature:
        all_features += p[2]
    all_features = np.array(all_features)
    cluster = KMeans(n_clusters=n_clusters, random_state=0).fit(all_features)

    # idf weight per visual word: log(total descriptors / descriptors in word).
    # BUG FIX: the original used features.shape[1] (the descriptor dimension,
    # e.g. 128) as N instead of the corpus size, which produces negative
    # weights as soon as a word holds more than 128 descriptors.
    # minlength=n_clusters keeps the weight vector aligned with the
    # histograms below even if some cluster received no descriptor, and the
    # clamp to 1 avoids log-of-infinity for such empty words.
    word_counts = np.bincount(cluster.labels_, weights=None, minlength=n_clusters)
    N = all_features.shape[0]
    idf_weight = np.log(N / np.maximum(word_counts, 1))

    # Encode each training image: (label, normalized tf-idf vector, array).
    pic_vec = []
    for label, _, feats, img_arr in points_feature:
        cls = cluster.predict(feats)
        print(cls)
        res = np.bincount(cls, weights=None, minlength=n_clusters) * idf_weight
        vec = res / res.sum()
        pic_vec.append((label, vec, img_arr))

    # Evaluation needs pic_vec, the clusterer, and the idf weights.
    return pic_vec, cluster, idf_weight

def evaluate(val_dataset, train_vec, train_cluster, train_idf_weight, n_clusters):
    """Classify the validation set by 1-nearest-neighbour over tf-idf vectors.

    Encodes each validation image with the trained vocabulary, predicts its
    class as that of the L1-closest training image, prints the accuracy, and
    finally shows the 8 training images most similar to a random validation
    image.

    Args:
        val_dataset: List of (label, 2-D gray ndarray) tuples.
        train_vec: List of (label, tf-idf vector, image array) from train().
        train_cluster: Fitted KMeans vocabulary from train().
        train_idf_weight: Per-word idf vector from train().
        n_clusters: Vocabulary size (length of the tf-idf vectors).
    """
    # SIFT per validation image: (label, keypoints, descriptors, image array).
    points_feature = []
    for data in val_dataset:
        img_arr = data[1]
        print(data[0], ",done")
        points, features = sf.SIFT(img_arr)
        points_feature.append((data[0], points, features, data[1]))

    # Encode each validation image with the *training* vocabulary and idf.
    pic_vec = []
    for i in points_feature:
        cls = train_cluster.predict(i[2])
        res = np.bincount(cls, weights=None, minlength=n_clusters) * train_idf_weight
        vec = res / res.sum()
        pic_vec.append((i[0], vec, i[3]))

    # 1-NN classification under the L1 distance; report accuracy.
    correct = 0
    for i in pic_vec:
        closest_dis = float("inf")  # was a magic 100; inf is always a safe start
        closest_class = -1
        for j in train_vec:
            dis = np.linalg.norm(i[1] - j[1], ord=1)  # L1 norm
            if dis < closest_dis:
                closest_dis = dis
                closest_class = j[0]
        if i[0] == closest_class:
            correct += 1
        # BUG FIX: the original printed the true label under "预测类别"
        # (predicted) and the prediction under "真实类别为" (true); the
        # variables are swapped here so each label matches its value.
        print("预测类别：", closest_class, "真实类别为：", i[0], "l1距离为", closest_dis)
    print("准确率为:", correct / len(pic_vec))

    # Pick one validation image at random and rank training images by
    # similarity. BUG FIX: np.random.randint's upper bound is exclusive, so
    # the original len(pic_vec)-1 could never select the last image.
    pic_idx = np.random.randint(0, len(pic_vec))
    feature_vec_val = pic_vec[pic_idx][1]

    dis_list = [
        (np.linalg.norm(t[1] - feature_vec_val, ord=1), t[2]) for t in train_vec
    ]
    dis_list.sort(key=lambda x: x[0])

    # Show the query image plus its 8 closest training images.
    plt.subplot(3, 3, 1)
    plt.title("Test Image")
    plt.imshow(pic_vec[pic_idx][2], cmap="gray")
    for idx, (_, img) in enumerate(dis_list[:8]):
        plt.subplot(3, 3, idx + 2)
        plt.title("Similarity ranking {}".format(idx + 1))
        plt.imshow(img, cmap="gray")
    plt.show()

if __name__ == "__main__":
    # Train on ./dataset/ and evaluate on ./val_data/ with an 80-word
    # vocabulary; guarded so importing this module has no side effects.
    dataset = load_dataset("./dataset/", size=(100, 50))
    train_vec, train_cluster, idf_weight = train(dataset)
    val_dataset = load_dataset("./val_data/", size=(100, 50))
    evaluate(val_dataset, train_vec, train_cluster, idf_weight, 80)
