import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import os
from tqdm import tqdm

#my own
from torch_dataset import GameDataset
import shutil

from PIL import Image
# Convert a PIL.Image with pixel values in [0, 255] to a torch.FloatTensor of
# shape [C, H, W] with values in [0, 1.0], then normalize each channel to
# roughly [-1, 1] via mean/std of 0.5.
transform = transforms.Compose(
    [
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])


# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda"if torch.cuda.is_available() else "cpu")
def move_error_image(in_path):
    """Move a problematic image into an ``error`` subfolder next to it.

    Args:
        in_path: Path of the image file to quarantine.
    """
    dirname, filename = os.path.split(in_path)
    error_dir = os.path.join(dirname, "error")
    # Bug fix: shutil.move raises FileNotFoundError when the destination's
    # parent directory does not exist, so make sure it is there first.
    os.makedirs(error_dir, exist_ok=True)
    shutil.move(in_path, os.path.join(error_dir, filename))

def get_query_feature(img_path, model):
    """Encode a single image into a flat feature vector.

    Args:
        img_path: Path to the query image file.
        model: Feature-extraction network already placed on ``device``.

    Returns:
        1-D numpy array holding the image's feature embedding.
    """
    # Bug fix: close the image file handle promptly instead of leaking it.
    with Image.open(img_path) as img:
        img_src = img.convert('RGB')
        tensor_img = transform(img_src)
    input_img = tensor_img.to(device).unsqueeze(0)  # add batch dimension
    # Inference only: no_grad avoids building the autograd graph, which
    # also makes the explicit detach() unnecessary.
    with torch.no_grad():
        encode = model(input_img)
    return encode.cpu().numpy().flatten()

def copy_sort_by_query(group_filenames, out_dir):
    """Copy files into ``out_dir``, prefixing each name with its rank index.

    Any previous contents of ``out_dir`` are removed first so the folder
    always reflects only the latest query results.

    Args:
        group_filenames: File paths ordered by similarity rank.
        out_dir: Destination directory (recreated empty on each call).
    """
    # Reset the output directory: delete stale results, then recreate empty.
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    for rank, img_path in enumerate(group_filenames):
        _, name = os.path.split(img_path)
        # Prefix with the rank so a plain directory listing shows the order.
        new_img_path = os.path.join(out_dir, str(rank) + "_" + name)
        shutil.copy(img_path, new_img_path)




def query_feat_in_datasets(query_feat, QUERY_IMAGES_FTS):
    """Compute cosine similarity of the query feature against stored features.

    Args:
        query_feat: 1-D numpy feature vector of the query image.
        QUERY_IMAGES_FTS: Paths to ``.npy`` feature files. NOTE(review): the
            last entry is skipped (``[:-1]``) — looks deliberate (possibly an
            in-progress/extra file), TODO confirm.

    Returns:
        Tuple ``(similarity, the_last_image_id)``: the list of cosine
        similarities and the index of the last feature file processed
        (-1 when no file was processed).
    """
    similarity = []
    # Bug fix: initialize so an empty/single-entry input no longer raises
    # UnboundLocalError when the loop variable is read after the loop.
    the_last_image_id = -1
    query_norm = np.linalg.norm(query_feat)  # hoisted loop invariant
    for i, file in enumerate(tqdm(QUERY_IMAGES_FTS[:-1])):
        file_fts = np.load(file).flatten()
        # Cosine similarity between the query and this stored feature.
        cos_sim = np.dot(query_feat, file_fts.T) / (query_norm * np.linalg.norm(file_fts))
        similarity.append(cos_sim)
        the_last_image_id = i
    print("query_feat_in_datasets", similarity)
    return similarity, the_last_image_id

def get_query_results(similarity, query_img_file, QUERY_IMAGES, top_k=20):
    """Select the ``top_k`` most similar images and copy them to a folder.

    Args:
        similarity: Cosine similarity scores aligned with ``QUERY_IMAGES``.
        query_img_file: Path of the query image (expected ``.jpg``).
        QUERY_IMAGES: Image paths corresponding to each similarity score.
        top_k: Number of best matches to keep.
    """
    # Bug fix: flatten() always yields a 1-D array; the previous squeeze()
    # produced a 0-d array for a single score, which argsort(axis=0) rejects.
    similarity = np.asarray(similarity).flatten()
    # argsort is ascending, so negate the scores to rank descending.
    indexes = np.argsort(-similarity)[:top_k]
    topk_similarity = [similarity[index] for index in indexes]
    print("topk_similarity", topk_similarity)
    best_matches_paths = [QUERY_IMAGES[index] for index in indexes]

    # Output folder named after the query image with its extension stripped.
    # Bug fix: splitext only removes the trailing extension, whereas
    # replace(".jpg", "") would also delete ".jpg" occurring mid-path.
    out_query_fodler = os.path.splitext(query_img_file)[0]
    # Put the query image itself first so it appears as rank 0 in the folder.
    best_matches_paths.insert(0, query_img_file)
    copy_sort_by_query(best_matches_paths, out_query_fodler)


# Folder holding the image dataset to search through.
IMG_FOLDER = r'./datasets/imgs'
# Folder holding the precomputed .npy feature files (one per image).
img_fts_dir = r"./datasets/imgs/features"
# The image used as the search query.
query_img_file=r'./datasets/imgs/ia_100000581.jpg'

if __name__ == '__main__':
    # Enlarge PIL's PNG text-chunk memory cap (~100x the default) so reading
    # many images does not raise a "decompressed data too large" error.
    import PIL
    PIL.PngImagePlugin.MAX_TEXT_MEMORY = 6710886400
    print("PIL.PngImagePlugin.MAX_TEXT_MEMORY", PIL.PngImagePlugin.MAX_TEXT_MEMORY)

    # Build the dataset (num_img=-1 means use every image in the folder).
    youkia_dataset = GameDataset(IMG_FOLDER, num_img=-1, transform=transform)
    imgs_all_path = youkia_dataset.img_paths
    print("imgs_num:", len(youkia_dataset))

    # NOTE(review): this DataLoader is never consumed below — it looks like
    # dead code left over from a training script; kept for compatibility.
    BATCH_SIZE = 1
    train_dataloader = DataLoader(youkia_dataset, batch_size=BATCH_SIZE, drop_last=True, shuffle=False)

    # Bug fix: use .to(device) instead of .cuda() so the script also runs on
    # CPU-only machines, matching the device fallback chosen at module top.
    model = torch.hub.load('facebookresearch/swav', 'resnet50w2').to(device)
    # eval() freezes BatchNorm/Dropout to use their trained statistics.
    model.eval()

    # Build the image database: feature files and their matching image paths
    # (same sorted order, so index i in one list corresponds to the other).
    QUERY_IMAGES_FTS = [os.path.join(img_fts_dir, file) for file in sorted(os.listdir(img_fts_dir))]
    QUERY_IMAGES = [os.path.join(IMG_FOLDER, file.replace(".npy", ".jpg")) for file in sorted(os.listdir(img_fts_dir))]

    query_feat = get_query_feature(query_img_file, model)

    # Pre-initialize so the except branch can still reference these when the
    # similarity computation fails partway through.
    similarity = []
    the_last_image_id = 0
    try:
        similarity, the_last_image_id = query_feat_in_datasets(query_feat, QUERY_IMAGES_FTS)
        get_query_results(similarity, query_img_file, QUERY_IMAGES)
    except Exception as e:
        # Best effort: still emit results for whatever similarities were
        # collected before the failure, then report the offending image.
        get_query_results(similarity, query_img_file, QUERY_IMAGES)
        print("出现以下异常", e)
        print("an exception caught in line :", e.__traceback__.tb_lineno)
        print("imgs_all_path[i]", the_last_image_id, imgs_all_path[the_last_image_id])
        # Optionally quarantine the image that caused the failure:
        # move_error_image(imgs_all_path[the_last_image_id])