# Standard library
import glob
import time

# Third-party (grouped here per PEP 8 — previously a second import group
# sat below the constants, mid-file)
import torch
from PIL import Image
from pymilvus import Collection  # kept: used by the disabled Milvus code below
from pymilvus import connections
from torchvision import transforms
from tqdm import tqdm

COLLECTION_NAME = 'image_search_wanwei'  # Milvus collection name
DIMENSION = 2048  # Embedding vector size (ResNet-50 pooled feature)
MILVUS_HOST = "10.50.9.18"
MILVUS_PORT = "19530"

BATCH_SIZE = 128  # images embedded per forward pass
TOP_K = 3  # number of nearest neighbours returned by a search

if __name__ == "__main__":

    # --- Milvus collection setup (currently disabled) --------------------
    # The block below connects to Milvus, (re)creates the collection with an
    # id / filepath / image_embedding schema, builds an IVF_FLAT (L2) index
    # and loads it. Re-enable when actually inserting embeddings.
    # print("连接数据库")
    # connections.connect(host=MILVUS_HOST, port=MILVUS_PORT)
    # print("创建COLLECTION")
    # if utility.has_collection(COLLECTION_NAME):
    #     utility.drop_collection(COLLECTION_NAME)
    # print("定义COLLECTION")
    # fields = [
    #     FieldSchema(name='id', dtype=DataType.INT64, is_primary=True, auto_id=True),
    #     FieldSchema(name='filepath', dtype=DataType.VARCHAR, max_length=200),  # VARCHARS need a maximum length, so for this example they are set to 200 characters
    #     FieldSchema(name='image_embedding', dtype=DataType.FLOAT_VECTOR, dim=DIMENSION)
    # ]
    # schema = CollectionSchema(fields=fields)
    # collection = Collection(name=COLLECTION_NAME, schema=schema)
    # collection = Collection(name=COLLECTION_NAME)
    # print("添加索引")
    # index_params = {
    #     'metric_type':'L2',
    #     'index_type':"IVF_FLAT",
    #     'params':{'nlist': 16384}
    # }
    # collection.create_index(field_name="image_embedding", index_params=index_params)
    # collection.load()
    # Collect the image file paths and time the glob.
    t1 = time.time()
    print('加载图片')
    # Get the filepaths of the images
    # NOTE(review): the pattern contains no '**', so recursive=True is a
    # no-op — this matches at most the single file ./resource/456.jpg.
    paths = glob.glob('./resource/456.jpg', recursive=True)
    t2 = time.time()
    print("加载图片  " + str(t2 - t1))

    print('加载模型')
    # Load an ImageNet-pretrained ResNet-50 and strip its final FC layer so
    # the model outputs the pooled 2048-dim feature (matches DIMENSION).
    model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True)
    model = torch.nn.Sequential(*(list(model.children())[:-1]))
    model.eval()  # inference mode: freezes batch-norm / dropout behaviour

    print('加载transforms')
    # Standard ImageNet preprocessing: resize, 224x224 center crop, tensor
    # conversion, per-channel normalization with ImageNet statistics.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    def embed(data):
        """Embed a batch of preprocessed images with the global ResNet model.

        Args:
            data: pair of parallel lists — ``data[0]`` holds preprocessed
                image tensors, ``data[1]`` holds the matching file paths.

        Returns:
            ``[filepaths, embeddings]`` where ``embeddings`` is a list of
            DIMENSION-sized float vectors, one per input image.
        """
        print('插入图片')
        with torch.no_grad():
            # Time the tensor stacking itself.  (The original printed
            # t3 - t2, i.e. everything elapsed since globbing the images,
            # including the model download — not the stack time.)
            t_start = time.time()
            stack = torch.stack(data[0])
            t_stacked = time.time()
            print("图形转化  " + str(t_stacked - t_start))
            output = model(stack)
            # .squeeze() on a single-image batch would also drop the batch
            # dimension, yielding shape (2048,) instead of (1, 2048) —
            # flatten from dim 1 to always keep (N, DIMENSION).
            if len(output) > 1:
                output = output.squeeze()
            else:
                output = torch.flatten(output, start_dim=1)
            t_done = time.time()
            print("模型转化  " + str(t_done - t_stacked))
            print(output)
            print(output.tolist())
            print(data[1])
            print([data[1], [output.tolist()]])
            return [data[1], output.tolist()]

    # data_batch[0]: preprocessed image tensors, data_batch[1]: their paths.
    data_batch = [[], []]

    print('将图片转化')
    for path in tqdm(paths):
        image = Image.open(path).convert('RGB')
        print('提取图片RGB')
        print(image)
        tensors, filepaths = data_batch  # aliases into the current batch
        tensors.append(preprocess(image))
        filepaths.append(path)
        # Flush a full batch and start a fresh one.
        if len(tensors) % BATCH_SIZE == 0:
            embed(data_batch)
            data_batch = [[], []]

    # Flush whatever is left in the final, partial batch.
    if data_batch[0]:
        embed(data_batch)

    # collection.flush()

    # search_paths = glob.glob('./resource/123.png', recursive=True)
    # len(search_paths)
    #
    # # Embed the search images
    # def embed(data):
    #     with torch.no_grad():
    #         ret = model(torch.stack(data))
    #         # If more than one image, use squeeze
    #         if len(ret) > 1:
    #             return ret.squeeze().tolist()
    #          # Squeeze would remove batch for single image, so using flatten
    #         else:
    #             return torch.flatten(ret, start_dim=1).tolist()
    #
    # data_batch = [[],[]]
    #
    # for path in search_paths:
    #     im = Image.open(path).convert('RGB')
    #     data_batch[0].append(preprocess(im))
    #     data_batch[1].append(path)
    #
    # embeds = embed(data_batch[0])
    # start = time.time()
    # res = collection.search(embeds, anns_field='image_embedding', param={'nprobe': 128}, limit=TOP_K, output_fields=['filepath'])
    # finish = time.time()
    # print("查询结果")
    # print(res)
    #
    #
    # # Show the image results
    # f, axarr = plt.subplots(len(data_batch[1]), TOP_K + 1, figsize=(20, 10), squeeze=False)
    #
    # for hits_i, hits in enumerate(res):
    #     axarr[hits_i][0].imshow(Image.open(data_batch[1][hits_i]))
    #     axarr[hits_i][0].set_axis_off()
    #     axarr[hits_i][0].set_title('Search Time: ' + str(finish - start))
    #     for hit_i, hit in enumerate(hits):
    #         axarr[hits_i][hit_i + 1].imshow(Image.open(hit.entity.get('filepath')))
    #         axarr[hits_i][hit_i + 1].set_axis_off()
    #         axarr[hits_i][hit_i + 1].set_title('Distance: ' + str(hit.distance))
    #
    # # Save the search result in a separate image file alongside your script.
    # plt.savefig('search_result.png')

