import numpy as np
import faiss
import os
import sys
from PIL import Image
import torch
from torchvision import models, transforms

import psycopg2

# # https://github.com/facebookresearch/faiss/wiki/Getting-started
# Some indexes can also store integer IDs corresponding to each of the vectors (but not IndexFlatL2). 
#  If no IDs are provided, add just uses the vector ordinal as the id, ie. the first vector gets 0, the second 1, etc.

# https://blog.csdn.net/weixin_44645198/article/details/139099431

def f1():
    """Minimal FAISS demo: exact (brute-force) L2 search over random vectors."""
    dim = 128      # vector dimensionality
    nb = 10000     # number of database vectors
    np.random.seed(1234)  # fixed seed for reproducible output
    xb = np.random.random((nb, dim)).astype('float32')

    # Flat L2 index: no training needed, ids are the insertion ordinals.
    index = faiss.IndexFlatL2(dim)
    index.add(xb)

    # A handful of random query vectors.
    nq = 4
    xq = np.random.random((nq, dim)).astype('float32')

    # k-nearest-neighbour search.
    k = 3
    distances, indices = index.search(xq, k)
    # Each row of `indices` holds the ids of the k nearest neighbours;
    # for IndexFlatL2 those ids are simply the order vectors were added in.
    print(indices)
    print(distances)  # each row: the matching L2 distances

    # https://github.com/facebookresearch/faiss/wiki/Getting-started
    print('---------- sanity check')
    k = 3                          # number of neighbours to retrieve
    D, I = index.search(xb[:5], k) # sanity check
    print(I)
    # First column of D is 0: each database vector's nearest neighbour
    # is itself, at distance zero.
    print(D)
    print('---------- actual search')
    D, I = index.search(xq, k)     # actual search
    print(I[:2])                   # neighbors of the 2 first queries
    print(I[-2:])                  # neighbors of the 2 last queries


# Requires the faiss-gpu package to be installed.
def f2_gpu():
    """IVF index demo on the GPU, with a save/load round trip via the CPU."""
    dim = 128    # vector dimensionality
    nb = 10000   # database size
    np.random.seed(1234)  # fixed seed for reproducible output
    xb = np.random.random((nb, dim)).astype('float32')

    # IVF index: a flat quantizer plus `nlist` coarse clusters.
    nlist = 100
    quantizer = faiss.IndexFlatL2(dim)
    cpu_ivf = faiss.IndexIVFFlat(quantizer, dim, nlist)

    # Move the index onto GPU 0.
    res = faiss.StandardGpuResources()
    gpu_index = faiss.index_cpu_to_gpu(res, 0, cpu_ivf)

    # IVF indexes must be trained (k-means over the clusters) before adding.
    gpu_index.train(xb)
    gpu_index.add(xb)

    # Persist to disk: GPU indexes cannot be serialized directly,
    # so copy back to the CPU first.
    cpu_index = faiss.index_gpu_to_cpu(gpu_index)
    faiss.write_index(cpu_index, 'trained_index_file.index')

    # On a later run the index can be reloaded from the file
    # and pushed back onto the GPU.
    cpu_index = faiss.read_index('trained_index_file.index')
    gpu_index = faiss.index_cpu_to_gpu(res, 0, cpu_index)

    # Search the reloaded index with random queries.
    nq = 10
    xq = np.random.random((nq, dim)).astype('float32')
    k = 4  # number of nearest neighbours to retrieve
    distances, indices = gpu_index.search(xq, k)

    print(indices)    # each row: ids of one query's nearest neighbours
    print(distances)  # each row: the matching distances

def connect_pg():
    """Connect to local Postgres and (re)create an empty image_vec table.

    Returns a (connection, cursor) pair. The vector(1000) column type
    requires the pgvector extension to be installed in the database.
    """
    conn = psycopg2.connect(
        host='localhost',
        database='postgres',
        user='postgres',
        password='admin',
    )
    cursor = conn.cursor()

    cursor.execute('select version();')
    dbver = cursor.fetchone()
    print('--- pg version:', dbver)

    create_tbl = '''create table if not exists image_vec(
        id serial4 primary key,
        name varchar(128),
        image_embedding vector(1000)
    )
    '''
    cursor.execute(create_tbl)
    # psycopg2 opens an implicit transaction on the first execute,
    # so even DDL needs an explicit commit to take effect.
    conn.commit()

    # Start each run from an empty table.
    cursor.execute('truncate table image_vec')
    conn.commit()

    return conn, cursor

def disconnect_pg(conn, cursor):
    """Release the database handles: cursor first, then the connection."""
    for resource in (cursor, conn):
        resource.close()

def write_db(conn, cursor, name, vec):
    """Insert one (name, embedding) row into image_vec and commit.

    The values are passed as bound parameters instead of being %-formatted
    into the SQL text, so a file name containing a quote can no longer
    break the statement (SQL injection). The embedding is serialized as
    the '[v1, v2, ...]' literal form that pgvector accepts.
    """
    sql = "insert into image_vec(name, image_embedding) values(%s, %s);"
    cursor.execute(sql, (name, str(list(vec))))
    conn.commit()

def read_from_db(conn, cursor, vec):
    """Print and return the 30 rows nearest to `vec` by pgvector L2 distance.

    Uses the pgvector `<->` (L2 distance) operator for ordering. The query
    vector is passed as a bound parameter in the '[v1, v2, ...]' literal
    form pgvector accepts, instead of being %-formatted into the SQL text
    (which was an injection hazard and broke on quotes).

    Returns the fetched rows as a list of (id, name) tuples.
    """
    sql = "select id, name from image_vec order by image_embedding <-> %s limit 30;"
    cursor.execute(sql, (str(list(vec)),))
    rows = cursor.fetchall()
    print(rows)
    # Close the implicit transaction psycopg2 opened for the SELECT.
    conn.commit()
    return rows

# First run auto-downloads the model from
#   https://download.pytorch.org/models/resnet50-0676ba61.pth
# into the local torch hub cache (e.g. %USERPROFILE%\.cache\torch\hub\checkpoints on Windows).
# Set KMP_DUPLICATE_LIB_OK or the duplicated OpenMP runtimes abort on Windows:
#   "Initializing libomp140.x86_64.dll, but found libiomp5md.dll already initialized"
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
def f3_graph_to_localfile():
    """Embed a directory of images with ResNet-50, index them in FAISS,
    round-trip the index through disk, and query it with one image.

    IndexFlatL2 ids are insertion ordinals, so the directory is walked in
    sorted order to keep id -> file mapping reproducible across runs.
    """
    # Standard ImageNet preprocessing pipeline.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Pre-trained ResNet-50 in inference mode.
    model = models.resnet50(pretrained=True)
    model = model.eval()

    def extract_features(img_path):
        """Return a (1, 1000) float32 array of ResNet-50 logits for one image."""
        # .convert('RGB') fixes grayscale/RGBA/CMYK inputs that would
        # otherwise break the 3-channel Normalize transform.
        img = Image.open(img_path).convert('RGB')
        img = transform(img)
        img = img.unsqueeze(0)  # add the batch dimension
        with torch.no_grad():
            features = model(img)
        return features.numpy()

    # ResNet-50 classifier output width.
    dimension = 1000
    index = faiss.IndexFlatL2(dimension)

    # sorted(): os.listdir order is filesystem-dependent, which would make
    # the FAISS ordinal ids differ between runs/machines.
    image_dir = 'E:\\tmp\\tmp-pic\\jpg'
    for img_name in sorted(os.listdir(image_dir)):
        print('---------- ', img_name)
        img_path = os.path.join(image_dir, img_name)
        features = extract_features(img_path)
        index.add(features)

    # Persist the index, then reload it (simulates a later session).
    index_file = 'E:\\tmp\\image.index'
    faiss.write_index(index, index_file)
    index = faiss.read_index(index_file)

    # Search for the images most similar to the query image.
    query_img_path = 'E:\\tmp\\tmp-pic\\query\\me9.jpeg'
    query_features = extract_features(query_img_path)
    k = 10  # number of nearest neighbours to retrieve
    distances, indices = index.search(query_features, k)

    print(indices)    # each row: ids (insertion ordinals) of the k nearest images
    print(distances)  # each row: the matching L2 distances

def f4_to_pg():
    """Embed a directory of images with ResNet-50 and store them in Postgres.

    Each (file name, 1000-dim embedding) pair is inserted into image_vec
    via write_db; the query image's 30 nearest rows are then fetched and
    printed via read_from_db.
    """
    conn, cursor = connect_pg()

    # Standard ImageNet preprocessing pipeline.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Pre-trained ResNet-50 in inference mode.
    model = models.resnet50(pretrained=True)
    model = model.eval()

    def extract_features(img_path):
        """Return a (1, 1000) float32 array of ResNet-50 logits for one image."""
        # .convert('RGB') fixes grayscale/RGBA/CMYK inputs that would
        # otherwise break the 3-channel Normalize transform.
        img = Image.open(img_path).convert('RGB')
        img = transform(img)
        img = img.unsqueeze(0)  # add the batch dimension
        with torch.no_grad():
            features = model(img)
        return features.numpy()

    # sorted(): os.listdir order is filesystem-dependent; sorting keeps
    # the insertion order (and serial ids) reproducible across runs.
    image_dir = 'E:\\tmp\\tmp-pic\\jpg'
    for img_name in sorted(os.listdir(image_dir)):
        print('---------- ', img_name)
        img_path = os.path.join(image_dir, img_name)
        features = extract_features(img_path)
        write_db(conn, cursor, img_name, features[0])  # features[0] is the 1000-dim vector

    # Find the stored images most similar to the query image.
    query_img_path = 'E:\\tmp\\tmp-pic\\query\\me9.jpeg'
    query_features = extract_features(query_img_path)
    read_from_db(conn, cursor, query_features[0])

    disconnect_pg(conn, cursor)
  
# Entry point: guarded so importing this module does not trigger a run.
# Uncomment one of the other demos to run it instead.
if __name__ == "__main__":
    # f4_to_pg()
    # f3_graph_to_localfile()
    # f2_gpu()
    f1()