import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import torch
import time
from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image
import h5py
import numpy as np
import pandas as pd
from PIL import Image

# SigLIP2 checkpoint used for all image embeddings in this script.
ckpt = "google/siglip2-so400m-patch14-384"
# Only the vision tower is needed; the text side of SigLIP2 is discarded.
# NOTE: this downloads the checkpoint on first run and places it on CUDA
# (restricted to physical GPU 1 via CUDA_VISIBLE_DEVICES above).
model = AutoModel.from_pretrained(ckpt, device_map="cuda").vision_model
processor = AutoProcessor.from_pretrained(ckpt)

def get_embedding(image_path):
    """Compute a SigLIP2 embedding for a single image file.

    Args:
        image_path: Path to an image file readable by PIL.

    Returns:
        A 1-D ``numpy.ndarray`` — the flattened pooler output of the
        vision tower.
    """
    # Use a context manager so PIL's lazy file handle is closed even if
    # preprocessing raises (plain Image.open leaks the descriptor).
    with Image.open(image_path) as image:
        # NOTE(review): no explicit .convert('RGB') here — assumes the
        # processor tolerates non-RGB modes; confirm for palette/RGBA inputs.
        inputs = processor(images=[image], return_tensors="pt").to('cuda')
    with torch.no_grad():
        image_embeddings = model(**inputs).pooler_output
    return image_embeddings.cpu().numpy().flatten()

def load_h5_file(file_path):
    """Load cached embeddings from an H5 file into a dict keyed by image id.

    Each dataset in the file is expected to be named
    ``<goods_id>_<image_id>`` and to hold one embedding vector.

    Args:
        file_path: Path to the H5 file.

    Returns:
        Mapping of ``image_id`` to a dict with keys ``image_url`` (always
        ``None`` here; filled in later from the CSV), ``image_id``,
        ``embedding`` (flattened numpy array) and ``goods_id``.
        Returns an empty dict when the file cannot be read.
    """
    print(f"Inspecting H5 file: {file_path}")
    try:
        ans = {}
        with h5py.File(file_path, 'r') as f:
            for key, value in f.items():
                # Key format: "<goods_id>_<image_id>".
                # NOTE(review): split('_')[1] assumes neither id contains an
                # underscore beyond the separator — confirm against the writer.
                parts = key.split('_')
                goods_id = parts[0]
                image_id = parts[1]
                # Explicit check instead of `assert`: assertions are stripped
                # under `python -O`, which would silently drop duplicates.
                if image_id in ans:
                    raise ValueError(f"duplicate image_id in H5 file: {image_id}")
                ans[image_id] = {
                    'image_url': None,
                    'image_id': image_id,
                    'embedding': value[:].flatten(),
                    'goods_id': goods_id,
                }
        return ans
    except Exception as e:
        # Boundary-level catch: a missing/corrupt cache file degrades to
        # "no cached embeddings" rather than aborting the run.
        print(f"Could not read file {file_path}: {e}")
        return {}


def _remove_quietly(path):
    """Best-effort single-file deletion (replaces the old ``rm -rf`` shell call).

    ``os.system(f"rm -rf {path}")`` was shell-injection-prone and could
    recursively delete directories; ``os.remove`` only ever touches one file.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass  # already gone — same outcome as rm -rf on a missing path
    except OSError as e:
        print(f"删除图片文件时发生错误: {e}, {path}")


if __name__ == "__main__":

    # Process the v-side split: load cached H5 embeddings, compute any
    # embeddings missing for rows of the CSV, then persist everything
    # as a single Parquet file.
    data_dir = '/home/tfj/datasets/image_retri10k/eval_images_v'
    csv_path = 'data/split_results/goods_images_eval_v.csv'
    embeddings_path = 'embedding/baseline_embeddings_v.h5'
    output_path = 'embedding/baseline_embeddings_v.parquet'

    ans = load_h5_file(embeddings_path)
    print('H5文件中提取的特征个数为: {}'.format(len(ans)))

    all_csv_data = pd.read_csv(csv_path)
    print('CSV文件的行数为: {}'.format(len(all_csv_data)))

    # Count only embeddings actually computed here; the old code also
    # counted rows whose image file was missing or failed to embed.
    nums = 0
    for _, row in all_csv_data.iterrows():
        image_url = row['image_url']
        image_id = str(row['image_id'])
        goods_id = str(row['goods_id'])

        if image_id in ans:
            # Cached embedding: just attach the URL from the CSV.
            ans[image_id]['image_url'] = image_url
            continue

        image_file = os.path.join(data_dir, f"{goods_id}_{image_id}.jpg")
        if not os.path.exists(image_file):
            continue
        try:
            embedding = get_embedding(image_file)
        except Exception as e:
            # Unreadable/corrupt image: report, drop the file, move on.
            _remove_quietly(image_file)
            print(f"提取特征时发生错误: {e}, {image_id}")
            continue
        ans[image_id] = {
            'image_url': image_url,
            'image_id': image_id,
            'embedding': embedding,
            'goods_id': goods_id,
        }
        nums += 1
        # Preserve original behavior: source images are deleted once embedded.
        _remove_quietly(image_file)

    print('补充计算了 {} 个特征'.format(nums))
    print('最终，ans里有 {} 个特征'.format(len(ans)))

    # Convert the accumulated dict to a DataFrame and save as Parquet.
    if ans:
        print(f"正在保存结果到 {output_path} ...")
        final_df = pd.DataFrame.from_dict(ans, orient='index')
        final_df.to_parquet(output_path, index=False)
        print("保存完成。")
    else:
        print("没有数据可以保存。")


print("-" * 20)








