import os
import torch

import gradio as gr
from PIL import Image

import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from demo_qwen_instrcut import qwen3_4b_instruct, get_result_by_qwen


# Lazily-initialized module-level singletons, populated by ensure_loaded()
# on first request so the server starts fast.
clip_model = None   # SentenceTransformer CLIP model (see clip_vit_b32)
df_desc = None      # DataFrame providing 'path' and 'description' columns
df_img = None       # DataFrame providing the 'embedding' column
# Qwen tokenizer/model pair used to translate queries to English
tokenizer, qwen_model = None, None
# Instruction prompt asking Qwen for a short, precise English translation
question_to_en = '翻译为简短且精确的英语'

def clip_vit_b32(clip_path='/data/models/clip-ViT-B-32', device='cuda'):
    """Load the CLIP ViT-B/32 model as a SentenceTransformer.

    The path and device were hard-coded; they are now defaulted parameters
    so the loader can be reused outside this deployment (backward-compatible
    with existing zero-argument callers).

    Args:
        clip_path: local directory of the pretrained CLIP model.
        device: torch device string to load the model onto.

    Returns:
        A SentenceTransformer instance ready for .encode().
    """
    # Imported lazily: sentence_transformers is heavy and only needed here.
    from sentence_transformers import SentenceTransformer
    model = SentenceTransformer(clip_path, device=device)
    return model

def get_embedding(clip_model, text_or_imgpath):
    """Encode a text string (or an image path) into an embedding vector.

    Args:
        clip_model: model exposing an .encode() method (SentenceTransformer).
        text_or_imgpath: the query text or image path to embed.

    Returns:
        The embedding produced by the model's encode().
    """
    embedding = clip_model.encode(text_or_imgpath)
    return embedding

def load_data_files(desc_parquet, img_parquet):
    """Read the description and image-embedding parquet files.

    Args:
        desc_parquet: path to the parquet with 'path'/'description' columns.
        img_parquet: path to the parquet with the 'embedding' column.

    Returns:
        Tuple (df_desc, df_img) of pandas DataFrames.
    """
    import pandas as pd
    return pd.read_parquet(desc_parquet), pd.read_parquet(img_parquet)

def topk_similarities(search_emb, empty_emb, img_embs, top_k):
    """Rank image embeddings by query similarity relative to an empty-text baseline.

    For each image embedding:
      * t2i_sim  = cosine(search_emb, img_emb)
      * et2i_sim = cosine(empty_emb, img_emb)   # empty-prompt baseline
    Images whose query similarity does not beat the baseline are dropped;
    the survivors are ranked by sim_relative = t2i_sim - et2i_sim.

    Args:
        search_emb: 1-D query embedding (array-like).
        empty_emb: 1-D embedding of the empty string (baseline).
        img_embs: iterable of 1-D image embeddings.
        top_k: maximum number of results to keep.

    Returns:
        Up to top_k tuples (sim_relative, idx, t2i_sim, et2i_sim),
        sorted by sim_relative descending.
    """
    import heapq

    import torch.nn.functional as F

    # Fix: the original rebuilt the query/baseline tensors (and the image
    # tensor twice) on every iteration; hoist the loop invariants.
    search_t = torch.tensor(search_emb).unsqueeze(0)
    empty_t = torch.tensor(empty_emb).unsqueeze(0)

    heap = []
    for idx, img_emb in enumerate(img_embs):
        img_t = torch.tensor(img_emb).unsqueeze(0)
        t2i_sim = F.cosine_similarity(search_t, img_t).item()
        et2i_sim = F.cosine_similarity(empty_t, img_t).item()
        # Skip images the empty prompt matches strictly better than the query.
        if t2i_sim < et2i_sim:
            continue

        sim_relative = t2i_sim - et2i_sim
        # Min-heap of size top_k keyed on sim_relative (ties broken by idx).
        if len(heap) < top_k:
            heapq.heappush(heap, (sim_relative, idx, t2i_sim, et2i_sim))
        elif sim_relative > heap[0][0]:
            heapq.heapreplace(heap, (sim_relative, idx, t2i_sim, et2i_sim))
    return sorted(heap, key=lambda x: -x[0])

def ensure_loaded():
    """Lazily initialize the module-level CLIP model, data frames and Qwen.

    Populates the globals on first call; later calls are cheap no-ops once
    every resource is present.
    """
    global clip_model, df_desc, df_img, tokenizer, qwen_model

    # NOTE(review): both variables point at the same parquet file — confirm
    # the description data really lives in images_embeddings.parquet.
    desc_pq_file = '/mnt/nas/shengjie/qdrant_data/resources_text/images_embeddings.parquet'
    img_pq_file = '/mnt/nas/shengjie/qdrant_data/resources_text/images_embeddings.parquet'

    if clip_model is None:
        clip_model = clip_vit_b32()
    if df_desc is None or df_img is None:
        df_desc, df_img = load_data_files(desc_pq_file, img_pq_file)
    if tokenizer is None or qwen_model is None:
        tokenizer, qwen_model = qwen3_4b_instruct()

def gradio_search_by_txt(search_txt):
    """Gradio callback: search images for a (possibly Chinese) text query.

    The query is translated to English with Qwen (CLIP text encoders expect
    English), embedded with CLIP, and matched against precomputed image
    embeddings relative to an empty-prompt baseline.

    Args:
        search_txt: raw user query from the textbox.

    Returns:
        (img_list, info_text): gallery items (PIL images, URL strings, or
        None on load failure) and a human-readable report string.
    """
    import re

    ensure_loaded()
    # Fix: removed the unnecessary `global` statement — all module-level
    # names here are only read, never assigned.

    # Translate the query; '/no_think' suppresses Qwen's thinking mode.
    search_txt = get_result_by_qwen(tokenizer, qwen_model,
                                    prompt=f'{question_to_en}: {search_txt} /no_think',
                                    max_new_tokens=256)
    # Strip any leftover <think>...</think> block from the model output.
    search_txt = re.sub(r'<think>.*?</think>', '', search_txt, flags=re.DOTALL).strip()
    print('翻译后的search: ', search_txt)

    paths = df_desc['path'].tolist()
    desc_txts = df_desc['description'].tolist()
    img_embs = df_img['embedding'].tolist()
    top_k = 10

    # The empty-prompt embedding serves as the similarity baseline.
    empty_emb = get_embedding(clip_model, '')
    search_emb = get_embedding(clip_model, search_txt)

    topk_results = topk_similarities(search_emb, empty_emb, img_embs, top_k)

    img_list = []
    info_lines = [f"搜索文本: {search_txt}"]
    for rank, (score, idx, t2i_sim, empty_sim) in enumerate(topk_results):
        desc = desc_txts[idx]
        imgpath = paths[idx]
        # Fix: format t2i_sim with :.4f, consistent with the other values.
        info_lines.append(
            f"Top{rank+1}: Relative相似度={score:.4f}, 文本相似度={t2i_sim:.4f}, "
            f"空文本相似度={empty_sim:.4f}, 索引={idx}\n描述={desc}\nimgpath={imgpath}"
        )
        # Local files are opened as PIL images; anything else is handed to
        # Gradio as a URL. A failed open yields a None placeholder.
        try:
            if os.path.isfile(imgpath):
                img_list.append(Image.open(imgpath))
            else:
                img_list.append(imgpath)
        except Exception:
            img_list.append(None)
    print("\n\n".join(info_lines))
    return img_list, "\n\n".join(info_lines)

def start_gradio(port):
    """Build and launch the Gradio text-to-image search UI.

    Layout: one query textbox, a 5-column result gallery, a textbox for the
    similarity report, and a search button wired to gradio_search_by_txt.

    Args:
        port: TCP port to serve on. Binds 0.0.0.0 (all interfaces); blocks
            in demo.launch() until the server stops.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# 文本搜索图片（CLIP）Demo")
        with gr.Row():
            txt_input = gr.Textbox(label="输入搜索文本", lines=1)
        with gr.Row():
            img_gallery = gr.Gallery(label="Top搜索结果图片", columns=5, height="auto")
        with gr.Row():
            output_txt = gr.Textbox(label="描述和相似度信息", lines=15)
        search_btn = gr.Button("搜索")
        # The callback returns (images, report) matching these two outputs.
        search_btn.click(fn=gradio_search_by_txt, 
                        inputs=txt_input, 
                        outputs=[img_gallery, output_txt])
    demo.launch(server_name="0.0.0.0", server_port=port, share=False)

if __name__ == '__main__':
    # Fix: dropped the redundant `os` from `import argparse,os` — os is
    # already imported at the top of the file.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id')
    parser.add_argument('-p', '--port', type=int, default=20022, help='port')
    # parse_known_args tolerates extra flags from outer launchers.
    args, unknown = parser.parse_known_args()
    # NOTE(review): torch is imported at module load; this only takes effect
    # because CUDA is not initialized until the first model load — confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    # Launch the Gradio UI (blocks until the server stops).
    # Quick CLI smoke test instead: gradio_search_by_txt('red clothing')
    start_gradio(args.port)
