

def get_model_and_processor(qwenvl3_path, device):
    """Load a Qwen3-VL model (bf16, flash-attention-2) and its processor.

    Args:
        qwenvl3_path: local path or hub id of the Qwen3-VL checkpoint.
        device: an int CUDA index, or any device string / device_map value
            accepted by ``from_pretrained`` (e.g. ``'cuda:0'``).

    Returns:
        ``(model, processor)`` tuple.
    """
    from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
    import torch

    # device_map dict keys must be module names; "" means "the whole model".
    # The original `{0: device}` used an integer key, which accelerate does
    # not recognize as a module name.
    device_map = {"": device} if isinstance(device, int) else device
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        qwenvl3_path,
        dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        device_map=device_map,
    )
    processor = AutoProcessor.from_pretrained(qwenvl3_path)
    return model, processor

def run_model_inference(model, processor,
                        image_path, template_json_str,
                        max_retries=3, seed=42):
    """Run VLM inference on one image, retrying until output parses as JSON.

    The prompt asks the model to fill ``template_json_str`` for the clothing
    shown in ``image_path``. Generation is re-seeded on every attempt so
    retries can produce different text.

    Args:
        model: loaded Qwen3-VL generation model.
        processor: matching AutoProcessor (chat template + tokenizer).
        image_path: path (or URI) of the image to describe.
        template_json_str: raw JSON template text embedded in the prompt.
        max_retries: number of generation attempts before giving up.
        seed: base torch seed; attempt ``i`` uses ``seed + i``.

    Returns:
        ``(json_str, parsed_dict, None)`` on success, or
        ``(None, None, last_raw_output)`` after ``max_retries`` failures
        (``last_raw_output`` is None when ``max_retries <= 0``).
    """
    import json
    import torch

    question = (
        f'{template_json_str} 用这个json格式返回服装数据'
        "用这个json格式返回服装数据,不要加入markdown语法，只是按这个输出即可。\n"
        "注意：json模板的内容只是参考，不要作为默认值！\n"
        "注意：不存在的则填充[] or 空字符串；关于count的部分，如果没有则填写-1."
    )
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": question},
            ],
        }
    ]
    # The tokenized inputs are identical across attempts (only the torch RNG
    # state changes), so build and move them to the model device once.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    for k in inputs.data:
        inputs.data[k] = inputs.data[k].to(model.device)

    last_output = None  # guards the final return when max_retries <= 0 (was a NameError)
    for attempt in range(max_retries):
        torch.manual_seed(seed + attempt)  # distinct seed per attempt, same sequence as before
        generated_ids = model.generate(**inputs, max_new_tokens=2048)
        # Strip the prompt tokens so only newly generated text is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):]
            for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        last_output = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )[0]
        try:
            data_dict = json.loads(last_output)
            return last_output, data_dict, None  # JSON string and parsed dict
        except Exception as e:
            print(f'[{image_path}] 第{attempt+1}次生成json解析失败: {e} 内容: {last_output}')
    return None, None, last_output  # all attempts failed; hand back last raw text

def process_images(process_dir, save_dir, qwenvl3_path, template_json_path, device='cuda:0', save_failed=True):
    """Describe every image under ``process_dir`` with Qwen3-VL and save CSVs.

    Successful JSON descriptions are written to ``qwenvl3_cloth_data2600.csv``
    inside ``save_dir``; images whose output never parsed as JSON go to
    ``qwenvl3_failed_images.csv``. The discovered image list is cached in
    ``<process_dir>/image_info.txt`` (one path per line) and reused on
    subsequent runs. ``save_failed`` is currently unused (kept for interface
    compatibility).
    """
    import os
    from tqdm import tqdm
    import pandas as pd

    os.makedirs(save_dir, exist_ok=True)

    # The JSON template text is passed verbatim into every prompt.
    with open(template_json_path, 'r', encoding='utf-8') as f:
        template_json_str = f.read()

    model, processor = get_model_and_processor(qwenvl3_path, device)

    # Image discovery: prefer the cached list; otherwise walk the tree once
    # and write the cache for next time.
    exts = ('.jpg', '.jpeg', '.png', '.bmp', '.webp')
    image_info_path = os.path.join(process_dir, 'image_info.txt')
    if os.path.exists(image_info_path):
        with open(image_info_path, 'r', encoding='utf-8') as f:
            image_paths = [line.strip() for line in f if line.strip()]
    else:
        image_paths = [
            os.path.join(root, fname)
            for root, dirs, files in os.walk(process_dir)
            for fname in files
            if fname.lower().endswith(exts)
        ]
        with open(image_info_path, 'w', encoding='utf-8') as f:
            f.writelines(f"{path}\n" for path in image_paths)

    results, failed_imgs = [], []
    for image_path in tqdm(image_paths, desc="processing images"):
        res_str, res_json, err_txt = run_model_inference(
            model, processor, image_path, template_json_str, max_retries=3
        )
        if res_json is None:
            # Keep the raw model output so failures can be inspected later.
            failed_imgs.append({'img_path': image_path, 'fail_content': err_txt})
        else:
            results.append({
                'img_path': image_path,
                'desc_json': res_str,
                'desc_dict': res_json,
            })

    # Persist successes (utf-8-sig so Excel opens the Chinese text correctly).
    save_csv = os.path.join(save_dir, 'qwenvl3_cloth_data2600.csv')
    pd.DataFrame(results).to_csv(save_csv, index=False, encoding='utf-8-sig')

    if failed_imgs:
        fail_csv = os.path.join(save_dir, 'qwenvl3_failed_images.csv')
        pd.DataFrame(failed_imgs).to_csv(fail_csv, index=False, encoding='utf-8-sig')
        print(f"[INFO] {len(failed_imgs)} images failed. Check {fail_csv}")

    print(f"Processing finished! Saved to {save_csv}")


def check_csv(csv_file_path='/mnt/nas/shengjie/qdrant_data/qwenvl3_data/qwenvl3_cloth_data.csv',
              template_json_path='demo_qwenvl_clothing.json'):
    """Validate that each CSV row's ``desc_json`` matches the template's key set.

    For every row, ``desc_json`` is parsed and its flattened ("a.b.c") key set
    is compared against the flattened key set of the JSON template; every
    difference is printed, plus a final mismatch count. Rows with NaN or
    unparseable ``desc_json`` are reported and skipped.

    The original version defined and immediately called an inner ``check_csv``
    that shadowed this function and duplicated both path constants and the
    key-flattening helper — that duplication is removed; the paths are now
    defaulted parameters (backward compatible for zero-arg callers).
    """
    import json
    import pandas as pd

    def _flatten_keys(d, prefix=''):
        # Collect dotted paths of all leaf keys in a (possibly nested) dict.
        # Non-dict input yields an empty set (unknown structure).
        keys = set()
        if isinstance(d, dict):
            for k, v in d.items():
                fullkey = f"{prefix}.{k}" if prefix else k
                if isinstance(v, dict):
                    keys |= _flatten_keys(v, fullkey)
                else:
                    keys.add(fullkey)
        return keys

    with open(template_json_path, 'r', encoding='utf-8') as f:
        template_keys = _flatten_keys(json.load(f))

    df = pd.read_csv(csv_file_path)
    mismatch_count = 0

    for idx, row in df.iterrows():
        desc_json_str = row.get('desc_json', None)
        img_path = row.get('img_path', '')
        if pd.isna(desc_json_str):
            print(f"[WARNING] idx={idx}, img_path={img_path} desc_json is NaN, skipping.")
            continue
        try:
            result_json = json.loads(desc_json_str)
        except Exception as e:
            print(f"[ERROR] idx={idx}, img_path={img_path} json decode error: {e}")
            continue

        result_keys = _flatten_keys(result_json)
        if result_keys != template_keys:
            mismatch_count += 1
            keys_in_result_not_template = result_keys - template_keys
            keys_in_template_not_result = template_keys - result_keys
            print(f"[MISMATCH] idx={idx} img={img_path}")
            if keys_in_result_not_template:
                print("  Keys in result not in template:", keys_in_result_not_template)
            if keys_in_template_not_result:
                print("  Keys in template not in result:", keys_in_template_not_result)
            print("  Result json:", json.dumps(result_json, ensure_ascii=False, indent=2))

    print(f"Total mismatches found: {mismatch_count}")

def try_search(search_txt):
    # TODO: unimplemented stub — only names the data paths, no search logic yet.
    # The intended retrieval flow is sketched in the comments at the bottom of
    # this file (relative cosine-similarity ranking over clothing JSON fields).
    csv_file_path = '/mnt/nas/shengjie/qdrant_data/qwenvl3_data/qwenvl3_cloth_data.csv'
    
    template_json_path = 'demo_qwenvl_clothing.json'

def process_data():
    """CLI entry point: pick the GPU from argv, then run the captioning job.

    Flags: ``-c/--cuda`` (GPU id string, default '2'), ``-p/--port``
    (accepted but unused here). Unknown argv entries are ignored.
    """
    import os
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id')
    parser.add_argument('-p', '--port', type=int, default=20022, help='port')
    args, unknown = parser.parse_known_args()
    # Must be set before any CUDA context exists; afterwards the chosen GPU
    # is always addressable as cuda:0.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    process_images(
        process_dir='/mnt/nas/shengjie/datasets/retrival_data',
        save_dir='/mnt/nas/shengjie/qdrant_data/qwenvl3_data',
        qwenvl3_path='/mnt/nas/shengjie/huggingface_model_local/Qwen3-VL-8B-Instruct',
        template_json_path='demo_qwenvl_clothing.json',
        device='cuda:0',
    )
if __name__ == "__main__":

    # check_csv()

    process_data()

    # Retrieval sketch (not yet implemented — see try_search stub):
    # search_txt => search against the clothing JSON records.
    # Take the non-None (key:value)*N pairs from the query and look up the
    # corresponding (key:value2)*N pairs in each clothing JSON, then:
    #   concat (value*N  => emb)  COSSIM  (value2*N => emb2)
    #          (empty*N  => emb)  COSSIM  (value2*N => emb2)
    # Compute the relative similarity, drop entries with relative sim < 0,
    # sort by relative sim, and keep the top 5.