{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "86022db0-2b7f-4c53-8730-dd40153da21e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.591 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclmdlBundleGetModelId failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclmdlBundleGetModelId\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.644 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclmdlBundleLoadFromMem failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclmdlBundleLoadFromMem\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.668 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclmdlBundleUnload failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclmdlBundleUnload\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.801 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclrtGetMemUceInfo failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclrtGetMemUceInfo\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.825 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclrtDeviceTaskAbort failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclrtDeviceTaskAbort\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.401.847 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol aclrtMemUceRepair failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclrtMemUceRepair\n",
      "[WARNING] GE_ADPT(65263,ffffb1820b80,python):2025-01-16-01:39:24.403.773 [mindspore/ccsrc/utils/dlopen_macro.h:163] DlsymAscend] Dynamically load symbol acltdtCleanChannel failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libacl_tdt_channel.so: undefined symbol: acltdtCleanChannel\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:24.557.323 [mindspore/run_check/_check_version.py:329] MindSpore version 2.4.10 and Ascend AI software package (Ascend Data Center Solution)version 7.3 does not match, the version of software package expect one of ['7.5', '7.6']. Please refer to the match info on: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:24.559.405 [mindspore/run_check/_check_version.py:407] Can not find the tbe operator implementation(need by mindspore-ascend). Please check whether the Environment Variable PYTHONPATH is set. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/numpy/core/getlimits.py:549: UserWarning: The value of the smallest subnormal for <class 'numpy.float64'> type is zero.\n",
      "  setattr(self, word, getattr(machar, word).flat[0])\n",
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/numpy/core/getlimits.py:89: UserWarning: The value of the smallest subnormal for <class 'numpy.float64'> type is zero.\n",
      "  return self._float_to_str(self.smallest_subnormal)\n",
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/numpy/core/getlimits.py:549: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.\n",
      "  setattr(self, word, getattr(machar, word).flat[0])\n",
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/numpy/core/getlimits.py:89: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.\n",
      "  return self._float_to_str(self.smallest_subnormal)\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:27.754.93 [mindspore/run_check/_check_version.py:347] MindSpore version 2.4.10 and \"te\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:27.776.19 [mindspore/run_check/_check_version.py:354] MindSpore version 2.4.10 and \"hccl\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:27.782.80 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 3\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:28.798.16 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 2\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:29.816.02 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 1\n"
     ]
    }
   ],
   "source": [
    "import argparse\n",
    "import os\n",
    "from tqdm.notebook import tqdm\n",
    "import numpy as np\n",
    "import mindspore as ms\n",
    "from mindspore import ops\n",
    "from mindnlp.transformers import (\n",
    "    BertGenerationTokenizer,\n",
    "    BertGenerationDecoder,\n",
    "    BertGenerationConfig,\n",
    "    CLIPModel,\n",
    "    CLIPTokenizer\n",
    ")\n",
    "from loaders.ZO_Clip_loaders import cifar100_single_isolated_class_loader\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from mindspore import context\n",
    "import sys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1c4ba3ed-f943-4047-be0f-bc0fecadca16",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tokenize_for_clip(batch_sentences, tokenizer):\n",
    "    # 使用CLIPTokenizer直接处理\n",
    "    inputs = tokenizer(\n",
    "        batch_sentences,\n",
    "        padding=True,\n",
    "        truncation=True,\n",
    "        max_length=77,\n",
    "        return_tensors=\"ms\"\n",
    "    )\n",
    "    return inputs.input_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e31dbb37-dfd2-48c6-8b17-b85202af8513",
   "metadata": {},
   "outputs": [],
   "source": [
     "def greedysearch_generation_topk(clip_embed, bert_model, batch_size=32):\n",
     "    \"\"\"Greedy autoregressive decoding with a BERT decoder cross-attending to CLIP embeddings.\n",
     "\n",
     "    For each of the N images in ``clip_embed``, repeatedly feeds the growing\n",
     "    token sequence to ``bert_model`` (with the CLIP embedding as\n",
     "    ``encoder_hidden_states``) and records, per step, the greedy next token\n",
     "    plus the top-35 candidate token ids at the last position.\n",
     "\n",
     "    NOTE(review): reads the module-level ``berttokenizer`` for the BOS id,\n",
     "    and ``batch_size`` is currently unused -- confirm both are intentional.\n",
     "\n",
     "    Returns a list of N (target_ids_tensor, stacked_top_k_tensor) pairs.\n",
     "    \"\"\"\n",
     "    N = clip_embed.shape[0]\n",
     "    max_len = 77  # hard cap on decoding steps (CLIP context length)\n",
     "\n",
     "    # Start every sequence with the BOS token; one Python list per image.\n",
     "    target_lists = [[berttokenizer.bos_token_id] for _ in range(N)]\n",
     "    top_k_lists = [[] for _ in range(N)]\n",
     "    bert_model.set_train(False)\n",
     "\n",
     "    for i in range(max_len):\n",
     "        targets = ms.Tensor(target_lists, dtype=ms.int64)\n",
     "        # Explicit position ids / attention mask for the current prefix length.\n",
     "        position_ids = ms.Tensor(np.arange(targets.shape[1])[None].repeat(N, axis=0), ms.int32)\n",
     "        attention_mask = ops.ones((N, targets.shape[1]), dtype=ms.int32)\n",
     "\n",
     "        out = bert_model(\n",
     "            input_ids=targets,\n",
     "            attention_mask=attention_mask,\n",
     "            position_ids=position_ids,\n",
     "            encoder_hidden_states=clip_embed,\n",
     "        )\n",
     "\n",
     "        # Greedy next token = argmax over the vocab at the last position.\n",
     "        pred_idxs = out.logits.argmax(axis=2)[:, -1].astype(ms.int64)\n",
     "        # Top-35 candidate ids per position; only the last position is kept below.\n",
     "        _, top_k = ops.topk(out.logits, dim=2, k=35)\n",
     "\n",
     "        for j in range(N):\n",
     "            target_lists[j].append(pred_idxs[j].item())\n",
     "            top_k_lists[j].append(top_k[j, -1])\n",
     "\n",
     "        # All sequences grow in lockstep, so this stops after 9 generated\n",
     "        # tokens (BOS + 9 >= 10) rather than running the full max_len.\n",
     "        if all(len(t) >= 10 for t in target_lists):\n",
     "            break\n",
     "\n",
     "    results = []\n",
     "    for i in range(N):\n",
     "        # Stack the per-step top-k rows into one tensor per image.\n",
     "        top_k_tensor = ops.concat(top_k_lists[i])\n",
     "        target_tensor = ms.Tensor(target_lists[i], dtype=ms.int64)\n",
     "        results.append((target_tensor, top_k_tensor))\n",
     "\n",
     "    return results\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "914f25b2-26dc-4a46-9e56-47c259e29132",
   "metadata": {},
   "outputs": [],
   "source": [
     "def image_decoder(clip_model, berttokenizer, image_loaders=None, bert_model=None):\n",
     "    \"\"\"Zero-shot OOD evaluation on CIFAR-100 over five 20-seen/80-unseen splits.\n",
     "\n",
     "    Per split: caption each image with the BERT decoder, take the top-k\n",
     "    candidate entities that are not among the seen labels, score the image\n",
     "    against seen + candidate descriptions with CLIP, and use the probability\n",
     "    mass assigned to the candidate descriptions as the OOD score. Prints the\n",
     "    per-split AUROC and the mean/std across splits.\n",
     "\n",
     "    NOTE(review): relies on the module-level ``idx2class`` and\n",
     "    ``cliptokenizer``; ``targets`` assumes exactly 100 images per class\n",
     "    (2000 seen / 8000 unseen) -- confirm against the loader.\n",
     "    \"\"\"\n",
     "    splits = [list(range(20)), list(range(20, 40)), list(range(40, 60)),\n",
     "              list(range(60, 80)), list(range(80, 100))]\n",
     "    auc_list_sum = []\n",
     "\n",
     "    # Iterate the splits in reverse; the first 20 labels of each reordered\n",
     "    # split act as the 'seen' (in-distribution) classes.\n",
     "    for seen_split in splits[-1::-1]:\n",
     "        split = seen_split + list(set(list(range(100))) - set(seen_split))\n",
     "        label_split = [idx2class[idx] for idx in split]\n",
     "        seen_labels = label_split[:20]\n",
     "        seen_descriptions = [f\"This is a photo of a {label}\" for label in seen_labels]\n",
     "        # 0 = seen/in-distribution, 1 = unseen/OOD (100 images per class assumed).\n",
     "        targets = ms.Tensor(2000 * [0] + 8000 * [1], dtype=ms.int32)\n",
     "        max_num_entities = 0\n",
     "        ood_probs_sum = []\n",
     "        print(seen_labels)\n",
     "\n",
     "        for semantic_label in tqdm(label_split):\n",
     "            loader = image_loaders[semantic_label]\n",
     "\n",
     "            for batch_data in loader.create_dict_iterator():\n",
     "                batch_images = batch_data[\"image\"]\n",
     "                batch_size = batch_images.shape[0]\n",
     "\n",
     "                clip_model.set_train(False)\n",
     "                clip_out = clip_model.get_image_features(pixel_values=batch_images)\n",
     "                # Widen CLIP features along the hidden dim and add a sequence\n",
     "                # axis for the decoder's cross-attention -- presumably matching\n",
     "                # the BERT hidden size; TODO confirm the expected width.\n",
     "                clip_extended_embed = ops.repeat_elements(clip_out, rep=2, axis=1)\n",
     "                clip_extended_embed = ops.expand_dims(clip_extended_embed, 1)\n",
     "\n",
     "                batch_results = greedysearch_generation_topk(clip_extended_embed, bert_model)\n",
     "                del clip_extended_embed\n",
     "                del clip_out\n",
     "\n",
     "                batch_target_tokens = []\n",
     "                batch_topk_tokens = []\n",
     "\n",
     "                # Decode generated ids back to token strings per image.\n",
     "                for target_list, topk_list in batch_results:\n",
     "                    target_tokens = [berttokenizer.decode(int(pred_idx.asnumpy())) for pred_idx in target_list]\n",
     "                    topk_tokens = [berttokenizer.decode(int(pred_idx.asnumpy())) for pred_idx in topk_list]\n",
     "                    batch_target_tokens.append(target_tokens)\n",
     "                    batch_topk_tokens.append(topk_tokens)\n",
     "\n",
     "                # Candidate entities = decoded top-k tokens not among seen labels.\n",
     "                batch_unique_entities = []\n",
     "                for topk_tokens in batch_topk_tokens:\n",
     "                    unique_entities = list(set(topk_tokens) - set(seen_labels))\n",
     "                    batch_unique_entities.append(unique_entities)\n",
     "                    max_num_entities = max(max_num_entities, len(unique_entities))\n",
     "\n",
     "                batch_all_desc = []\n",
     "                for unique_entities in batch_unique_entities:\n",
     "                    all_desc = seen_descriptions + [f\"This is a photo of a {label}\" for label in unique_entities]\n",
     "                    batch_all_desc.append(all_desc)\n",
     "\n",
     "                batch_all_desc_ids = [tokenize_for_clip(all_desc, cliptokenizer) for all_desc in batch_all_desc]\n",
     "\n",
     "                # L2-normalize image features for cosine-similarity scoring.\n",
     "                image_features = clip_model.get_image_features(pixel_values=batch_images)\n",
     "                image_features = image_features / ops.norm(image_features, dim=-1, keepdim=True)\n",
     "\n",
     "                for b_idx in range(len(batch_results)):\n",
     "                    text_features = clip_model.get_text_features(input_ids=batch_all_desc_ids[b_idx])\n",
     "                    text_features = text_features / ops.norm(text_features, dim=-1, keepdim=True)\n",
     "\n",
     "                    similarity = 100.0 * (image_features[b_idx:b_idx + 1] @ text_features.T)\n",
     "                    zeroshot_probs = ops.softmax(similarity, axis=-1).squeeze()\n",
     "\n",
     "                    # OOD score: probability mass on the candidate (non-seen)\n",
     "                    # descriptions (indices past the 20 seen classes).\n",
     "                    ood_prob_sum = float(ops.sum(zeroshot_probs[20:]).asnumpy())\n",
     "                    ood_probs_sum.append(ood_prob_sum)\n",
     "\n",
     "                del batch_target_tokens\n",
     "                del batch_topk_tokens\n",
     "                del batch_unique_entities\n",
     "                del batch_all_desc\n",
     "                del image_features\n",
     "\n",
     "        auc_sum = roc_auc_score(targets.asnumpy(), np.array(ood_probs_sum))\n",
     "        print('当前split的sum_ood AUROC={}'.format(auc_sum))\n",
     "        auc_list_sum.append(auc_sum)\n",
     "\n",
     "    print('所有AUC分数:', auc_list_sum)\n",
     "    print('AUC均值和标准差:', np.mean(auc_list_sum), np.std(auc_list_sum))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "17381ab4-660b-4927-895a-de176dcd4f29",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_args_in_notebook():\n",
    "    args = argparse.Namespace(\n",
    "        trained_path='./trained_models/COCO/'\n",
    "    )\n",
    "    return args"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "97f3c456-511a-4676-9bce-8acbd5731e1b",
   "metadata": {},
   "outputs": [],
   "source": [
     "# CIFAR-100 fine-label name -> class index (standard alphabetical order, 0-99).\n",
     "class_to_idx= {\n",
     "            'apple': 0,\n",
     "            'aquarium_fish': 1,\n",
     "            'baby': 2,\n",
     "            'bear': 3,\n",
     "            'beaver': 4,\n",
     "            'bed': 5,\n",
     "            'bee': 6,\n",
     "            'beetle': 7,\n",
     "            'bicycle': 8,\n",
     "            'bottle': 9,\n",
     "            'bowl': 10,\n",
     "            'boy': 11,\n",
     "            'bridge': 12,\n",
     "            'bus': 13,\n",
     "            'butterfly': 14,\n",
     "            'camel': 15,\n",
     "            'can': 16,\n",
     "            'castle': 17,\n",
     "            'caterpillar': 18,\n",
     "            'cattle': 19,\n",
     "            'chair': 20,\n",
     "            'chimpanzee': 21,\n",
     "            'clock': 22,\n",
     "            'cloud': 23,\n",
     "            'cockroach': 24,\n",
     "            'couch': 25,\n",
     "            'crab': 26,\n",
     "            'crocodile': 27,\n",
     "            'cup': 28,\n",
     "            'dinosaur': 29,\n",
     "            'dolphin': 30,\n",
     "            'elephant': 31,\n",
     "            'flatfish': 32,\n",
     "            'forest': 33,\n",
     "            'fox': 34,\n",
     "            'girl': 35,\n",
     "            'hamster': 36,\n",
     "            'house': 37,\n",
     "            'kangaroo': 38,\n",
     "            'keyboard': 39,\n",
     "            'lamp': 40,\n",
     "            'lawn_mower': 41,\n",
     "            'leopard': 42,\n",
     "            'lion': 43,\n",
     "            'lizard': 44,\n",
     "            'lobster': 45,\n",
     "            'man': 46,\n",
     "            'maple_tree': 47,\n",
     "            'motorcycle': 48,\n",
     "            'mountain': 49,\n",
     "            'mouse': 50,\n",
     "            'mushroom': 51,\n",
     "            'oak_tree': 52,\n",
     "            'orange': 53,\n",
     "            'orchid': 54,\n",
     "            'otter': 55,\n",
     "            'palm_tree': 56,\n",
     "            'pear': 57,\n",
     "            'pickup_truck': 58,\n",
     "            'pine_tree': 59,\n",
     "            'plain': 60,\n",
     "            'plate': 61,\n",
     "            'poppy': 62,\n",
     "            'porcupine': 63,\n",
     "            'possum': 64,\n",
     "            'rabbit': 65,\n",
     "            'raccoon': 66,\n",
     "            'ray': 67,\n",
     "            'road': 68,\n",
     "            'rocket': 69,\n",
     "            'rose': 70,\n",
     "            'sea': 71,\n",
     "            'seal': 72,\n",
     "            'shark': 73,\n",
     "            'shrew': 74,\n",
     "            'skunk': 75,\n",
     "            'skyscraper': 76,\n",
     "            'snail': 77,\n",
     "            'snake': 78,\n",
     "            'spider': 79,\n",
     "            'squirrel': 80,\n",
     "            'streetcar': 81,\n",
     "            'sunflower': 82,\n",
     "            'sweet_pepper': 83,\n",
     "            'table': 84,\n",
     "            'tank': 85,\n",
     "            'telephone': 86,\n",
     "            'television': 87,\n",
     "            'tiger': 88,\n",
     "            'tractor': 89,\n",
     "            'train': 90,\n",
     "            'trout': 91,\n",
     "            'tulip': 92,\n",
     "            'turtle': 93,\n",
     "            'wardrobe': 94,\n",
     "            'whale': 95,\n",
     "            'willow_tree': 96,\n",
     "            'wolf': 97,\n",
     "            'woman': 98,\n",
     "            'worm': 99\n",
     "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8656aba9-805f-4496-aba0-75c5a8e1de84",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:36.218.227 [mindspore/run_check/_check_version.py:329] MindSpore version 2.4.10 and Ascend AI software package (Ascend Data Center Solution)version 7.3 does not match, the version of software package expect one of ['7.5', '7.6']. Please refer to the match info on: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:36.219.982 [mindspore/run_check/_check_version.py:407] Can not find the tbe operator implementation(need by mindspore-ascend). Please check whether the Environment Variable PYTHONPATH is set. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:36.220.634 [mindspore/run_check/_check_version.py:347] MindSpore version 2.4.10 and \"te\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:36.221.233 [mindspore/run_check/_check_version.py:354] MindSpore version 2.4.10 and \"hccl\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:36.221.891 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 3\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:37.223.799 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 2\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:38.225.421 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 1\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:39.227.574 [mindspore/run_check/_check_version.py:329] MindSpore version 2.4.10 and Ascend AI software package (Ascend Data Center Solution)version 7.3 does not match, the version of software package expect one of ['7.5', '7.6']. Please refer to the match info on: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:39.229.160 [mindspore/run_check/_check_version.py:407] Can not find the tbe operator implementation(need by mindspore-ascend). Please check whether the Environment Variable PYTHONPATH is set. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:39.229.799 [mindspore/run_check/_check_version.py:347] MindSpore version 2.4.10 and \"te\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:39.230.418 [mindspore/run_check/_check_version.py:354] MindSpore version 2.4.10 and \"hccl\" wheel package version 7.3 does not match. For details, refer to the installation guidelines: https://www.mindspore.cn/install\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:39.231.022 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 3\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:40.233.173 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 2\n",
      "[WARNING] ME(65263:281473659833216,MainProcess):2025-01-16-01:39:41.234.898 [mindspore/run_check/_check_version.py:368] Please pay attention to the above warning, countdown: 1\n",
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/mindnlp/transformers/tokenization_utils_base.py:1526: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted, and will be then set to `False` by default. \n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MS_ALLOC_CONF]Runtime config:  enable_vmm:True  vmm_align_size:2MB\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/mindspore/miniconda/envs/jupyter/lib/python3.9/site-packages/mindnlp/transformers/tokenization_utils_base.py:1526: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted, and will be then set to `False` by default. \n",
      "  warnings.warn(\n",
      "BertGenerationDecoder has generative capabilities, as `prepare_inputs_for_generation` is explicitly overwritten. However, it doesn't directly inherit from `GenerationMixin`.`PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability to call `generate` and other related functions.\n",
      "  - If you are the owner of the model architecture code, please modify your model class such that it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception).\n",
      "  - If you are not the owner of the model architecture class, please contact the model code owner to update it.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "t0\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7772e68b66ae49c0890cfb768a69c2c4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "t1\n",
      "['squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm']\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "669f74b4209f45229916ead4094d9d20",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前split的sum_ood AUROC=0.8057389375\n",
      "['plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider']\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "759a31bcf79f48abbc495ef72bc3a812",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前split的sum_ood AUROC=0.8656406249999999\n",
      "['lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree']\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "65c8efc521834a4ca57c07bfb4968568",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前split的sum_ood AUROC=0.83000275\n",
      "['chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard']\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5d1d4bacb62747f1ba0c1fa8c4b051c2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前split的sum_ood AUROC=0.8249155000000001\n",
      "['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle']\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "36e0d85b89654d59827bf549e76af8d5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/100 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前split的sum_ood AUROC=0.81786125\n",
      "所有AUC分数: [0.8057389375, 0.8656406249999999, 0.83000275, 0.8249155000000001, 0.81786125]\n",
      "AUC均值和标准差: 0.8288318125 0.020125761566801608\n"
     ]
    }
   ],
   "source": [
    "if __name__ == '__main__':\n",
    "    # 判断是否在notebook环境\n",
    "    if 'ipykernel' in sys.modules or 'IPython' in sys.modules:\n",
    "        args = get_args_in_notebook()\n",
    "        context.set_context(device_target=\"Ascend\")\n",
    "    else:\n",
    "        parser = argparse.ArgumentParser()\n",
    "        parser.add_argument('--trained_path', type=str, default='./trained_models/COCO/')\n",
    "        args = parser.parse_args()\n",
    "        context.set_context(device_target=\"Ascend\")\n",
    "\n",
    "    args.saved_model_path = args.trained_path + '/ViT-B32/'\n",
    "\n",
    "    if not os.path.exists(args.saved_model_path):\n",
    "        os.makedirs(args.saved_model_path)\n",
    "\n",
    "    # 初始化tokenizers\n",
    "    berttokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')\n",
    "\n",
    "    # 加载CLIP模型和tokenizer\n",
    "    model_name = 'openai/clip-vit-base-patch32'\n",
    "    try:\n",
    "        clip_model = CLIPModel.from_pretrained(model_name)\n",
    "        cliptokenizer = CLIPTokenizer.from_pretrained(model_name)\n",
    "    except Exception as e:\n",
    "        print(f\"Error loading model from mirror, trying direct download: {e}\")\n",
    "        clip_model = CLIPModel.from_pretrained(model_name)\n",
    "        cliptokenizer = CLIPTokenizer.from_pretrained(model_name)\n",
    "\n",
    "    # 初始化BERT模型\n",
    "    if (not os.path.exists(f\"{args.saved_model_path}/decoder_model\")):\n",
    "        bert_config = BertGenerationConfig.from_pretrained(\"google/bert_for_seq_generation_L-24_bbc_encoder\")\n",
    "        bert_config.is_decoder = True\n",
    "        bert_config.add_cross_attention = True\n",
    "        bert_config.return_dict = True\n",
    "        bert_model = BertGenerationDecoder.from_pretrained(\"google/bert_for_seq_generation_L-24_bbc_encoder\",\n",
    "                                                           config=bert_config)\n",
    "    else:\n",
    "        bert_model = BertGenerationDecoder.from_pretrained(f\"{args.saved_model_path}/decoder_model\")\n",
    "    print('t0')\n",
    "    cifar100_loaders = cifar100_single_isolated_class_loader()\n",
    "    print('t1')\n",
    "    # 获取CIFAR100的类别映射\n",
    "    idx2class = {v: k for k, v in class_to_idx.items()}\n",
    "\n",
    "    image_decoder(clip_model, berttokenizer, image_loaders=cifar100_loaders, bert_model=bert_model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a194d3e-b70d-444e-9305-53f5a33913d6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
