{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b0f068b9-1046-4cdc-8a33-9f743b52d619",
   "metadata": {},
   "source": [
    "# 医疗大模型\n",
    "使用现有的医疗数据，测试制作一款医疗大模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "90960483-8460-4e68-9439-8cc69b14ee0a",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on method from_pretrained in module transformers.models.auto.auto_factory:\n",
      "\n",
      "from_pretrained(*model_args, **kwargs) method of builtins.type instance\n",
      "    Instantiate one of the base model classes of the library from a pretrained model.\n",
      "    \n",
      "    The model class to instantiate is selected based on the `model_type` property of the config object (either\n",
      "    passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by\n",
      "    falling back to using pattern matching on `pretrained_model_name_or_path`:\n",
      "    \n",
      "        - **albert** -- [`AlbertModel`] (ALBERT model)\n",
      "        - **align** -- [`AlignModel`] (ALIGN model)\n",
      "        - **altclip** -- [`AltCLIPModel`] (AltCLIP model)\n",
      "        - **audio-spectrogram-transformer** -- [`ASTModel`] (Audio Spectrogram Transformer model)\n",
      "        - **bart** -- [`BartModel`] (BART model)\n",
      "        - **beit** -- [`BeitModel`] (BEiT model)\n",
      "        - **bert** -- [`BertModel`] (BERT model)\n",
      "        - **bert-generation** -- [`BertGenerationEncoder`] (Bert Generation model)\n",
      "        - **big_bird** -- [`BigBirdModel`] (BigBird model)\n",
      "        - **bigbird_pegasus** -- [`BigBirdPegasusModel`] (BigBird-Pegasus model)\n",
      "        - **biogpt** -- [`BioGptModel`] (BioGpt model)\n",
      "        - **bit** -- [`BitModel`] (BiT model)\n",
      "        - **blenderbot** -- [`BlenderbotModel`] (Blenderbot model)\n",
      "        - **blenderbot-small** -- [`BlenderbotSmallModel`] (BlenderbotSmall model)\n",
      "        - **blip** -- [`BlipModel`] (BLIP model)\n",
      "        - **blip-2** -- [`Blip2Model`] (BLIP-2 model)\n",
      "        - **bloom** -- [`BloomModel`] (BLOOM model)\n",
      "        - **bridgetower** -- [`BridgeTowerModel`] (BridgeTower model)\n",
      "        - **camembert** -- [`CamembertModel`] (CamemBERT model)\n",
      "        - **canine** -- [`CanineModel`] (CANINE model)\n",
      "        - **chinese_clip** -- [`ChineseCLIPModel`] (Chinese-CLIP model)\n",
      "        - **clap** -- [`ClapModel`] (CLAP model)\n",
      "        - **clip** -- [`CLIPModel`] (CLIP model)\n",
      "        - **clipseg** -- [`CLIPSegModel`] (CLIPSeg model)\n",
      "        - **codegen** -- [`CodeGenModel`] (CodeGen model)\n",
      "        - **conditional_detr** -- [`ConditionalDetrModel`] (Conditional DETR model)\n",
      "        - **convbert** -- [`ConvBertModel`] (ConvBERT model)\n",
      "        - **convnext** -- [`ConvNextModel`] (ConvNeXT model)\n",
      "        - **convnextv2** -- [`ConvNextV2Model`] (ConvNeXTV2 model)\n",
      "        - **cpmant** -- [`CpmAntModel`] (CPM-Ant model)\n",
      "        - **ctrl** -- [`CTRLModel`] (CTRL model)\n",
      "        - **cvt** -- [`CvtModel`] (CvT model)\n",
      "        - **data2vec-audio** -- [`Data2VecAudioModel`] (Data2VecAudio model)\n",
      "        - **data2vec-text** -- [`Data2VecTextModel`] (Data2VecText model)\n",
      "        - **data2vec-vision** -- [`Data2VecVisionModel`] (Data2VecVision model)\n",
      "        - **deberta** -- [`DebertaModel`] (DeBERTa model)\n",
      "        - **deberta-v2** -- [`DebertaV2Model`] (DeBERTa-v2 model)\n",
      "        - **decision_transformer** -- [`DecisionTransformerModel`] (Decision Transformer model)\n",
      "        - **deformable_detr** -- [`DeformableDetrModel`] (Deformable DETR model)\n",
      "        - **deit** -- [`DeiTModel`] (DeiT model)\n",
      "        - **deta** -- [`DetaModel`] (DETA model)\n",
      "        - **detr** -- [`DetrModel`] (DETR model)\n",
      "        - **dinat** -- [`DinatModel`] (DiNAT model)\n",
      "        - **distilbert** -- [`DistilBertModel`] (DistilBERT model)\n",
      "        - **donut-swin** -- [`DonutSwinModel`] (DonutSwin model)\n",
      "        - **dpr** -- [`DPRQuestionEncoder`] (DPR model)\n",
      "        - **dpt** -- [`DPTModel`] (DPT model)\n",
      "        - **efficientformer** -- [`EfficientFormerModel`] (EfficientFormer model)\n",
      "        - **efficientnet** -- [`EfficientNetModel`] (EfficientNet model)\n",
      "        - **electra** -- [`ElectraModel`] (ELECTRA model)\n",
      "        - **ernie** -- [`ErnieModel`] (ERNIE model)\n",
      "        - **ernie_m** -- [`ErnieMModel`] (ErnieM model)\n",
      "        - **esm** -- [`EsmModel`] (ESM model)\n",
      "        - **flaubert** -- [`FlaubertModel`] (FlauBERT model)\n",
      "        - **flava** -- [`FlavaModel`] (FLAVA model)\n",
      "        - **fnet** -- [`FNetModel`] (FNet model)\n",
      "        - **fsmt** -- [`FSMTModel`] (FairSeq Machine-Translation model)\n",
      "        - **funnel** -- [`FunnelModel`] or [`FunnelBaseModel`] (Funnel Transformer model)\n",
      "        - **git** -- [`GitModel`] (GIT model)\n",
      "        - **glpn** -- [`GLPNModel`] (GLPN model)\n",
      "        - **gpt-sw3** -- [`GPT2Model`] (GPT-Sw3 model)\n",
      "        - **gpt2** -- [`GPT2Model`] (OpenAI GPT-2 model)\n",
      "        - **gpt_bigcode** -- [`GPTBigCodeModel`] (GPTBigCode model)\n",
      "        - **gpt_neo** -- [`GPTNeoModel`] (GPT Neo model)\n",
      "        - **gpt_neox** -- [`GPTNeoXModel`] (GPT NeoX model)\n",
      "        - **gpt_neox_japanese** -- [`GPTNeoXJapaneseModel`] (GPT NeoX Japanese model)\n",
      "        - **gptj** -- [`GPTJModel`] (GPT-J model)\n",
      "        - **gptsan-japanese** -- [`GPTSanJapaneseForConditionalGeneration`] (GPTSAN-japanese model)\n",
      "        - **graphormer** -- [`GraphormerModel`] (Graphormer model)\n",
      "        - **groupvit** -- [`GroupViTModel`] (GroupViT model)\n",
      "        - **hubert** -- [`HubertModel`] (Hubert model)\n",
      "        - **ibert** -- [`IBertModel`] (I-BERT model)\n",
      "        - **imagegpt** -- [`ImageGPTModel`] (ImageGPT model)\n",
      "        - **informer** -- [`InformerModel`] (Informer model)\n",
      "        - **jukebox** -- [`JukeboxModel`] (Jukebox model)\n",
      "        - **layoutlm** -- [`LayoutLMModel`] (LayoutLM model)\n",
      "        - **layoutlmv2** -- [`LayoutLMv2Model`] (LayoutLMv2 model)\n",
      "        - **layoutlmv3** -- [`LayoutLMv3Model`] (LayoutLMv3 model)\n",
      "        - **led** -- [`LEDModel`] (LED model)\n",
      "        - **levit** -- [`LevitModel`] (LeViT model)\n",
      "        - **lilt** -- [`LiltModel`] (LiLT model)\n",
      "        - **llama** -- [`LlamaModel`] (LLaMA model)\n",
      "        - **longformer** -- [`LongformerModel`] (Longformer model)\n",
      "        - **longt5** -- [`LongT5Model`] (LongT5 model)\n",
      "        - **luke** -- [`LukeModel`] (LUKE model)\n",
      "        - **lxmert** -- [`LxmertModel`] (LXMERT model)\n",
      "        - **m2m_100** -- [`M2M100Model`] (M2M100 model)\n",
      "        - **marian** -- [`MarianModel`] (Marian model)\n",
      "        - **markuplm** -- [`MarkupLMModel`] (MarkupLM model)\n",
      "        - **mask2former** -- [`Mask2FormerModel`] (Mask2Former model)\n",
      "        - **maskformer** -- [`MaskFormerModel`] (MaskFormer model)\n",
      "        - **maskformer-swin** -- [`MaskFormerSwinModel`] (MaskFormerSwin model)\n",
      "        - **mbart** -- [`MBartModel`] (mBART model)\n",
      "        - **mctct** -- [`MCTCTModel`] (M-CTC-T model)\n",
      "        - **mega** -- [`MegaModel`] (MEGA model)\n",
      "        - **megatron-bert** -- [`MegatronBertModel`] (Megatron-BERT model)\n",
      "        - **mgp-str** -- [`MgpstrForSceneTextRecognition`] (MGP-STR model)\n",
      "        - **mobilebert** -- [`MobileBertModel`] (MobileBERT model)\n",
      "        - **mobilenet_v1** -- [`MobileNetV1Model`] (MobileNetV1 model)\n",
      "        - **mobilenet_v2** -- [`MobileNetV2Model`] (MobileNetV2 model)\n",
      "        - **mobilevit** -- [`MobileViTModel`] (MobileViT model)\n",
      "        - **mpnet** -- [`MPNetModel`] (MPNet model)\n",
      "        - **mt5** -- [`MT5Model`] (MT5 model)\n",
      "        - **mvp** -- [`MvpModel`] (MVP model)\n",
      "        - **nat** -- [`NatModel`] (NAT model)\n",
      "        - **nezha** -- [`NezhaModel`] (Nezha model)\n",
      "        - **nllb-moe** -- [`NllbMoeModel`] (NLLB-MOE model)\n",
      "        - **nystromformer** -- [`NystromformerModel`] (Nyströmformer model)\n",
      "        - **oneformer** -- [`OneFormerModel`] (OneFormer model)\n",
      "        - **openai-gpt** -- [`OpenAIGPTModel`] (OpenAI GPT model)\n",
      "        - **opt** -- [`OPTModel`] (OPT model)\n",
      "        - **owlvit** -- [`OwlViTModel`] (OWL-ViT model)\n",
      "        - **pegasus** -- [`PegasusModel`] (Pegasus model)\n",
      "        - **pegasus_x** -- [`PegasusXModel`] (PEGASUS-X model)\n",
      "        - **perceiver** -- [`PerceiverModel`] (Perceiver model)\n",
      "        - **plbart** -- [`PLBartModel`] (PLBart model)\n",
      "        - **poolformer** -- [`PoolFormerModel`] (PoolFormer model)\n",
      "        - **prophetnet** -- [`ProphetNetModel`] (ProphetNet model)\n",
      "        - **qdqbert** -- [`QDQBertModel`] (QDQBert model)\n",
      "        - **reformer** -- [`ReformerModel`] (Reformer model)\n",
      "        - **regnet** -- [`RegNetModel`] (RegNet model)\n",
      "        - **rembert** -- [`RemBertModel`] (RemBERT model)\n",
      "        - **resnet** -- [`ResNetModel`] (ResNet model)\n",
      "        - **retribert** -- [`RetriBertModel`] (RetriBERT model)\n",
      "        - **roberta** -- [`RobertaModel`] (RoBERTa model)\n",
      "        - **roberta-prelayernorm** -- [`RobertaPreLayerNormModel`] (RoBERTa-PreLayerNorm model)\n",
      "        - **roc_bert** -- [`RoCBertModel`] (RoCBert model)\n",
      "        - **roformer** -- [`RoFormerModel`] (RoFormer model)\n",
      "        - **segformer** -- [`SegformerModel`] (SegFormer model)\n",
      "        - **sew** -- [`SEWModel`] (SEW model)\n",
      "        - **sew-d** -- [`SEWDModel`] (SEW-D model)\n",
      "        - **speech_to_text** -- [`Speech2TextModel`] (Speech2Text model)\n",
      "        - **speecht5** -- [`SpeechT5Model`] (SpeechT5 model)\n",
      "        - **splinter** -- [`SplinterModel`] (Splinter model)\n",
      "        - **squeezebert** -- [`SqueezeBertModel`] (SqueezeBERT model)\n",
      "        - **swin** -- [`SwinModel`] (Swin Transformer model)\n",
      "        - **swin2sr** -- [`Swin2SRModel`] (Swin2SR model)\n",
      "        - **swinv2** -- [`Swinv2Model`] (Swin Transformer V2 model)\n",
      "        - **switch_transformers** -- [`SwitchTransformersModel`] (SwitchTransformers model)\n",
      "        - **t5** -- [`T5Model`] (T5 model)\n",
      "        - **table-transformer** -- [`TableTransformerModel`] (Table Transformer model)\n",
      "        - **tapas** -- [`TapasModel`] (TAPAS model)\n",
      "        - **time_series_transformer** -- [`TimeSeriesTransformerModel`] (Time Series Transformer model)\n",
      "        - **timesformer** -- [`TimesformerModel`] (TimeSformer model)\n",
      "        - **trajectory_transformer** -- [`TrajectoryTransformerModel`] (Trajectory Transformer model)\n",
      "        - **transfo-xl** -- [`TransfoXLModel`] (Transformer-XL model)\n",
      "        - **tvlt** -- [`TvltModel`] (TVLT model)\n",
      "        - **unispeech** -- [`UniSpeechModel`] (UniSpeech model)\n",
      "        - **unispeech-sat** -- [`UniSpeechSatModel`] (UniSpeechSat model)\n",
      "        - **van** -- [`VanModel`] (VAN model)\n",
      "        - **videomae** -- [`VideoMAEModel`] (VideoMAE model)\n",
      "        - **vilt** -- [`ViltModel`] (ViLT model)\n",
      "        - **vision-text-dual-encoder** -- [`VisionTextDualEncoderModel`] (VisionTextDualEncoder model)\n",
      "        - **visual_bert** -- [`VisualBertModel`] (VisualBERT model)\n",
      "        - **vit** -- [`ViTModel`] (ViT model)\n",
      "        - **vit_hybrid** -- [`ViTHybridModel`] (ViT Hybrid model)\n",
      "        - **vit_mae** -- [`ViTMAEModel`] (ViTMAE model)\n",
      "        - **vit_msn** -- [`ViTMSNModel`] (ViTMSN model)\n",
      "        - **wav2vec2** -- [`Wav2Vec2Model`] (Wav2Vec2 model)\n",
      "        - **wav2vec2-conformer** -- [`Wav2Vec2ConformerModel`] (Wav2Vec2-Conformer model)\n",
      "        - **wavlm** -- [`WavLMModel`] (WavLM model)\n",
      "        - **whisper** -- [`WhisperModel`] (Whisper model)\n",
      "        - **xclip** -- [`XCLIPModel`] (X-CLIP model)\n",
      "        - **xglm** -- [`XGLMModel`] (XGLM model)\n",
      "        - **xlm** -- [`XLMModel`] (XLM model)\n",
      "        - **xlm-prophetnet** -- [`XLMProphetNetModel`] (XLM-ProphetNet model)\n",
      "        - **xlm-roberta** -- [`XLMRobertaModel`] (XLM-RoBERTa model)\n",
      "        - **xlm-roberta-xl** -- [`XLMRobertaXLModel`] (XLM-RoBERTa-XL model)\n",
      "        - **xlnet** -- [`XLNetModel`] (XLNet model)\n",
      "        - **xmod** -- [`XmodModel`] (X-MOD model)\n",
      "        - **yolos** -- [`YolosModel`] (YOLOS model)\n",
      "        - **yoso** -- [`YosoModel`] (YOSO model)\n",
      "    \n",
      "    The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are\n",
      "    deactivated). To train the model, you should first set it back in training mode with `model.train()`\n",
      "    \n",
      "    Args:\n",
      "        pretrained_model_name_or_path (`str` or `os.PathLike`):\n",
      "            Can be either:\n",
      "    \n",
      "                - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n",
      "                  Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n",
      "                  user or organization name, like `dbmdz/bert-base-german-cased`.\n",
      "                - A path to a *directory* containing model weights saved using\n",
      "                  [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n",
      "                - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n",
      "                  this case, `from_tf` should be set to `True` and a configuration object should be provided as\n",
      "                  `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n",
      "                  PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n",
      "        model_args (additional positional arguments, *optional*):\n",
      "            Will be passed along to the underlying model `__init__()` method.\n",
      "        config ([`PretrainedConfig`], *optional*):\n",
      "            Configuration for the model to use instead of an automatically loaded configuration. Configuration can\n",
      "            be automatically loaded when:\n",
      "    \n",
      "                - The model is a model provided by the library (loaded with the *model id* string of a pretrained\n",
      "                  model).\n",
      "                - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the\n",
      "                  save directory.\n",
      "                - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a\n",
      "                  configuration JSON file named *config.json* is found in the directory.\n",
      "        state_dict (*Dict[str, torch.Tensor]*, *optional*):\n",
      "            A state dictionary to use instead of a state dictionary loaded from saved weights file.\n",
      "    \n",
      "            This option can be used if you want to create a model from a pretrained configuration but load your own\n",
      "            weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and\n",
      "            [`~PreTrainedModel.from_pretrained`] is not a simpler option.\n",
      "        cache_dir (`str` or `os.PathLike`, *optional*):\n",
      "            Path to a directory in which a downloaded pretrained model configuration should be cached if the\n",
      "            standard cache should not be used.\n",
      "        from_tf (`bool`, *optional*, defaults to `False`):\n",
      "            Load the model weights from a TensorFlow checkpoint save file (see docstring of\n",
      "            `pretrained_model_name_or_path` argument).\n",
      "        force_download (`bool`, *optional*, defaults to `False`):\n",
      "            Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n",
      "            cached versions if they exist.\n",
      "        resume_download (`bool`, *optional*, defaults to `False`):\n",
      "            Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n",
      "            file exists.\n",
      "        proxies (`Dict[str, str]`, *optional*):\n",
      "            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n",
      "            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n",
      "        output_loading_info(`bool`, *optional*, defaults to `False`):\n",
      "            Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.\n",
      "        local_files_only(`bool`, *optional*, defaults to `False`):\n",
      "            Whether or not to only look at local files (e.g., not try downloading the model).\n",
      "        revision (`str`, *optional*, defaults to `\"main\"`):\n",
      "            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n",
      "            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n",
      "            identifier allowed by git.\n",
      "        trust_remote_code (`bool`, *optional*, defaults to `False`):\n",
      "            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option\n",
      "            should only be set to `True` for repositories you trust and in which you have read the code, as it will\n",
      "            execute code present on the Hub on your local machine.\n",
      "        kwargs (additional keyword arguments, *optional*):\n",
      "            Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n",
      "            `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n",
      "            automatically loaded:\n",
      "    \n",
      "                - If a configuration is provided with `config`, `**kwargs` will be directly passed to the\n",
      "                  underlying model's `__init__` method (we assume all relevant updates to the configuration have\n",
      "                  already been done)\n",
      "                - If a configuration is not provided, `kwargs` will be first passed to the configuration class\n",
      "                  initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that\n",
      "                  corresponds to a configuration attribute will be used to override said attribute with the\n",
      "                  supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute\n",
      "                  will be passed to the underlying model's `__init__` function.\n",
      "    \n",
      "    Examples:\n",
      "    \n",
      "    ```python\n",
      "    >>> from transformers import AutoConfig, AutoModel\n",
      "    \n",
      "    >>> # Download model and configuration from huggingface.co and cache.\n",
      "    >>> model = AutoModel.from_pretrained(\"bert-base-cased\")\n",
      "    \n",
      "    >>> # Update configuration during loading\n",
      "    >>> model = AutoModel.from_pretrained(\"bert-base-cased\", output_attentions=True)\n",
      "    >>> model.config.output_attentions\n",
      "    True\n",
      "    \n",
      "    >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n",
      "    >>> config = AutoConfig.from_pretrained(\"./tf_model/bert_tf_model_config.json\")\n",
      "    >>> model = AutoModel.from_pretrained(\n",
      "    ...     \"./tf_model/bert_tf_checkpoint.ckpt.index\", from_tf=True, config=config\n",
      "    ... )\n",
      "    ```\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Import AutoModel here so this cell survives Restart & Run All:\n",
    "# it originally relied on the import cell below having run first\n",
    "# (execution counts 3 vs 1 show out-of-order execution).\n",
    "from transformers import AutoModel\n",
    "\n",
    "help(AutoModel.from_pretrained)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "93770dd2-5d74-4f66-8a4d-0f0ffecd4ae0",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "A new version of the following files was downloaded from https://huggingface.co/THUDM/chatglm2-6b:\n",
      "- quantization.py\n",
      ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a429bf1d630c40e19e46d8e9a458aecb",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import os\n",
    "\n",
    "# Set the Hugging Face cache location BEFORE importing transformers so it\n",
    "# takes effect for all downloads in this session.\n",
    "# NOTE(review): hardcoded absolute Windows path -- consider reading it from\n",
    "# an environment variable or a config cell so the notebook is portable.\n",
    "os.environ[\"HF_HOME\"] = \"D:/bert\"\n",
    "\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "# SECURITY: trust_remote_code=True executes Python downloaded from the Hub\n",
    "# (the stderr above shows quantization.py was fetched). Pin a revision for\n",
    "# repositories you have audited.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True)\n",
    "# quantize(8): 8-bit quantization so the 6B model fits in limited GPU memory.\n",
    "model = AutoModel.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True).quantize(8).cuda()\n",
    "# Inference only: disable dropout etc. (from_pretrained loads in eval mode,\n",
    "# but being explicit is harmless).\n",
    "model = model.eval()\n",
    "# The chat call lives in the next cell; it was duplicated here, and its\n",
    "# result was immediately recomputed and overwritten below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "134790dc-8ecb-44e8-88c0-08314b4aef6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Send a greeting to the model and display its reply.\n",
    "# history=[] starts a fresh conversation; reuse `history` for follow-ups.\n",
    "response, history = model.chat(tokenizer, \"你好\", history=[])\n",
    "response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "158d2045-8674-49a8-878e-6c5b32219d84",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
