{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE: this cell is a scratchpad of shell commands and pip requirement\n",
    "# pins -- run these in a terminal (or prefix with !), not as Python code.\n",
    "# Clone transformers, create a dedicated conda env, register a kernel.\n",
    "git clone https://githubfast.com/huggingface/transformers.git\n",
    "conda create -n pretrain --clone base\n",
    "python -m ipykernel install --name pretrain\n",
    "\n",
    "\n",
    "\n",
    "# Build NVIDIA apex from source with the fused C++/CUDA extensions.\n",
    "git clone https://githubfast.com/NVIDIA/apex\n",
    "cd apex\n",
    "pip install -v --disable-pip-version-check --no-cache-dir \\\n",
    "--global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./\n",
    "\n",
    "# requirements.txt-style constraints for the run_clm example (not commands).\n",
    "accelerate >= 0.12.0\n",
    "torch >= 1.3\n",
    "datasets >= 1.8.0\n",
    "sentencepiece != 0.1.92\n",
    "protobuf\n",
    "evaluate\n",
    "scikit-learn\n",
    "\n",
    "\n",
    "# Configure the git identity used for commits from this machine.\n",
    "git config --global user.name 'LJ旗飞飞' \n",
    "git config --global user.email '345009870@qq.com'\n",
    "\n",
    "\n",
    "# Remote-box setup: clone repos, reinstall apex, editable transformers install.\n",
    "cd /root \n",
    "git clone https://githubfast.com/NVIDIA/apex \n",
    "git clone https://gitee.com/ljqiff/LLM.git \n",
    "git clone https://githubfast.com/huggingface/transformers.git \n",
    "pip uninstall apex torchx\n",
    "cd /root/apex \n",
    "python setup.py install \n",
    "cd /root/transformers \n",
    "pip install -e .\n",
    "pip install evaluate \n",
    "pip install nvitop\n",
    "\n",
    "git clone https://githubfast.com/huggingface/evaluate.git\n",
    "pip install accelerate==0.29.0\n",
    "\n",
    "\n",
    "# Download the gpt2 model and wikitext dataset through the hf-mirror\n",
    "# endpoint, with hf-transfer enabled for faster downloads.\n",
    "pip install -U hf-transfer\n",
    "export HF_HUB_ENABLE_HF_TRANSFER=1\n",
    "export HF_ENDPOINT=https://hf-mirror.com\n",
    "huggingface-cli download --resume-download gpt2 --local-dir gpt2\n",
    "huggingface-cli download --repo-type dataset --resume-download wikitext --local-dir wikitext"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shell command (run in a terminal): train + evaluate a causal LM on the\n",
    "# wikitext-2-raw-v1 config using a local copy of transformers' run_clm script.\n",
    "python run_clm_copy.py     --model_name_or_path /gemini/data-2/pretrain/gpt2     --dataset_name /gemini/data-2/pretrain/wikitext     --dataset_config_name wikitext-2-raw-v1 --per_device_train_batch_size 8     --per_device_eval_batch_size 8     --do_train     --do_eval     --output_dir /tmp/test-clm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shell commands: clone the evaluate repo and pin accelerate.\n",
    "# NOTE(review): this pins accelerate==0.27.2 while the setup cell at the top\n",
    "# pins 0.29.0 -- confirm which version is actually wanted.\n",
    "git clone https://githubfast.com/huggingface/evaluate.git\n",
    "pip install accelerate==0.27.2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Point all Hugging Face cache locations at a local drive and route hub\n",
    "# traffic through the hf-mirror endpoint.\n",
    "import torch\n",
    "import os\n",
    "os.environ[\"HF_ENDPOINT\"] = \"https://hf-mirror.com\"\n",
    "os.environ[\"HUGGINGFACE_HUB_CACHE\"] = \"D:/LiJinQi/cache/\"\n",
    "# TRANSFORMERS_CACHE is deprecated (transformers emits a FutureWarning, see\n",
    "# the saved output of the cell below); HF_HOME is the replacement.\n",
    "os.environ[\"TRANSFORMERS_CACHE\"] = \"D:/LiJinQi/cache/\"\n",
    "os.environ[\"HF_HOME\"] = \"D:/LiJinQi/cache/\"\n",
    "os.environ[\"HF_HUB_CACHE\"] = \"D:/LiJinQi/cache/\"\n",
    "# Restart the kernel for these to take effect.\n",
    "# NOTE(review): opaque hash was left in the original comment -- verify it is\n",
    "# not a credential/token: cc3b4cef23e0f0417234e4490d0010ed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\ProgramData\\Anaconda3\\envs\\LLM\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "c:\\ProgramData\\Anaconda3\\envs\\LLM\\lib\\site-packages\\transformers\\utils\\hub.py:123: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
      "  warnings.warn(\n",
      "Loading checkpoint shards: 100%|██████████| 8/8 [00:47<00:00,  5.90s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你好！有什么我可以帮助你的吗？"
     ]
    }
   ],
   "source": [
    "# Load the local InternLM2-chat-7B snapshot in fp16 on GPU and stream one\n",
    "# reply. trust_remote_code=True loads the repo's custom modelling code,\n",
    "# which is what provides stream_chat().\n",
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "\n",
    "model_path = r\"D:\\LiJinQi\\cache\\models--internlm--internlm2-chat-7b\\snapshots\\f7dc28191037a297c086b5b70c6a226e2134e46d\"\n",
    "model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,torch_dtype=torch.float16).cuda()\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n",
    "\n",
    "model = model.eval()\n",
    "# stream_chat yields the full response-so-far on each step; print only the\n",
    "# newly generated suffix so the output appears to stream.\n",
    "length = 0\n",
    "for response, history in model.stream_chat(tokenizer, \"你好\", history=[]):\n",
    "    print(response[length:], flush=True, end=\"\")\n",
    "    length = len(response)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': [[1, 295, 3168, 1152, 8712, 500, 395, 603, 35257, 16411, 3471, 983, 4521, 2424, 281], [2, 2, 2, 2, 2, 2, 2, 2, 1, 295, 12366, 550, 906, 1905, 346]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]]}\n"
     ]
    }
   ],
   "source": [
    "# Tokenize two example sentences as one padded batch: the shorter sentence\n",
    "# is padded (visible in the saved output) and its attention_mask zeros the\n",
    "# pad positions out.\n",
    "sentence_long = \"I've been waiting for a HuggingFace course my whole life.\"\n",
    "sentence_short = \"I hate this so much!\"\n",
    "raw_inputs = [sentence_long, sentence_short]\n",
    "inputs = tokenizer(raw_inputs, padding=True, truncation=True)\n",
    "print(inputs)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'</s></s></s></s></s></s></s></s><s>I hate this so much!'"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Decode the second (padded) sequence back to text -- the leading </s> pad\n",
    "# tokens and the <s> BOS marker are visible in the decoded string.\n",
    "tokenizer.decode(inputs['input_ids'][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "92544"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Vocabulary size of the tokenizer (92544 per the saved output).\n",
    "len(tokenizer.get_vocab())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "'tokenizers.decoders.Sequence' object is not callable",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[10], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m tokenizer\u001b[38;5;241m.\u001b[39mdecoder(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39minputs)\n",
      "\u001b[1;31mTypeError\u001b[0m: 'tokenizers.decoders.Sequence' object is not callable"
     ]
    }
   ],
   "source": [
    "# FIX: tokenizer.tokenize() requires a text argument; the original call\n",
    "# passed none and would raise a TypeError. Tokenize the second example\n",
    "# sentence. (The saved error output is stale -- it came from an unrelated\n",
    "# `tokenizer.decoder(**inputs)` expression.)\n",
    "tokenizer.tokenize(raw_inputs[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards:  25%|██▌       | 2/8 [00:08<00:24,  4.03s/it]"
     ]
    }
   ],
   "source": [
    "# Reload tokenizer and model from the local snapshot in bfloat16 with\n",
    "# device_map='auto' so accelerate places the checkpoint shards across the\n",
    "# available devices automatically.\n",
    "import torch\n",
    "from transformers import AutoTokenizer,AutoModelForCausalLM\n",
    "\n",
    "model_name_or_path=r\"D:\\LiJinQi\\cache\\models--internlm--internlm2-chat-7b\\snapshots\\f7dc28191037a297c086b5b70c6a226e2134e46d\"\n",
    "tokenizer=AutoTokenizer.from_pretrained(model_name_or_path,trust_remote_code=True)\n",
    "model=AutoModelForCausalLM.from_pretrained(model_name_or_path,trust_remote_code=True,torch_dtype=torch.bfloat16,device_map='auto')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==========Welcome to InternLM chatbot, type 'exit' to exit.==========\n",
      "robot >>> 你好！很高兴为你服务。有什么我能帮助你的吗？\n",
      "robot >>> 对不起，我没有理解你的意思。你可以用更清晰的方式告诉我你想要什么帮助吗？\n",
      "robot >>> 对不起，我依然无法理解你的意思。如果你需要帮助，请用清晰、明确的语言告诉我你需要什么帮助，我会尽力为你提供帮助。\n",
      "robot >>> 我是一个人工智能助手，我的名字是 InternLM (书生·浦语)。我由上海人工智能实验室开发，致力于通过语言交互帮助人们解决问题、获取信息、学习知识等。如果你有任何问题需要帮助，请随时告诉我。\n",
      "robot >>> 很抱歉，我无法获取当前天气信息。请告诉我你所在的城市或地区，我可以帮你查询当地的天气情况。\n",
      "robot >>> 好的，我来帮你查询成都的天气。根据最新的天气预报，今天成都的天气是多云，最高气温为26摄氏度，最低气温为18摄氏度。请记得根据天气情况适当增减衣物，保持身体健康。\n"
     ]
    }
   ],
   "source": [
    "system_prompt = \"\"\"You are an AI assistant whose name is InternLM (书生·浦语).\n",
    "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n",
    "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.\n",
    "\"\"\"\n",
    "messages=[(system_prompt,\"\")]\n",
    "print(\"=\"*10+\"Welcome to InternLM chatbot, type 'exit' to exit.\"+\"=\"*10)\n",
    "while True:\n",
    "    input_text=input(\"User >>>\")\n",
    "    input_text=input_text.replace(\" \",\"\")\n",
    "    if input_text==\"exit\":\n",
    "        break\n",
    "    response, history = model.chat(tokenizer, input_text, history=messages)\n",
    "    messages.append((input_text, response))\n",
    "    print(f\"robot >>> {response}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python39",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
