{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d37eca7c",
   "metadata": {},
   "source": [
    "# Hugging Face Spaces"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ecd54d9f",
   "metadata": {},
   "source": [
    "## 安装Hugging Face 库\n",
    "```shell\n",
    "pip install huggingface_hub\n",
    "```\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ba6170ba",
   "metadata": {},
   "source": [
    "## 下载模型\n",
    "可以安装huggingface_hub库，然后使用huggingface_hub的snapshot_download方法下载模型。\n",
    "```python\n",
    "from huggingface_hub import snapshot_download\n",
    "snapshot_download(\"google/pegasus-large\")\n",
    "```\n",
    "也可以使用huggingface_hub的huggingface-cli命令行工具下载模型。\n",
    "```bash\n",
    "huggingface-cli download google/pegasus-large --local-dir ./models/google/pegasus-large\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cb61614",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download a model and its tokenizer into a local cache directory.\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "\n",
    "model_name = \"uer/gpt2-chinese-cluecorpussmall\"  # Hub repo id\n",
    "cache_dir = \"my-model-cache/uer/1-gpt2-chinese-cluecorpussmall\"  # local cache path\n",
    "\n",
    "# from_pretrained both downloads (into cache_dir) and loads the objects;\n",
    "# keep the return values instead of discarding them so they are usable here.\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir)\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)\n",
    "\n",
    "print(f\"模型分词器已下载到：{cache_dir}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6cae1d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoModel, AutoTokenizer\n",
    "\n",
    "# Local directory where the downloaded files will be cached.\n",
    "cache_dir = \"D:/AI/HuggingFace/my-model-cache/test/bert-base-chinese\"\n",
    "\n",
    "# Hub repository id of the model to fetch.\n",
    "model_name = \"google-bert/bert-base-chinese\"\n",
    "\n",
    "# Download (if not cached yet) and load both the model and its tokenizer.\n",
    "model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir)\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2cc15e9f",
   "metadata": {},
   "source": [
    "## Hugging Face API使用\n",
    "### 匿名访问API\n",
    "```python\n",
    "import requests\n",
    "API_URL = \"https://api-inference.huggingface.co/models/uer/gpt2-chinese-cluecorpussmall\"\n",
    "# 不使用 Authorization 头进行匿名访问\n",
    "response = requests.post(API_URL, json={\"inputs\": \"你好,Hugging Face\"})\n",
    "print(response.json())\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afb00394",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import requests\n",
    "\n",
    "API_URL = \"https://api-inference.huggingface.co/models/gpt2\"\n",
    "# SECURITY: never hardcode an access token in a notebook — it gets committed\n",
    "# and leaked. Read it from the environment (set HF_API_TOKEN beforehand).\n",
    "API_TOKEN = os.environ.get(\"HF_API_TOKEN\", \"\")\n",
    "headers = {\"Authorization\": f\"Bearer {API_TOKEN}\"}\n",
    "\n",
    "# Call the hosted Inference API for gpt2 and show the JSON reply.\n",
    "response = requests.post(API_URL, headers=headers, json={\"inputs\": \"hello,Hugging Face\"})\n",
    "print(response.json())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2e95cc2d",
   "metadata": {},
   "source": [
    "#### 本地调用大模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7654ec08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a locally cached model and run text generation with it.\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
    "import torch\n",
    "\n",
    "# NOTE: since transformers 4.40, local models must be given an absolute path.\n",
    "model_path = 'D:/AI/HuggingFace/my-model-cache/uer/gpt2-chinese-cluecorpussmall'\n",
    "print(model_path)\n",
    "model = AutoModelForCausalLM.from_pretrained(model_path)\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
    "\n",
    "# Fall back to CPU when no CUDA device is available instead of crashing.\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "generator = pipeline(\"text-generation\", model=model,\n",
    "                     tokenizer=tokenizer, device=device)\n",
    "\n",
    "prompt = \"你好，我是一款语言模型\"\n",
    "outputs = generator(prompt, max_length=500, num_return_sequences=1)\n",
    "print(outputs[0][\"generated_text\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8bcb25e2",
   "metadata": {},
   "source": [
    "## datasets\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d16ce77a",
   "metadata": {},
   "source": [
    "手动下载比较靠谱 以ChnSentiCorp为例：\n",
    "```shell\n",
    "huggingface-cli download --repo-type dataset --resume-download seamew/ChnSentiCorp --local-dir ChnSentiCorp\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e6f28355",
   "metadata": {},
   "source": [
    "#### 加载和导出数据集的例子\n",
    "特别注意，数据集如果是dict类型的，需要遍历或使用pandas库进行转换再保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5507a622",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): list_datasets is imported but unused; it was removed from\n",
    "# recent versions of the datasets library — verify before upgrading.\n",
    "from datasets import load_dataset,list_datasets,load_from_disk\n",
    "import pandas as pd\n",
    "\n",
    "# Download a dataset from the Hub (disabled; kept for reference)\n",
    "# dataset = load_dataset(path=\"NousResearch/hermes-function-calling-v1\")\n",
    "\n",
    "# Load a dataset previously saved to disk with save_to_disk()\n",
    "dataset = load_from_disk(\"D:/AI/HuggingFace/data/ChnSentiCorp/chn_senti_corp\")\n",
    "\n",
    "# Export the dataset (a dict of splits) to CSV files\n",
    "\"\"\" # 遍历每个数据集并将其转换为csv\n",
    "for key, dataset in dataset.items():\n",
    "    dataset.to_csv(f'{key}.csv')\n",
    "\n",
    "# 合并所有数据集到一个 DataFrame\n",
    "dataframes = []\n",
    "for key, dataset in dataset.items():\n",
    "    df = pd.DataFrame(dataset)\n",
    "    dataframes.append(df.assign(dataset=key))  # 添加一个列来标识数据集的来源\n",
    " \n",
    "# 合并 DataFrame\n",
    "combined_df = pd.concat(dataframes, ignore_index=True)\n",
    " \n",
    "# 导出到 CSV\n",
    "combined_df.to_csv('combinedDataset.csv', index=False) \"\"\"\n",
    "\n",
    "\n",
    "# Save a single split to a CSV file\n",
    "#dataset.to_csv(path_or_buf=\"d:/ai/huggingface/data/hermes-function-calling-v1.csv\")\n",
    "\n",
    "# Load data back from a CSV file\n",
    "# dataset=load_dataset(\"csv\",data_files=\"d:/ai/huggingface/data/hermes-function-calling-v1.csv\")\n",
    "# print(dataset)\n",
    "\n",
    "# Raw downloaded .arrow files cannot be read directly; convert to a loadable format\n",
    "\"\"\" #设置data_files \n",
    "data_files = {\n",
    "    'train': './data/ChnSentiCorp/chn_senti_corp-train.arrow',\n",
    "    'test': './data/ChnSentiCorp/chn_senti_corp-test.arrow',\n",
    "    'validation': './data/ChnSentiCorp/chn_senti_corp-validation.arrow'}\n",
    "# 加载arrow数据集\n",
    "dataset = load_dataset('arrow', data_files=data_files)\n",
    "# 保存至本地\n",
    "dataset.save_to_disk('./data/ChnSentiCorp/chn_senti_corp') \"\"\"\n",
    "\n",
    "\n",
    "# #Take the training split\n",
    "# train_dataset = dataset['train']\n",
    "# print(train_dataset)\n",
    "\n",
    "# #Inspect the records\n",
    "# for data in train_dataset:\n",
    "#     print(data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2dabfa1f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertTokenizer, BertModel\n",
    "\n",
    "token = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "#print(token)\n",
    "\n",
    "# 定义一组句子，用于后续的编码处理\n",
    "sents = [\"东西不错，不过有人不太喜欢镜面的，我个人比较喜欢，总之还算满意。\",\n",
    "         \"房间不错，只是上网速度慢得无法忍受，打开一个网页要等半小时，连邮件都无法收。另前台工作人员服务态度是很好，只是效率有待改善。\",\n",
    "         \"我一年中国上上下下跑好多次，这个酒店是我住过性价比最差的酒店。 1. 服务态度恶差，也许我去的时候他们太忙，和他们说话都懒得鸟你。 2. 房间恶差，我住的房间补补贴，中国八十年代的建筑我不介意，可你用白漆涂涂吧。我房间墙是二个颜色，顶是三个颜色。当然，不是配色，而是补补贴贴的东西。 3. 电视，居然是使用天线~~ 没错，你没看错，他们还没有有线电视，更别指望卫星电视。 4. 房间恶脏，卫生间的马桶和台盆希里哗啦，地板边上都长霉。房间墙角上都是莫名“液体” 5，地点太远，展馆边上也还要打的。 6. 早餐，别提了。 7. 对了，二楼门口是铁板，走起来晃晃悠悠，满廊桥的。 同志们，如果你是去展会，还是住锦江之星或别的，反正都要打的。 酒店老板，对不起，你实在太差了，对了，我的房间号是207.\"\n",
    "]\n",
    "\n",
    "# Encode a batch of sentences in a single call\n",
    "out = token.batch_encode_plus(\n",
    "    batch_text_or_text_pairs=[sents[0],sents[2]],\n",
    "    add_special_tokens=True,  # add the special tokens [CLS] and [SEP]\n",
    "    truncation=True,  # truncate anything beyond max_length\n",
    "    padding='max_length',  # pad with zeros up to max_length\n",
    "    max_length=50,  # maximum sequence length\n",
    "    #pad_to_max_length=True,  # deprecated alias of padding='max_length'\n",
    "    return_tensors=None , # None returns plain Python lists, NOT pytorch tensors\n",
    "    return_attention_mask=True  ,# return the attention mask\n",
    "    return_token_type_ids=True , # return the token type ids\n",
    "    return_special_tokens_mask=True  ,# return the special-tokens mask\n",
    "    return_length=True  # return each sequence's length\n",
    ")\n",
    "#print(out)\n",
    "\n",
    "for k, v in out.items():\n",
    "    print(k, \":\",v)\n",
    "# Decode the encoded input ids back to text and print the sentences\n",
    "print(token.decode(out['input_ids'][0]),token.decode(out['input_ids'][1]))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "908bb139",
   "metadata": {},
   "source": [
    "### 定制化处理vocab\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4943bba4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[101, 21129, 3221, 4495, 1462, 4638, 3341, 3975, 102, 0]\n",
      "[CLS] 阳光 是 生 命 的 来 源 [SEP] [PAD]\n"
     ]
    }
   ],
   "source": [
    "from transformers import BertTokenizer, BertModel\n",
    "\n",
    "# Instantiate the tokenizer in this cell so it also works after a kernel\n",
    "# restart (it previously relied on `token` leaking from an earlier cell).\n",
    "token = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "\n",
    "# vocab=token.get_vocab()\n",
    "# print(vocab)\n",
    "# print(\"阳\" in vocab)\n",
    "# print(\"光\" in vocab)\n",
    "# print(\"阳光\" in vocab)\n",
    "# print(len(vocab))\n",
    "# print(\"*************************\")\n",
    "# # Add new words to the vocabulary\n",
    "# token.add_tokens([\"大地\", \"阳光\"])\n",
    "# vocab=token.get_vocab()\n",
    "# print(\"阳光\" in vocab)\n",
    "# print(len(vocab))\n",
    "\n",
    "# Encode a new sentence\n",
    "out = token.encode(text=\"阳光是生命的来源\",\n",
    "                   text_pair=None,\n",
    "                   add_special_tokens=True,  # add [CLS] and [SEP]\n",
    "                   max_length=10,  # maximum length\n",
    "                   padding=\"max_length\",  # pad up to max_length\n",
    "                   truncation=True,  # truncate beyond max_length\n",
    "                   return_tensors=None  # return plain Python lists\n",
    "                   )\n",
    "\n",
    "print(out)\n",
    "# Decode back to the original text\n",
    "print(token.decode(out))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74303f7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import Dataset\n",
    "from datasets import load_from_disk\n",
    "\n",
    "class Mydataset(Dataset):\n",
    "    \"\"\"PyTorch-style dataset wrapping the on-disk ChnSentiCorp splits.\"\"\"\n",
    "\n",
    "    def __init__(self, split):\n",
    "        loaded = load_from_disk('D:/AI/HuggingFace/data/ChnSentiCorp/chn_senti_corp')\n",
    "        # Select the requested split; reject anything unknown up front.\n",
    "        if split in ('train', 'test', 'validation'):\n",
    "            self.dataset = loaded[split]\n",
    "        else:\n",
    "            raise ValueError('数据集名称错误！请核对加载的数据集')\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.dataset)\n",
    "\n",
    "    def __getitem__(self, item):\n",
    "        # Each record holds the review text and its sentiment label.\n",
    "        record = self.dataset[item]\n",
    "        return record['text'], record['label']\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    dataset = Mydataset(\"test\")\n",
    "    for data in dataset:\n",
    "        print(data)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "864f928a",
   "metadata": {},
   "source": [
    "## 模型微调"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "56f23f56",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Embedding(21128, 768, padding_idx=0)\n"
     ]
    }
   ],
   "source": [
    "# Load the pretrained backbone model\n",
    "from transformers import BertModel, BertTokenizer\n",
    "import torch\n",
    "\n",
    "# Training device: GPU when available, otherwise CPU\n",
    "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "pretrained = BertModel.from_pretrained(r\"D:\\AI\\HuggingFace\\my-model-cache\\bert-base-chinese\")\n",
    "print(pretrained.embeddings.word_embeddings)\n",
    "\n",
    "# Downstream task head: a single linear layer for binary classification on top\n",
    "# of the frozen BERT features (captures the module-level `pretrained` above).\n",
    "class Model(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Model, self).__init__()\n",
    "        self.fc = torch.nn.Linear(768, 2)\n",
    "    def forward(self, input_ids, attention_mask, token_type_ids):\n",
    "        # The backbone is frozen: no gradients flow into BERT\n",
    "        with torch.no_grad():\n",
    "            out = pretrained(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) \n",
    "\n",
    "        # Only the linear head is trained\n",
    "        out = self.fc(out.last_hidden_state[:, 0]) # take the [CLS] vector\n",
    "        out = torch.nn.functional.softmax(out, dim=1) # softmax over the two classes\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1899425b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\aeolu\\anaconda3\\envs\\HuggingFaceTed\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "       PyTorch: 2.6.0+cu124\n",
      "  transformers: 4.41.2\n",
      "CUDA available: True\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import transformers\n",
    "\n",
    "# Report library versions and whether a CUDA device is visible,\n",
    "# right-aligned in a 14-character column (same layout as before).\n",
    "for name, value in [\n",
    "    (\"PyTorch\", torch.__version__),\n",
    "    (\"transformers\", transformers.__version__),\n",
    "    (\"CUDA available\", torch.cuda.is_available()),\n",
    "]:\n",
    "    print(f\"{name:>14}:\", value)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "HuggingFaceTed",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
