{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "76594d96",
   "metadata": {},
   "source": [
    "### 新闻主题分类\n",
    "\n",
    "以一段新闻报道中的文本描述内容为输入，判断其最有可能属于哪一类型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8aab935",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Load the AG_NEWS dataset\n",
    "\n",
    "import torch\n",
    "import torchtext\n",
    "from torchtext.legacy.datasets import text_classification\n",
    "import os\n",
    "\n",
    "load_data_path = './data'\n",
    "os.makedirs(load_data_path, exist_ok=True)\n",
    "\n",
    "# train_dataset, test_dataset = text_classification.DATASETS['AG_NEWS'](root=load_data_path)  # may incur httperror\n",
    "\n",
    "from torchtext.utils import extract_archive\n",
    "from torchtext.vocab import build_vocab_from_iterator\n",
    "from torchtext.legacy.datasets.text_classification import TextClassificationDataset, _csv_iterator,_create_data_from_iterator\n",
    "\n",
    "# Build the dataset manually from a pre-downloaded archive instead, since the\n",
    "# automatic download above can fail with an HTTP error.\n",
    "ngrams = 1\n",
    "include_unk=False\n",
    "dataset_tar=os.path.join(load_data_path, 'ag_news_csv.tar.gz')  # archive must already exist at this path\n",
    "extracted_files = extract_archive(dataset_tar)\n",
    "\n",
    "# Locate the train/test CSV files inside the extracted archive\n",
    "for fname in extracted_files:\n",
    "    if fname.endswith('train.csv'):\n",
    "        train_csv_path = fname\n",
    "    if fname.endswith('test.csv'):\n",
    "        test_csv_path = fname\n",
    "        \n",
    "# Build the vocabulary from the training split; '<unk>' sits at index 0 and\n",
    "# set_default_index(0) makes it the fallback for out-of-vocabulary tokens.\n",
    "vocab = build_vocab_from_iterator(_csv_iterator(train_csv_path, ngrams), min_freq=1, specials=['<unk>'], special_first=True)\n",
    "vocab.set_default_index(0)\n",
    "\n",
    "# Convert each CSV row into (label, token-id tensor) pairs\n",
    "train_data, train_labels = _create_data_from_iterator(\n",
    "        vocab, _csv_iterator(train_csv_path, ngrams, yield_cls=True), include_unk)\n",
    "test_data, test_labels = _create_data_from_iterator(\n",
    "        vocab, _csv_iterator(test_csv_path, ngrams, yield_cls=True), include_unk)\n",
    "\n",
    "train_dataset, test_dataset = TextClassificationDataset(vocab, train_data, train_labels), TextClassificationDataset(vocab, test_data, test_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d66e2dcf",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect one processed sample — presumably a (label, token-id tensor) pair; verify against output\n",
    "print(test_data[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "31de7c82",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2. Text classification model with an embedding layer\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "BATCH_SIZE = 16\n",
    "\n",
    "class TextSentiment(nn.Module):\n",
    "    \"\"\"Text classification model: EmbeddingBag -> Linear.\"\"\"\n",
    "    def __init__(self, vocab_size, embed_dim, num_class):\n",
    "        \"\"\"\n",
    "        Initialize the model.\n",
    "        :param vocab_size: total number of distinct tokens in the corpus\n",
    "        :param embed_dim: dimensionality of the word embeddings\n",
    "        :param num_class: number of target classes\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        # EmbeddingBag pools each sequence's embeddings; sparse=True means\n",
    "        # gradient updates only touch the embedding rows that were looked up.\n",
    "        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)\n",
    "        # Linear layer mapping the pooled embedding to per-class scores\n",
    "        self.fc = nn.Linear(embed_dim, num_class)\n",
    "        # Initialize the weights of both layers\n",
    "        self.init_weights()\n",
    "        \n",
    "    def init_weights(self):\n",
    "        # Half-width of the uniform initialization interval\n",
    "        init_range = 0.5\n",
    "        # Both weight matrices drawn from U(-init_range, init_range)\n",
    "        self.embedding.weight.data.uniform_(-init_range, init_range)\n",
    "        self.fc.weight.data.uniform_(-init_range, init_range)\n",
    "        # Biases start at zero\n",
    "        self.fc.bias.data.zero_()\n",
    "        \n",
    "    def forward(self, text, offsets):\n",
    "        # text: concatenated token ids of the whole batch (1-D tensor);\n",
    "        # offsets: start index of each sample within `text`\n",
    "        embedded = self.embedding(text, offsets)\n",
    "        return self.fc(embedded)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32029464",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the model on GPU when available, otherwise CPU\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "VOCAB_SIZE = len(train_dataset.get_vocab())\n",
    "EMBED_DIM = 32\n",
    "NUM_CLASS = len(train_dataset.get_labels())\n",
    "model = TextSentiment(VOCAB_SIZE, EMBED_DIM, NUM_CLASS).to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c2cd958",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Batch the data\n",
    "# nn.EmbeddingBag pools the embeddings of each \"bag\" (sequence).\n",
    "# The text entries have different lengths; nn.EmbeddingBag needs no padding,\n",
    "# because the sequence boundaries are carried by the offsets tensor instead.\n",
    "# All text entries of a batch are concatenated into one 1-D tensor, and\n",
    "# offsets holds the start index of each individual sequence inside it.\n",
    "def generate_batch(batch):\n",
    "    \"\"\"\n",
    "    Collate function producing one batch for nn.EmbeddingBag.\n",
    "    :param batch: batch_size-long list of (label, sample tensor) tuples, e.g.\n",
    "            [(label1, sample1), (label2, sample2), ..., (labelN, sampleN)]\n",
    "    :return: (text, offsets, label) where\n",
    "            text = concatenation of all sample tensors,\n",
    "            offsets = start index of each sample inside text,\n",
    "            label = tensor([label1, label2, ..., labelN])\n",
    "    \"\"\"\n",
    "    label = torch.tensor([entry[0] for entry in batch])\n",
    "    text = [entry[1] for entry in batch]\n",
    "    # Cumulative sum of lengths (dropping the last) yields the start offsets\n",
    "    offsets = [0] + [len(entry) for entry in text]\n",
    "    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)\n",
    "    text = torch.cat(text)\n",
    "    return text, offsets, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46be36e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the collate function (note: label comes first in each tuple)\n",
    "batch = [(1, torch.tensor([3,23,2,8])), (0, torch.tensor([3,45,21,6]))]\n",
    "res = generate_batch(batch)\n",
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d68ba5fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 4. Create the optimizer, LR scheduler and loss function\n",
    "import torch.optim as optim\n",
    "\n",
    "optimizer = optim.SGD(model.parameters(), lr=4.0)\n",
    "# StepLR multiplies the learning rate by 0.9 on every scheduler.step() call\n",
    "scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss().to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf684900",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 5. Build the training and validation functions\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "def train(train_data):\n",
    "    \"\"\"Train the model for one epoch.\n",
    "\n",
    "    :param train_data: dataset of (label, token-id tensor) samples\n",
    "    :return: (summed batch loss / number of samples, accuracy) for the epoch\n",
    "    \"\"\"\n",
    "    train_loss = 0\n",
    "    train_acc = 0\n",
    "    \n",
    "    # DataLoader yields BATCH_SIZE-sized batches collated by generate_batch\n",
    "    data = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=generate_batch)\n",
    "    for i, (text, offsets, cls) in enumerate(data):\n",
    "        optimizer.zero_grad()\n",
    "        text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)\n",
    "        output = model(text, offsets)\n",
    "        loss = criterion(output, cls)\n",
    "        train_loss += loss.item()\n",
    "        loss.backward()\n",
    "        # Update the parameters\n",
    "        optimizer.step()\n",
    "        train_acc += (output.argmax(1) == cls).sum().item()  # count correct predictions\n",
    "        \n",
    "    return train_loss / len(train_data), train_acc / len(train_data)\n",
    "\n",
    "def valid(valid_data):\n",
    "    \"\"\"Evaluate the model without updating parameters.\n",
    "\n",
    "    :param valid_data: dataset of (label, token-id tensor) samples\n",
    "    :return: (summed batch loss / number of samples, accuracy)\n",
    "    \"\"\"\n",
    "    valid_loss = 0\n",
    "    valid_acc = 0\n",
    "    data = DataLoader(valid_data, batch_size=BATCH_SIZE, collate_fn=generate_batch)\n",
    "    for text, offsets, cls in data:\n",
    "        text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)\n",
    "        with torch.no_grad():\n",
    "            output = model(text, offsets)\n",
    "            # BUG FIX: the original reused the single name `loss` both as the\n",
    "            # running accumulator and as the per-batch criterion output, so\n",
    "            # `loss = criterion(...)` clobbered the running total on every\n",
    "            # iteration and the returned mean loss was wrong. Use two names.\n",
    "            batch_loss = criterion(output, cls)\n",
    "            valid_loss += batch_loss.item()\n",
    "            valid_acc += (output.argmax(1) == cls).sum().item()\n",
    "    return valid_loss/len(valid_data), valid_acc/len(valid_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8773b3fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 6. Train and validate\n",
    "from torch.utils.data.dataset import random_split\n",
    "import time\n",
    "\n",
    "# Hold out 5% of the training set for validation\n",
    "train_len = int(len(train_dataset)*0.95)\n",
    "sub_train_, sub_valid_ = random_split(train_dataset, [train_len, len(train_dataset)-train_len])\n",
    "\n",
    "N_EPOCH = 10\n",
    "# Start training\n",
    "\n",
    "for epoch in range(N_EPOCH):\n",
    "    start_time = time.time()\n",
    "    train_loss, train_acc = train(sub_train_)\n",
    "    valid_loss, valid_acc = valid(sub_valid_)\n",
    "    # BUG FIX: advance the StepLR scheduler once per epoch; it was created\n",
    "    # alongside the optimizer but never stepped, so the learning rate never\n",
    "    # actually decayed from 4.0.\n",
    "    scheduler.step()\n",
    "    \n",
    "    # Elapsed time for this epoch\n",
    "    secs = int(time.time()-start_time)\n",
    "    mins = secs//60 # minutes\n",
    "    secs = secs%60 # seconds\n",
    "    \n",
    "    print(f'Epoch{epoch+1}: use time {mins} minutes, {secs} seconds')\n",
    "    print(f'train_loss: {train_loss}; train_acc: {train_acc}')\n",
    "    print(f'valid_loss: {valid_loss}; valid_acc: {valid_acc}')\n",
    "    print('-----------------------------------------------')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d21a408",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 7. Inspect the learned embedding weight matrix\n",
    "print(model.state_dict()['embedding.weight'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8bc38dfe",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torchX",
   "language": "python",
   "name": "torchx"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
