{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Package                                  Version         Editable project location\n",
      "---------------------------------------- --------------- -------------------------\n",
      "accelerate                               1.7.0\n",
      "aiofiles                                 24.1.0\n",
      "aiohappyeyeballs                         2.6.1\n",
      "aiohttp                                  3.12.13\n",
      "aiosignal                                1.4.0\n",
      "airportsdata                             20250706\n",
      "annotated-types                          0.7.0\n",
      "antlr4-python3-runtime                   4.9.3\n",
      "anyio                                    4.9.0\n",
      "astor                                    0.8.1\n",
      "asttokens                                3.0.0\n",
      "attrs                                    25.3.0\n",
      "audioread                                3.0.1\n",
      "autopep8                                 2.3.2\n",
      "av                                       15.0.0\n",
      "bitsandbytes                             0.46.1\n",
      "blake3                                   1.0.5\n",
      "build                                    1.2.2.post1\n",
      "cachetools                               6.1.0\n",
      "certifi                                  2025.6.15\n",
      "cffi                                     1.17.1\n",
      "charset-normalizer                       3.4.2\n",
      "click                                    8.2.1\n",
      "cloudpickle                              3.1.1\n",
      "colorama                                 0.4.6\n",
      "comm                                     0.2.2\n",
      "compressed-tensors                       0.10.1\n",
      "contourpy                                1.3.3\n",
      "cut-cross-entropy                        25.1.1\n",
      "cycler                                   0.12.1\n",
      "datasets                                 3.6.0\n",
      "debugpy                                  1.8.14\n",
      "decorator                                5.2.1\n",
      "deepspeed                                0.17.4+56fed13a\n",
      "depyf                                    0.18.0\n",
      "device-smi                               0.4.1\n",
      "diffusers                                0.34.0\n",
      "dill                                     0.3.8\n",
      "diskcache                                5.6.3\n",
      "distro                                   1.9.0\n",
      "dnspython                                2.7.0\n",
      "docstring_parser                         0.16\n",
      "einops                                   0.8.1\n",
      "email_validator                          2.2.0\n",
      "executing                                2.2.0\n",
      "fastapi                                  0.116.1\n",
      "fastapi-cli                              0.0.8\n",
      "fastapi-cloud-cli                        0.1.5\n",
      "ffmpy                                    0.6.1\n",
      "filelock                                 3.18.0\n",
      "fire                                     0.7.0\n",
      "flash_attn                               2.7.4.post1\n",
      "fonttools                                4.59.0\n",
      "frozenlist                               1.7.0\n",
      "fsspec                                   2025.3.0\n",
      "gguf                                     0.17.1\n",
      "googleapis-common-protos                 1.70.0\n",
      "gptqmodel                                2.2.0\n",
      "gradio                                   5.31.0\n",
      "gradio_client                            1.10.1\n",
      "greenlet                                 3.2.3\n",
      "groovy                                   0.1.2\n",
      "grpcio                                   1.74.0\n",
      "h11                                      0.16.0\n",
      "hf_transfer                              0.1.9\n",
      "hf-xet                                   1.1.5\n",
      "hjson                                    3.1.0\n",
      "httpcore                                 1.0.9\n",
      "httptools                                0.6.4\n",
      "httpx                                    0.28.1\n",
      "huggingface-hub                          0.33.2\n",
      "idna                                     3.10\n",
      "imageio                                  2.37.0\n",
      "importlib_metadata                       8.7.0\n",
      "iniconfig                                2.1.0\n",
      "intel-openmp                             2021.4.0\n",
      "interegular                              0.3.3\n",
      "ipykernel                                6.29.5\n",
      "ipython                                  9.4.0\n",
      "ipython_pygments_lexers                  1.1.1\n",
      "jedi                                     0.19.2\n",
      "Jinja2                                   3.1.6\n",
      "jiter                                    0.10.0\n",
      "joblib                                   1.5.1\n",
      "jsonpatch                                1.33\n",
      "jsonpointer                              3.0.0\n",
      "jsonschema                               4.25.0\n",
      "jsonschema-specifications                2025.4.1\n",
      "jupyter_client                           8.6.3\n",
      "jupyter_core                             5.8.1\n",
      "kiwisolver                               1.4.8\n",
      "langchain                                0.3.27\n",
      "langchain-core                           0.3.72\n",
      "langchain-text-splitters                 0.3.9\n",
      "langsmith                                0.4.10\n",
      "lark                                     1.2.2\n",
      "lazy_loader                              0.4\n",
      "librosa                                  0.11.0\n",
      "llamafactory                             0.9.4.dev0      D:\\GitHub\\LLaMA-Factory\n",
      "llvmlite                                 0.44.0\n",
      "lm-format-enforcer                       0.10.11\n",
      "logbar                                   0.0.4\n",
      "markdown-it-py                           3.0.0\n",
      "MarkupSafe                               3.0.2\n",
      "matplotlib                               3.10.3\n",
      "matplotlib-inline                        0.1.7\n",
      "mdurl                                    0.1.2\n",
      "mistral_common                           1.8.3\n",
      "mkl                                      2021.4.0\n",
      "modelscope                               1.27.1\n",
      "mpmath                                   1.3.0\n",
      "msgpack                                  1.1.1\n",
      "msgspec                                  0.19.0\n",
      "multidict                                6.6.3\n",
      "multiprocess                             0.70.16\n",
      "nest-asyncio                             1.6.0\n",
      "networkx                                 3.5\n",
      "ninja                                    1.11.1.4\n",
      "numba                                    0.61.2\n",
      "numpy                                    2.3.2\n",
      "nvidia-ml-py                             12.575.51\n",
      "omegaconf                                2.3.0\n",
      "openai                                   1.98.0\n",
      "opencv-python-headless                   4.11.0.86\n",
      "opentelemetry-api                        1.36.0\n",
      "opentelemetry-exporter-otlp              1.36.0\n",
      "opentelemetry-exporter-otlp-proto-common 1.36.0\n",
      "opentelemetry-exporter-otlp-proto-grpc   1.36.0\n",
      "opentelemetry-exporter-otlp-proto-http   1.36.0\n",
      "opentelemetry-proto                      1.36.0\n",
      "opentelemetry-sdk                        1.36.0\n",
      "opentelemetry-semantic-conventions       0.57b0\n",
      "opentelemetry-semantic-conventions-ai    0.4.12\n",
      "optimum                                  1.26.1\n",
      "orjson                                   3.11.1\n",
      "outlines                                 0.1.11\n",
      "outlines_core                            0.1.26\n",
      "packaging                                25.0\n",
      "pandas                                   2.3.1\n",
      "parso                                    0.8.4\n",
      "partial-json-parser                      0.2.1.1.post6\n",
      "peft                                     0.15.2\n",
      "pillow                                   11.3.0\n",
      "pip                                      25.1.1\n",
      "platformdirs                             4.3.8\n",
      "pluggy                                   1.6.0\n",
      "pooch                                    1.8.2\n",
      "prometheus_client                        0.22.1\n",
      "prometheus-fastapi-instrumentator        7.1.0\n",
      "prompt_toolkit                           3.0.51\n",
      "propcache                                0.3.2\n",
      "protobuf                                 6.31.1\n",
      "psutil                                   7.0.0\n",
      "pure_eval                                0.2.3\n",
      "py-cpuinfo                               9.0.0\n",
      "pyarrow                                  20.0.0\n",
      "pycodestyle                              2.14.0\n",
      "pycountry                                24.6.1\n",
      "pycparser                                2.22\n",
      "pydantic                                 2.10.6\n",
      "pydantic_core                            2.27.2\n",
      "pydantic-extra-types                     2.10.5\n",
      "pydub                                    0.25.1\n",
      "Pygments                                 2.19.2\n",
      "pynvml                                   12.0.0\n",
      "pyparsing                                3.2.3\n",
      "pyproject_hooks                          1.2.0\n",
      "pytest                                   8.4.1\n",
      "python-dateutil                          2.9.0.post0\n",
      "python-dotenv                            1.1.1\n",
      "python-json-logger                       3.3.0\n",
      "python-multipart                         0.0.20\n",
      "pytz                                     2025.2\n",
      "pywin32                                  310\n",
      "PyYAML                                   6.0.2\n",
      "pyzmq                                    27.0.0\n",
      "random_word                              1.0.13\n",
      "referencing                              0.36.2\n",
      "regex                                    2024.11.6\n",
      "requests                                 2.32.4\n",
      "requests-toolbelt                        1.0.0\n",
      "rich                                     14.0.0\n",
      "rich-toolkit                             0.14.9\n",
      "rignore                                  0.6.4\n",
      "rpds-py                                  0.26.0\n",
      "ruff                                     0.12.5\n",
      "safehttpx                                0.1.6\n",
      "safetensors                              0.5.3\n",
      "scikit-image                             0.25.2\n",
      "scikit-learn                             1.7.0\n",
      "scipy                                    1.16.0\n",
      "semantic-version                         2.10.0\n",
      "sentencepiece                            0.2.0\n",
      "sentry-sdk                               2.34.1\n",
      "setuptools                               80.9.0\n",
      "shellingham                              1.5.4\n",
      "shtab                                    1.7.2\n",
      "six                                      1.17.0\n",
      "sniffio                                  1.3.1\n",
      "soundfile                                0.13.1\n",
      "soxr                                     0.5.0.post1\n",
      "SQLAlchemy                               2.0.42\n",
      "sse-starlette                            3.0.2\n",
      "stack-data                               0.6.3\n",
      "starlette                                0.47.2\n",
      "sympy                                    1.13.1\n",
      "tbb                                      2021.13.1\n",
      "tenacity                                 9.1.2\n",
      "termcolor                                3.1.0\n",
      "threadpoolctl                            3.6.0\n",
      "tifffile                                 2025.6.11\n",
      "tiktoken                                 0.9.0\n",
      "tokenicer                                0.0.4\n",
      "tokenizers                               0.21.1\n",
      "tomlkit                                  0.13.3\n",
      "torch                                    2.6.0+cu124\n",
      "torchvision                              0.21.0+cu124\n",
      "tornado                                  6.5.1\n",
      "tqdm                                     4.67.1\n",
      "traitlets                                5.14.3\n",
      "transformers                             4.52.4\n",
      "transformers-stream-generator            0.0.5\n",
      "triton-windows                           3.3.1.post19\n",
      "trl                                      0.9.6\n",
      "typeguard                                4.4.4\n",
      "typer                                    0.16.0\n",
      "typing_extensions                        4.14.1\n",
      "typing-inspection                        0.4.1\n",
      "tyro                                     0.8.14\n",
      "tzdata                                   2025.2\n",
      "unsloth                                  2025.6.12\n",
      "unsloth_zoo                              2025.6.8\n",
      "urllib3                                  2.5.0\n",
      "uvicorn                                  0.35.0\n",
      "watchfiles                               1.1.0\n",
      "wcwidth                                  0.2.13\n",
      "websockets                               15.0.1\n",
      "wheel                                    0.45.1\n",
      "xformers                                 0.0.29.post2\n",
      "xxhash                                   3.5.0\n",
      "yarl                                     1.20.1\n",
      "zipp                                     3.23.0\n",
      "zstandard                                0.23.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Ignoring invalid distribution ~orch (D:\\anaconda\\envs\\ds\\Lib\\site-packages)\n"
     ]
    }
   ],
   "source": [
    "# Use %pip (not !pip) so the command runs against the active kernel's environment.\n",
    "%pip list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "\n",
    "# Download the IMDB sentiment dataset from the Hugging Face Hub.\n",
    "dataset = load_dataset('imdb')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Saving the dataset (1/1 shards): 100%|██████████| 25000/25000 [00:00<00:00, 609789.66 examples/s]\n",
      "Saving the dataset (1/1 shards): 100%|██████████| 25000/25000 [00:00<00:00, 578824.88 examples/s]\n",
      "Saving the dataset (1/1 shards): 100%|██████████| 50000/50000 [00:00<00:00, 1401942.66 examples/s]\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): hardcoded absolute Windows path -- prefer a configurable DATA_DIR\n",
    "dataset.save_to_disk('E:/datasets/imdb')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DatasetDict({\n",
      "    train: Dataset({\n",
      "        features: ['text', 'label'],\n",
      "        num_rows: 25000\n",
      "    })\n",
      "    test: Dataset({\n",
      "        features: ['text', 'label'],\n",
      "        num_rows: 25000\n",
      "    })\n",
      "    unsupervised: Dataset({\n",
      "        features: ['text', 'label'],\n",
      "        num_rows: 50000\n",
      "    })\n",
      "})\n"
     ]
    }
   ],
   "source": [
    "# Show the splits and row counts of the DatasetDict\n",
    "print(dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertTokenizer\n",
    "\n",
    "# WordPiece tokenizer matching the bert-base-uncased checkpoint.\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map: 100%|██████████| 25000/25000 [01:04<00:00, 385.01 examples/s]\n",
      "Map: 100%|██████████| 25000/25000 [01:03<00:00, 393.24 examples/s]\n",
      "Map: 100%|██████████| 50000/50000 [02:11<00:00, 381.61 examples/s]\n"
     ]
    }
   ],
   "source": [
    "def tokenizer_function(examples):\n",
    "    \"\"\"Tokenize a batch of examples, padding/truncating to the tokenizer's model max length.\"\"\"\n",
    "    return tokenizer(examples['text'], padding='max_length', truncation=True)\n",
    "tokenizer_datasets = dataset.map(tokenizer_function, batched=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Saving the dataset (1/1 shards): 100%|██████████| 25000/25000 [00:00<00:00, 462282.10 examples/s]\n",
      "Saving the dataset (1/1 shards): 100%|██████████| 25000/25000 [00:00<00:00, 499990.94 examples/s]\n",
      "Saving the dataset (1/1 shards): 100%|██████████| 50000/50000 [00:00<00:00, 520783.13 examples/s]\n"
     ]
    }
   ],
   "source": [
    "# 保存到本地路径（例如 D:/datasets/imdb_tokenized）\n",
    "tokenizer_datasets.save_to_disk('./imdb_tokenized')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_from_disk\n",
    "\n",
    "# Reload the already-tokenized dataset straight from disk\n",
    "tokenizer_datasets = load_from_disk('./imdb_tokenized')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold out 20% of the training split for validation.\n",
    "# A fixed seed makes the split reproducible across kernel restarts.\n",
    "train_test = tokenizer_datasets['train'].train_test_split(test_size=0.2, seed=42)\n",
    "train_dataset = train_test['train']\n",
    "valid_dataset = train_test['test']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],\n",
       "    num_rows: 20000\n",
       "})"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the training split (80% of the original train set)\n",
    "train_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],\n",
       "    num_rows: 5000\n",
       "})"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the validation split (20% of the original train set)\n",
    "valid_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "# NOTE(review): these DataLoaders are never consumed -- the Trainer below builds\n",
    "# its own loaders. The datasets also still carry the raw 'text' column and\n",
    "# list-valued features; call set_format('torch', ...) before iterating these directly.\n",
    "train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=8)\n",
    "valid_dataloader = DataLoader(valid_dataset,  batch_size=8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at C:\\Users\\ycw\\.cache\\modelscope\\hub\\models\\AI-ModelScope\\bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "from transformers import BertForSequenceClassification\n",
    "# Load a local BERT checkpoint (downloaded via ModelScope) with a fresh 2-class head.\n",
    "model = BertForSequenceClassification.from_pretrained(r'AI-ModelScope\\bert-base-uncased', num_labels=2)\n",
    "# Use your own model name or local path here"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.optim import AdamW      # bundled with PyTorch >= 1.9\n",
    "# or: from transformers import AdamW   # only in early transformers versions\n",
    "# ModelScope only handles the download; the model is still used via transformers.\n",
    "# NOTE(review): this optimizer is never handed to the Trainer below, so it is\n",
    "# unused unless you train manually with the DataLoaders above.\n",
    "optimizer = AdamW(model.parameters(), lr=2e-5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import Trainer, TrainingArguments\n",
    "# NOTE(review): the AdamW built two cells above is not passed in here, so the\n",
    "# Trainer creates its own optimizer from these arguments (lr=2e-5 matches anyway).\n",
    "training_args = TrainingArguments(\n",
    "    output_dir='./results',\n",
    "    eval_strategy=\"epoch\",            # evaluate at the end of every epoch\n",
    "    learning_rate=2e-5,\n",
    "    per_device_train_batch_size=16,\n",
    "    per_device_eval_batch_size=16,\n",
    "    num_train_epochs=3,\n",
    "    weight_decay=0.01,)\n",
    "# NOTE(review): no tokenizer/processing_class is given to the Trainer, so\n",
    "# trainer.save_model() will not bundle the tokenizer automatically.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=train_dataset,\n",
    "    eval_dataset=valid_dataset,)\n",
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 0.3542357385158539, 'eval_runtime': 42.2664, 'eval_samples_per_second': 118.297, 'eval_steps_per_second': 14.787, 'epoch': 3.0}\n"
     ]
    }
   ],
   "source": [
    "# Final evaluation on the held-out validation split.\n",
    "metrics = trainer.evaluate()\n",
    "print(metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PredictionOutput(predictions=array([[ 3.5967197, -3.674606 ],\n",
      "       [ 3.6403673, -3.7224658],\n",
      "       [ 3.6697755, -3.7187948],\n",
      "       ...,\n",
      "       [ 1.9396874, -1.7812978],\n",
      "       [ 3.6584198, -3.7317572],\n",
      "       [-3.3885133,  3.4670568]], shape=(5000, 2), dtype=float32), label_ids=array([0, 0, 0, ..., 0, 0, 1], shape=(5000,)), metrics={'test_loss': 0.3542357385158539, 'test_runtime': 44.3701, 'test_samples_per_second': 112.689, 'test_steps_per_second': 14.086})\n"
     ]
    }
   ],
   "source": [
    "# Raw logits, label_ids, and metrics for the validation split.\n",
    "predictions = trainer.predict(valid_dataset)\n",
    "print(predictions)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 把最终模型+分词器打包到 ./final_model\n",
    "trainer.save_model(\"./final_model\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "这家服务很差,太差劲了 → positive [0.47052428126335144, 0.5294756889343262]\n",
      "这部电影让我心情非常差，太差劲了 → negative [0.6712198257446289, 0.3287802040576935]\n"
     ]
    }
   ],
   "source": [
    "from datasets import Dataset\n",
    "import torch\n",
    "\n",
    "# NOTE(review): the sample sentences are Chinese but bert-base-uncased is an\n",
    "# English model, so most tokens become [UNK] -- predictions here are unreliable.\n",
    "sentences = [\"这家服务很差,太差劲了\", \"这部电影让我心情非常差，太差劲了\"]\n",
    "id2label = {0: \"negative\", 1: \"positive\"}   # adjust to match your own label order\n",
    "# 1) Tokenize first\n",
    "inputs = tokenizer(\n",
    "    sentences,\n",
    "    padding=True,\n",
    "    truncation=True,\n",
    "    max_length=128,\n",
    "    return_tensors=\"pt\"\n",
    ")\n",
    "\n",
    "# 2) Wrap in a Dataset (Trainer.predict only accepts a Dataset)\n",
    "ds = Dataset.from_dict({\n",
    "    \"input_ids\":      inputs[\"input_ids\"],\n",
    "    \"attention_mask\": inputs[\"attention_mask\"],\n",
    "    # no real labels, so any placeholder value (0) works\n",
    "    \"labels\":         [0] * len(sentences)\n",
    "})\n",
    "\n",
    "# 3) Predict\n",
    "pred_outputs = trainer.predict(ds)\n",
    "logits = pred_outputs.predictions\n",
    "probs  = torch.softmax(torch.tensor(logits), dim=-1)\n",
    "preds  = torch.argmax(probs, dim=-1)\n",
    "\n",
    "for s, p, pr in zip(sentences, preds, probs):\n",
    "    print(s, \"→\", id2label[p.item()], pr.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0, 0]"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# The placeholder labels that were attached for prediction (all zeros)\n",
    "ds[\"labels\"]"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ds",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
