{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "ename": "ImportError",
     "evalue": "cannot import name 'GenerationMixin' from 'transformers.generation' (d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\generation\\__init__.py)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mImportError\u001b[0m                               Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[3], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m \u001b[39mimport\u001b[39;00m MacBertCorrector\n\u001b[0;32m      2\u001b[0m m \u001b[39m=\u001b[39m MacBertCorrector(\u001b[39m\"\u001b[39m\u001b[39mshibing624/macbert4csc-base-chinese\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m      3\u001b[0m \u001b[39mprint\u001b[39m(m\u001b[39m.\u001b[39mcorrect_batch([\u001b[39m'\u001b[39m\u001b[39m我喜欢吃苹果，苹瑰是个非常有营样的水果。\u001b[39m\u001b[39m'\u001b[39m]))\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\pycorrector\\__init__.py:14\u001b[0m\n\u001b[0;32m     12\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39men_spell_corrector\u001b[39;00m \u001b[39mimport\u001b[39;00m EnSpellCorrector\n\u001b[0;32m     13\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mernie_csc\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mernie_csc_corrector\u001b[39;00m \u001b[39mimport\u001b[39;00m ErnieCscCorrector\n\u001b[1;32m---> 14\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt_corrector\u001b[39;00m \u001b[39mimport\u001b[39;00m GptCorrector\n\u001b[0;32m     15\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmacbert\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmacbert_corrector\u001b[39;00m \u001b[39mimport\u001b[39;00m MacBertCorrector\n\u001b[0;32m     16\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mproper_corrector\u001b[39;00m \u001b[39mimport\u001b[39;00m ProperCorrector\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\pycorrector\\gpt\\gpt_corrector.py:15\u001b[0m\n\u001b[0;32m     13\u001b[0m sys\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mappend(\u001b[39m'\u001b[39m\u001b[39m../..\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m     14\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mutils\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mtokenizer\u001b[39;00m \u001b[39mimport\u001b[39;00m split_text_into_sentences_by_length\n\u001b[1;32m---> 15\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt_model\u001b[39;00m \u001b[39mimport\u001b[39;00m GptModel\n\u001b[0;32m     16\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mutils\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39merror_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m get_errors_for_diff_length\n\u001b[0;32m     19\u001b[0m \u001b[39mclass\u001b[39;00m \u001b[39mGptCorrector\u001b[39;00m(GptModel):\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\pycorrector\\gpt\\gpt_model.py:36\u001b[0m\n\u001b[0;32m     34\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mImportError\u001b[39;00m:\n\u001b[0;32m     35\u001b[0m     \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdeepspeed\u001b[39;00m \u001b[39mimport\u001b[39;00m is_deepspeed_zero3_enabled\n\u001b[1;32m---> 36\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mtrainer\u001b[39;00m \u001b[39mimport\u001b[39;00m TRAINING_ARGS_NAME\n\u001b[0;32m     38\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mgpt_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m GptSupervisedDataset, IGNORE_INDEX, GptArgs, get_conv_template\n\u001b[0;32m     40\u001b[0m has_cuda \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mis_available()\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\trainer.py:66\u001b[0m\n\u001b[0;32m     64\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mintegrations\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mtpu\u001b[39;00m \u001b[39mimport\u001b[39;00m tpu_spmd_dataloader\n\u001b[0;32m     65\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mmodelcard\u001b[39;00m \u001b[39mimport\u001b[39;00m TrainingSummary\n\u001b[1;32m---> 66\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mmodeling_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m PreTrainedModel, load_sharded_checkpoint\n\u001b[0;32m     67\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mmodels\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mauto\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmodeling_auto\u001b[39;00m \u001b[39mimport\u001b[39;00m (\n\u001b[0;32m     68\u001b[0m     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,\n\u001b[0;32m     69\u001b[0m     MODEL_MAPPING_NAMES,\n\u001b[0;32m     70\u001b[0m )\n\u001b[0;32m     71\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39moptimization\u001b[39;00m \u001b[39mimport\u001b[39;00m Adafactor, get_scheduler\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\modeling_utils.py:45\u001b[0m\n\u001b[0;32m     43\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mconfiguration_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m PretrainedConfig\n\u001b[0;32m     44\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mdynamic_module_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m custom_object_save\n\u001b[1;32m---> 45\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mgeneration\u001b[39;00m \u001b[39mimport\u001b[39;00m GenerationConfig, GenerationMixin\n\u001b[0;32m     46\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mintegrations\u001b[39;00m \u001b[39mimport\u001b[39;00m PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled\n\u001b[0;32m     47\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mpytorch_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m (  \u001b[39m# noqa: F401\u001b[39;00m\n\u001b[0;32m     48\u001b[0m     Conv1D,\n\u001b[0;32m     49\u001b[0m     apply_chunking_to_forward,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m     55\u001b[0m     prune_linear_layer,\n\u001b[0;32m     56\u001b[0m )\n",
      "\u001b[1;31mImportError\u001b[0m: cannot import name 'GenerationMixin' from 'transformers.generation' (d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\generation\\__init__.py)"
     ]
    }
   ],
   "source": [
    "# Demo: correct Chinese spelling errors (CSC) with pycorrector's MacBERT model.\n",
    "# correct_batch takes a list of sentences and returns corrections for each.\n",
    "#\n",
    "# NOTE(review): the recorded output shows this cell failing with\n",
    "#   ImportError: cannot import name 'GenerationMixin' from 'transformers.generation'\n",
    "# raised inside pycorrector's own imports — the installed `transformers` version is\n",
    "# incompatible with this `pycorrector` release. Align the two package versions\n",
    "# (see pycorrector's requirements) and re-run; the code below is otherwise correct.\n",
    "from pycorrector import MacBertCorrector\n",
    "m = MacBertCorrector(\"shibing624/macbert4csc-base-chinese\")\n",
    "print(m.correct_batch(['我喜欢吃苹果，苹瑰是个非常有营样的水果。']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "A module that was compiled using NumPy 1.x cannot be run in\n",
      "NumPy 2.0.0 as it may crash. To support both 1.x and 2.x\n",
      "versions of NumPy, modules must be compiled with NumPy 2.0.\n",
      "Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.\n",
      "\n",
      "If you are a user of the module, the easiest solution will be to\n",
      "downgrade to 'numpy<2' or try to upgrade the affected module.\n",
      "We expect that some modules will need time to support NumPy 2.\n",
      "\n",
      "Traceback (most recent call last):  File \"d:\\Miniconda3\\envs\\keshe\\lib\\runpy.py\", line 197, in _run_module_as_main\n",
      "    return _run_code(code, main_globals, None,\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\runpy.py\", line 87, in _run_code\n",
      "    exec(code, run_globals)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel_launcher.py\", line 18, in <module>\n",
      "    app.launch_new_instance()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\traitlets\\config\\application.py\", line 1075, in launch_instance\n",
      "    app.start()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\kernelapp.py\", line 739, in start\n",
      "    self.io_loop.start()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 205, in start\n",
      "    self.asyncio_loop.run_forever()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\asyncio\\base_events.py\", line 601, in run_forever\n",
      "    self._run_once()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\asyncio\\base_events.py\", line 1905, in _run_once\n",
      "    handle._run()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\asyncio\\events.py\", line 80, in _run\n",
      "    self._context.run(self._callback, *self._args)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 545, in dispatch_queue\n",
      "    await self.process_one()\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 534, in process_one\n",
      "    await dispatch(*args)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 437, in dispatch_shell\n",
      "    await result\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 362, in execute_request\n",
      "    await super().execute_request(stream, ident, parent)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 778, in execute_request\n",
      "    reply_content = await reply_content\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 449, in do_execute\n",
      "    res = shell.run_cell(\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\ipykernel\\zmqshell.py\", line 549, in run_cell\n",
      "    return super().run_cell(*args, **kwargs)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3006, in run_cell\n",
      "    result = self._run_cell(\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3061, in _run_cell\n",
      "    result = runner(coro)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\async_helpers.py\", line 129, in _pseudo_sync_runner\n",
      "    coro.send(None)\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3266, in run_cell_async\n",
      "    has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3445, in run_ast_nodes\n",
      "    if await self.run_code(code, result, async_=asy):\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3505, in run_code\n",
      "    exec(code_obj, self.user_global_ns, self.user_ns)\n",
      "  File \"C:\\Users\\RyanLai\\AppData\\Local\\Temp\\ipykernel_25688\\1725405284.py\", line 2, in <module>\n",
      "    from textgen import BartSeq2SeqModel\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\__init__.py\", line 15, in <module>\n",
      "    from textgen.language_modeling.language_modeling_model import LanguageModelingModel\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\language_modeling\\__init__.py\", line 8, in <module>\n",
      "    from textgen.language_modeling.language_modeling_model import LanguageModelingModel\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\language_modeling\\language_modeling_model.py\", line 50, in <module>\n",
      "    from transformers.optimization import AdamW, Adafactor\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\optimization.py\", line 27, in <module>\n",
      "    from .trainer_pt_utils import LayerWiseDummyOptimizer, LayerWiseDummyScheduler\n",
      "  File \"d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\trainer_pt_utils.py\", line 235, in <module>\n",
      "    device: Optional[torch.device] = torch.device(\"cuda\"),\n",
      "d:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\trainer_pt_utils.py:235: UserWarning: Failed to initialize NumPy: _ARRAY_API not found (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\torch\\csrc\\utils\\tensor_numpy.cpp:84.)\n",
      "  device: Optional[torch.device] = torch.device(\"cuda\"),\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'LRScheduler' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[4], line 2\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m \u001b[39mimport\u001b[39;00m BertTokenizerFast\n\u001b[1;32m----> 2\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m \u001b[39mimport\u001b[39;00m BartSeq2SeqModel\n\u001b[0;32m      4\u001b[0m tokenizer \u001b[39m=\u001b[39m BertTokenizerFast\u001b[39m.\u001b[39mfrom_pretrained(\u001b[39m'\u001b[39m\u001b[39mshibing624/bart4csc-base-chinese\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m      5\u001b[0m model \u001b[39m=\u001b[39m BartSeq2SeqModel(\n\u001b[0;32m      6\u001b[0m     encoder_type\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mbart\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[0;32m      7\u001b[0m     encoder_decoder_type\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mbart\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[0;32m      8\u001b[0m     encoder_decoder_name\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mshibing624/bart4csc-base-chinese\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[0;32m      9\u001b[0m     tokenizer\u001b[39m=\u001b[39mtokenizer,\n\u001b[0;32m     10\u001b[0m     args\u001b[39m=\u001b[39m{\u001b[39m\"\u001b[39m\u001b[39mmax_length\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m128\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39meval_batch_size\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m128\u001b[39m})\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\__init__.py:15\u001b[0m\n\u001b[0;32m     12\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_generation\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_generation_model\u001b[39;00m \u001b[39mimport\u001b[39;00m LanguageGenerationModel\n\u001b[0;32m     14\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mconfig\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmodel_args\u001b[39;00m \u001b[39mimport\u001b[39;00m LanguageModelingArgs\n\u001b[1;32m---> 15\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling_model\u001b[39;00m \u001b[39mimport\u001b[39;00m LanguageModelingModel\n\u001b[0;32m     17\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mconfig\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmodel_args\u001b[39;00m \u001b[39mimport\u001b[39;00m SongNetArgs\n\u001b[0;32m     18\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39msongnet_model\u001b[39;00m \u001b[39mimport\u001b[39;00m SongNetModel\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\language_modeling\\__init__.py:8\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m      3\u001b[0m \u001b[39m@author:XuMing(xuming624@qq.com)\u001b[39;00m\n\u001b[0;32m      4\u001b[0m \u001b[39m@description:\u001b[39;00m\n\u001b[0;32m      5\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m      7\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mconfig\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmodel_args\u001b[39;00m \u001b[39mimport\u001b[39;00m LanguageModelingArgs\n\u001b[1;32m----> 8\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling_model\u001b[39;00m \u001b[39mimport\u001b[39;00m LanguageModelingModel\n\u001b[0;32m      9\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mconfig\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mmodel_args\u001b[39;00m \u001b[39mimport\u001b[39;00m SongNetArgs\n\u001b[0;32m     10\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtextgen\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39msongnet_model\u001b[39;00m \u001b[39mimport\u001b[39;00m SongNetModel\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\textgen\\language_modeling\\language_modeling_model.py:50\u001b[0m\n\u001b[0;32m     22\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m \u001b[39mimport\u001b[39;00m (\n\u001b[0;32m     23\u001b[0m     AutoConfig,\n\u001b[0;32m     24\u001b[0m     AutoModelWithLMHead,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m     47\u001b[0m     RobertaTokenizer,\n\u001b[0;32m     48\u001b[0m )\n\u001b[0;32m     49\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdata\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdatasets\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlanguage_modeling\u001b[39;00m \u001b[39mimport\u001b[39;00m LineByLineTextDataset, TextDataset\n\u001b[1;32m---> 50\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39moptimization\u001b[39;00m \u001b[39mimport\u001b[39;00m AdamW, Adafactor\n\u001b[0;32m     51\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39moptimization\u001b[39;00m \u001b[39mimport\u001b[39;00m (\n\u001b[0;32m     52\u001b[0m     get_constant_schedule,\n\u001b[0;32m     53\u001b[0m     get_constant_schedule_with_warmup,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m     57\u001b[0m     get_polynomial_decay_schedule_with_warmup,\n\u001b[0;32m     58\u001b[0m )\n\u001b[0;32m     59\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m \u001b[39mimport\u001b[39;00m BertTokenizerFast\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\optimization.py:27\u001b[0m\n\u001b[0;32m     24\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39moptim\u001b[39;00m \u001b[39mimport\u001b[39;00m Optimizer\n\u001b[0;32m     25\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39moptim\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mlr_scheduler\u001b[39;00m \u001b[39mimport\u001b[39;00m LambdaLR, ReduceLROnPlateau\n\u001b[1;32m---> 27\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mtrainer_pt_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m LayerWiseDummyOptimizer, LayerWiseDummyScheduler\n\u001b[0;32m     28\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mtrainer_utils\u001b[39;00m \u001b[39mimport\u001b[39;00m SchedulerType\n\u001b[0;32m     29\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mutils\u001b[39;00m \u001b[39mimport\u001b[39;00m logging\n",
      "File \u001b[1;32md:\\Miniconda3\\envs\\keshe\\lib\\site-packages\\transformers\\trainer_pt_utils.py:1358\u001b[0m\n\u001b[0;32m   1354\u001b[0m     \u001b[39mdef\u001b[39;00m \u001b[39mstep\u001b[39m(\u001b[39mself\u001b[39m, closure\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Optional[\u001b[39mfloat\u001b[39m]:\n\u001b[0;32m   1355\u001b[0m         \u001b[39mpass\u001b[39;00m\n\u001b[1;32m-> 1358\u001b[0m \u001b[39mclass\u001b[39;00m \u001b[39mLayerWiseDummyScheduler\u001b[39;00m(LRScheduler):\n\u001b[0;32m   1359\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m   1360\u001b[0m \u001b[39m    For Layer-wise optimizers such as GaLoRE optimizer, the optimization and scheduling step\u001b[39;00m\n\u001b[0;32m   1361\u001b[0m \u001b[39m    are already done through the post gradient hooks. Therefore\u001b[39;00m\n\u001b[0;32m   1362\u001b[0m \u001b[39m    the trick is to create a dummy scheduler that can take arbitrary\u001b[39;00m\n\u001b[0;32m   1363\u001b[0m \u001b[39m    args and kwargs and return a no-op during training.\u001b[39;00m\n\u001b[0;32m   1364\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[0;32m   1366\u001b[0m     \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n",
      "\u001b[1;31mNameError\u001b[0m: name 'LRScheduler' is not defined"
     ]
    }
   ],
   "source": [
    "# Demo: correct Chinese spelling errors with a BART seq2seq model via textgen.\n",
    "# Loads the tokenizer and encoder-decoder weights from the Hugging Face hub,\n",
    "# then runs batch prediction on one misspelled sentence.\n",
    "#\n",
    "# NOTE(review): the recorded output shows two environment problems, not code bugs:\n",
    "#   1. a compiled module built against NumPy 1.x was imported under NumPy 2.x\n",
    "#      (warning suggests `numpy<2` or rebuilding the affected package), and\n",
    "#   2. NameError: name 'LRScheduler' is not defined inside transformers'\n",
    "#      trainer_pt_utils — a torch/transformers version mismatch.\n",
    "# Pin compatible numpy/torch/transformers versions and re-run. TODO confirm pins.\n",
    "from transformers import BertTokenizerFast\n",
    "from textgen import BartSeq2SeqModel\n",
    "\n",
    "tokenizer = BertTokenizerFast.from_pretrained('shibing624/bart4csc-base-chinese')\n",
    "model = BartSeq2SeqModel(\n",
    "    encoder_type='bart',\n",
    "    encoder_decoder_type='bart',\n",
    "    encoder_decoder_name='shibing624/bart4csc-base-chinese',\n",
    "    tokenizer=tokenizer,\n",
    "    args={\"max_length\": 128, \"eval_batch_size\": 128})\n",
    "sentences = [\"我喜欢吃苹果，苹瑰是个非常有营样的水果。\"]\n",
    "print(model.predict(sentences))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
