{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Paths: the wikipedia-cn filtered dump (json) and the pretrained bloom-389m-zh model.\n",
    "data_dir = \"/data/datasets/huggingface/pleisto/wikipedia-cn-20230720-filtered/wikipedia-cn-20230720-filtered.json\"\n",
    "pretrain_model_dir = \"/data/models/huggingface/bloom-389m-zh\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import dependencies\n",
    "from datasets import load_dataset\n",
    "# AutoModelForCausalLM: model class with a causal (next-token) LM head\n",
    "from transformers import AutoTokenizer,AutoModelForCausalLM,DataCollatorForLanguageModeling,TrainingArguments,Trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the dataset.\n",
    "# NOTE(review): test_size=0.95 followed by keeping the \"train\" split retains\n",
    "# only ~5% of the corpus -- presumably to shorten the demo training run;\n",
    "# confirm this subsampling is intended.\n",
    "datasets = load_dataset('json', data_files=data_dir,split='train')\n",
    "datasets = datasets.train_test_split(test_size=0.95)\n",
    "datasets = datasets[\"train\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at two records. Each wiki record has exactly two fields:\n",
    "# 'completion' (the article text) and 'source' (where it came from).\n",
    "print(datasets[0])\n",
    "print(datasets[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the tokenizer and tokenize the corpus.\n",
    "import torch\n",
    "pretrain_model_path = pretrain_model_dir\n",
    "MAX_LENGTH = 382  # truncation length in tokens (was a magic number inline)\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(pretrain_model_path)\n",
    "\n",
    "def process_function(examples):\n",
    "    \"\"\"Append EOS to each article and tokenize, truncating to MAX_LENGTH.\n",
    "\n",
    "    Labels / padding are NOT produced here -- the language-modeling collator\n",
    "    used later builds them per batch.\n",
    "    \"\"\"\n",
    "    contents = [e + tokenizer.eos_token for e in examples['completion']]\n",
    "    tokenizer_examples = tokenizer(contents, max_length=MAX_LENGTH, truncation=True)\n",
    "    return tokenizer_examples\n",
    "\n",
    "tokenizer_datasets = datasets.map(process_function, batched=True, remove_columns=datasets.column_names)\n",
    "print(tokenizer_datasets)\n",
    "print(tokenizer_datasets[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "# Collate with DataCollatorForLanguageModeling. mlm=False selects causal-LM\n",
    "# collation (labels are a padded copy of input_ids), NOT masked-LM -- the\n",
    "# original comment saying \"masked model\" was misleading.\n",
    "collate_fn = DataCollatorForLanguageModeling(tokenizer,mlm=False)\n",
    "dl  =  DataLoader(tokenizer_datasets, batch_size=2, collate_fn=collate_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect one collated batch. The original called next(enumerate(dl)) once\n",
    "# per field, rebuilding the DataLoader iterator and re-collating the first\n",
    "# batch four times; fetch the batch once instead.\n",
    "batch = next(iter(dl))\n",
    "print(batch.keys())\n",
    "print(\"input_ids\", batch.input_ids)\n",
    "print(\"attention_mask\", batch.attention_mask)\n",
    "print(\"labels\", batch.labels)\n",
    "# Per the original author's note: leading 3s are padding ids, trailing 2 is EOS.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the model from the pretrained base weights.\n",
    "model =  AutoModelForCausalLM.from_pretrained(pretrain_model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training hyper-parameters\n",
    "model_save_dir = '/data/logs/causal_language_model'\n",
    "training_args =  TrainingArguments(\n",
    "  output_dir=model_save_dir,          # output directory for checkpoints/logs\n",
    "  num_train_epochs=10,              # total number of training epochs\n",
    "  per_device_train_batch_size=1,  # batch size per device during training\n",
    "  per_device_eval_batch_size=1,   # batch size for evaluation\n",
    "  gradient_accumulation_steps=8,  # effective train batch size = 1 * 8 per device\n",
    "  warmup_steps=500,                # number of warmup steps for learning rate scheduler\n",
    "  learning_rate=1e-5, # learning rate\n",
    "  weight_decay=0.01, # weight decay\n",
    "  logging_steps = 10\n",
    ")\n",
    "print(training_args.weight_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the trainer, reusing the causal-LM collator built earlier instead of\n",
    "# constructing a second, duplicate DataCollatorForLanguageModeling inline.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenizer_datasets,\n",
    "    data_collator=collate_fn,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Launch the training loop; checkpoints are written under model_save_dir.\n",
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inference with a text-generation pipeline (written to run standalone).\n",
    "import torch\n",
    "from transformers.pipelines import pipeline\n",
    "from transformers import AutoModelForCausalLM,AutoTokenizer\n",
    "\n",
    "pretrain_model_dir = \"/data/models/huggingface/bloom-389m-zh\"\n",
    "# Tokenizer is unchanged by training, so load it from the base model dir\n",
    "# (not pretrain_model_path, which is undefined when only this cell is run).\n",
    "tokenizer = AutoTokenizer.from_pretrained(pretrain_model_dir)\n",
    "\n",
    "model_save_dir = '/data/logs/causal_language_model/checkpoint-127270'\n",
    "# BUG FIX: the original loaded the base model again, so the fine-tuned\n",
    "# checkpoint defined above was never used. Load the checkpoint instead.\n",
    "model = AutoModelForCausalLM.from_pretrained(model_save_dir)\n",
    "pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, device=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sample a continuation; do_sample=True makes the output stochastic.\n",
    "pipe(\"作为一个数学家，我还是更喜欢自己的国家！\",max_length=128,do_sample=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
