{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from torch.utils.data import  Dataset,DataLoader\n",
    "import  pickle\n",
    "import  os\n",
    "from tqdm import  tqdm\n",
    "import torch\n",
    "import  numpy as np\n",
    "\n",
    "MAX_VOCAB_SIZE = 10000  # cap on vocabulary size kept after frequency sorting\n",
    "UNK,PAD = '<UNK>','<PAD>'  # special tokens appended to the end of the vocab\n",
    "MIN_FREQ = 1  # minimum frequency for a token to enter the vocab"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "outputs": [],
   "source": [
    "def build_vocab(train_path, class_ls_path):\n",
    "    \"\"\"Count token frequencies in the training file and persist the label set.\n",
    "\n",
    "    Returns (word_to_id, pad_id, num_classes): a word->index vocabulary capped\n",
    "    at MAX_VOCAB_SIZE (plus UNK and PAD), the index of PAD, and the number of\n",
    "    distinct labels (which are also written, one per line, to class_ls_path).\n",
    "    \"\"\"\n",
    "    word_freq = {}\n",
    "    labels_seen = set()\n",
    "    with open(train_path, 'r', encoding='utf-8') as f:\n",
    "        for raw_line in tqdm(f):\n",
    "            stripped = raw_line.strip()\n",
    "            if not stripped:\n",
    "                continue\n",
    "            token, tag = stripped.split()\n",
    "            word_freq[token] = word_freq.get(token, 0) + 1\n",
    "            labels_seen.add(tag)\n",
    "        # Keep only tokens at or above MIN_FREQ, most frequent first.\n",
    "        frequent = [pair for pair in word_freq.items() if pair[1] >= MIN_FREQ]\n",
    "        frequent.sort(key=lambda pair: pair[1], reverse=True)\n",
    "        class_ls = sorted(labels_seen)\n",
    "        with open(class_ls_path, 'w', encoding='utf-8') as cf:\n",
    "            cf.write('\\n'.join(str(tag) for tag in class_ls))\n",
    "        word_to_id = {pair[0]: idx for idx, pair in enumerate(frequent[:MAX_VOCAB_SIZE])}\n",
    "        word_to_id[UNK] = len(word_to_id)\n",
    "        word_to_id[PAD] = len(word_to_id)\n",
    "    return word_to_id, word_to_id[PAD], len(class_ls)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "67344it [00:00, 1055062.04it/s]\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): build_vocab's third return value is len(class_ls) — a class\n",
    "# count, not a padding id — so the name `label_pad_id` is misleading; verify\n",
    "# downstream uses. The absolute Windows path also hurts portability.\n",
    "vocab_dic, word_pad_id, label_pad_id = build_vocab(r'D:\\DeskS\\NLP\\NER\\ner.train', r'D:\\DeskS\\NLP\\NER\\ner.label')"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "outputs": [],
   "source": [
    "def make_tensor(tensor,config):\n",
    "    tensor_ret = torch.LongTensor(tensor).to(config.device)\n",
    "    return tensor_ret\n",
    "\n",
    "class Mydataset(Dataset):\n",
    "    \"\"\"Character-level NER dataset: one (word_id, label_id) pair per line of `filepath`.\"\"\"\n",
    "    def __init__(self, filepath, config, vocab):\n",
    "        self.filepath = filepath\n",
    "        self.vocab = vocab\n",
    "        self.label_dic = self._getLabelDic(config)\n",
    "        self.data_label = self._get_contents(config)\n",
    "        # Word-id and label-id tensors, moved to config.device.\n",
    "        self.x = make_tensor(torch.tensor([_[0] for _ in self.data_label]), config)\n",
    "        self.y = make_tensor(torch.tensor([_[1] for _ in self.data_label]), config)\n",
    "        self.len = len(self.x)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.x[index], self.y[index]\n",
    "\n",
    "    def __len__(self):  # BUG FIX: was `_len_`, which len()/DataLoader never call\n",
    "        return self.len\n",
    "\n",
    "    def _getLabelDic(self, config):\n",
    "        \"\"\"Read the label file (one label per line) into {label: index}.\"\"\"\n",
    "        label_dic = {}\n",
    "        with open(config.class_ls_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                label = line.strip()\n",
    "                label_dic[label] = idx\n",
    "        return label_dic\n",
    "\n",
    "    def _get_contents(self, config):\n",
    "        \"\"\"Parse self.filepath ('word label' per line) into [(word_id, label_id), ...].\"\"\"\n",
    "        contents = []  # BUG FIX: was a dict ({}), which has no .append\n",
    "        # BUG FIX: read the dataset file itself, not config.class_ls_path (the\n",
    "        # label list), whose one-token lines broke the two-field unpack below.\n",
    "        with open(self.filepath, 'r', encoding='utf-8') as f:\n",
    "            for line in tqdm(f):\n",
    "                lin = line.strip()\n",
    "                if not lin:\n",
    "                    continue\n",
    "                word, label = lin.split()\n",
    "                word_id = self.vocab.get(word, self.vocab.get(UNK))\n",
    "                label_id = self.label_dic.get(label)\n",
    "                contents.append((word_id, label_id))\n",
    "        return contents"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "outputs": [],
   "source": [
    "def extract_vocab_tensor(config):\n",
    "    if config.embedding_type == 'random':\n",
    "        embedding_pretrained = None\n",
    "    else:\n",
    "        vocab_tensor_path = config.embedding_type\n",
    "        if os.path.exists(vocab_tensor_path):\n",
    "            embedding_pretrained = np.load(vocab_tensor_path)\n",
    "            ['embedding'].astype('float32')\n",
    "        else:\n",
    "            with open(config.vocab_path,'rb') as vocab_f:\n",
    "                word_to_id = pickle.load(vocab_f)\n",
    "                pretrain_f = open(config.pretrain_dir,'r',encoding='utf-8')\n",
    "                embeddings = np.random.rand(len(word_to_id),config.embedding_dim)\n",
    "            for i,line in enumerate(pretrain_f.readlines()):\n",
    "                if i == 0:\n",
    "                    continue\n",
    "                lin = line.strip().split(' ')\n",
    "                if lin[0] in word_to_id:\n",
    "                    idx = word_to_id[lin[0]]\n",
    "                    emb = [float(x) for x in lin[1:config.embedding_dim+1]]\n",
    "                    embeddings[idx] = np.asarray(emb,dtype='float')\n",
    "            pretrain_f.close()\n",
    "            np.savez_compressed(vocab_tensor_path,embeddings = embeddings)\n",
    "            embedding_pretrained = embeddings.astype('float32')\n",
    "    return embedding_pretrained"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "outputs": [],
   "source": [
    "class config(object):\n",
    "    \"\"\"File paths and hyper-parameters for the BiLSTM-CRF NER pipeline.\"\"\"\n",
    "    def __init__(self):\n",
    "        # Paths. Entries marked * must exist before running; unmarked\n",
    "        # files/directories are created during training if missing.\n",
    "        self.train_path = r'D:\\DeskS\\NLP\\NER\\ner.train'  # *\n",
    "        self.dev_path = r'D:\\DeskS\\NLP\\NER\\ner.dev'  # *\n",
    "        self.class_ls_path = r'D:\\DeskS\\NLP\\NER\\ner.label'  # *\n",
    "        self.pretrain_dir = './'  # pre-downloaded pretrained word vectors *\n",
    "        self.test_path = r'D:\\DeskS\\NLP\\NER\\ner.test'\n",
    "        self.vocab_path = 'vocab.pkl'\n",
    "        self.model_save_dir = 'checkpoint'\n",
    "        self.model_save_name = self.model_save_dir + '/BiLSTM_CRF.ckpt'  # best dev-acc model checkpoint\n",
    "\n",
    "        # Tunable parameters\n",
    "        # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz; generated later if absent\n",
    "        # random initialization: 'random'\n",
    "        self.embedding_type = 'embedding_SougouNews.npz'\n",
    "        self.use_gpu = True  # use the GPU when available, otherwise fall back to CPU\n",
    "        self.batch_size = 128\n",
    "        self.num_epochs = 40  # number of training epochs\n",
    "        self.num_workers = 0  # number of DataLoader worker processes\n",
    "        self.learning_rate = 0.001  # 0.001 converged faster than 0.01 with Adam (empirical)\n",
    "        self.embedding_dim = 300  # word-embedding dimension\n",
    "        self.hidden_size = 300  # hidden-layer dimension\n",
    "        self.num_layers = 2  # number of RNN layers\n",
    "        self.bidirectional = True  # bidirectional vs. unidirectional LSTM\n",
    "        self.require_improvement = 1  # stop if dev acc does not improve within this many epochs\n",
    "\n",
    "        # Derived from the parameters above — no need to edit\n",
    "        self.class_ls = []\n",
    "        self.num_class = len(self.class_ls)\n",
    "        self.vocab_len = 0  # vocab size (character-level token count); embedding-layer parameter, assigned later\n",
    "        self.embedding_pretrained = None  # assigned later from embedding_type: None for 'random', else loaded tensor\n",
    "        if self.use_gpu and torch.cuda.is_available():\n",
    "            self.device = 'cuda:0'\n",
    "        else:\n",
    "            self.device = 'cpu'"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "outputs": [],
   "source": [
    "\n",
    "def build_dataset(config):\n",
    "    \"\"\"Build the vocabulary (cached at config.vocab_path) and the train/dev/test\n",
    "    DataLoaders; fills config.vocab_len, config.class_ls and\n",
    "    config.embedding_pretrained as side effects.\"\"\"\n",
    "    if os.path.exists(config.vocab_path):\n",
    "        with open(config.vocab_path, 'rb') as f:  # BUG FIX: close the pickle file\n",
    "            vocab = pickle.load(f)\n",
    "    else:\n",
    "        # BUG FIX: build_vocab returns (vocab, pad_id, num_class); the whole\n",
    "        # tuple used to be kept/pickled as the vocab, so len(vocab) was 3.\n",
    "        vocab, _, _ = build_vocab(config.train_path, config.class_ls_path)\n",
    "        with open(config.vocab_path, 'wb') as f:\n",
    "            pickle.dump(vocab, f)\n",
    "    config.vocab_len = len(vocab)\n",
    "    with open(config.class_ls_path, 'r', encoding='utf-8') as f:  # BUG FIX: close the label file\n",
    "        config.class_ls = [x.strip() for x in f.readlines()]\n",
    "    print('\\nVocab size:{}'.format(len(vocab)))\n",
    "\n",
    "    train_data = Mydataset(config.train_path, config, vocab)\n",
    "    dev_data = Mydataset(config.dev_path, config, vocab)\n",
    "    train_loader = DataLoader(dataset=train_data, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)\n",
    "    dev_loader = DataLoader(dataset=dev_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)\n",
    "    if os.path.exists(config.test_path):\n",
    "        test_data = Mydataset(config.test_path, config, vocab)\n",
    "        test_loader = DataLoader(dataset=test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)\n",
    "    else:\n",
    "        test_loader = dev_loader  # no test split on disk: evaluate on dev\n",
    "    pretrained = extract_vocab_tensor(config)\n",
    "    # BUG FIX: torch.tensor(None) raises; keep None for random initialisation.\n",
    "    config.embedding_pretrained = torch.tensor(pretrained) if pretrained is not None else None\n",
    "    return train_loader, dev_loader, test_loader"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Vocab size:3\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "not enough values to unpack (expected 2, got 1)",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mValueError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[35], line 2\u001B[0m\n\u001B[0;32m      1\u001B[0m config \u001B[38;5;241m=\u001B[39mconfig()\n\u001B[1;32m----> 2\u001B[0m train_loader,dev_loader,test_loader \u001B[38;5;241m=\u001B[39m \u001B[43mbuild_dataset\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconfig\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[34], line 12\u001B[0m, in \u001B[0;36mbuild_dataset\u001B[1;34m(config)\u001B[0m\n\u001B[0;32m      9\u001B[0m config\u001B[38;5;241m.\u001B[39mclass_ls \u001B[38;5;241m=\u001B[39m [x\u001B[38;5;241m.\u001B[39mstrip() \u001B[38;5;28;01mfor\u001B[39;00m x \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mopen\u001B[39m(config\u001B[38;5;241m.\u001B[39mclass_ls_path,\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mr\u001B[39m\u001B[38;5;124m'\u001B[39m,encoding \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mutf-8\u001B[39m\u001B[38;5;124m'\u001B[39m)\u001B[38;5;241m.\u001B[39mreadlines()]\n\u001B[0;32m     10\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124mVocab size:\u001B[39m\u001B[38;5;132;01m{}\u001B[39;00m\u001B[38;5;124m'\u001B[39m\u001B[38;5;241m.\u001B[39mformat(\u001B[38;5;28mlen\u001B[39m(vocab)))\n\u001B[1;32m---> 12\u001B[0m train_data \u001B[38;5;241m=\u001B[39m \u001B[43mMydataset\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconfig\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtrain_path\u001B[49m\u001B[43m,\u001B[49m\u001B[43mconfig\u001B[49m\u001B[43m,\u001B[49m\u001B[43mvocab\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     13\u001B[0m dev_data \u001B[38;5;241m=\u001B[39m Mydataset(config\u001B[38;5;241m.\u001B[39mdev_path,config,vocab)\n\u001B[0;32m     14\u001B[0m trian_loader \u001B[38;5;241m=\u001B[39m DataLoader(dataset \u001B[38;5;241m=\u001B[39m train_data,batch_size\u001B[38;5;241m=\u001B[39mconfig\u001B[38;5;241m.\u001B[39mbatch_size,shuffle\u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mTrue\u001B[39;00m,num_workers\u001B[38;5;241m=\u001B[39m config\u001B[38;5;241m.\u001B[39mnum_workers)\n",
      "Cell \u001B[1;32mIn[31], line 10\u001B[0m, in \u001B[0;36mMydataset.__init__\u001B[1;34m(self, filepath, config, vocab)\u001B[0m\n\u001B[0;32m      8\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mvocab \u001B[38;5;241m=\u001B[39m vocab\n\u001B[0;32m      9\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mlabel_dic \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_getLabelDic(config)\n\u001B[1;32m---> 10\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdata_label \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_get_contents\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconfig\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     11\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mx \u001B[38;5;241m=\u001B[39m make_tensor(torch\u001B[38;5;241m.\u001B[39mtensor([_[\u001B[38;5;241m0\u001B[39m] \u001B[38;5;28;01mfor\u001B[39;00m _ \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdata_label]),config)\n\u001B[0;32m     12\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39my \u001B[38;5;241m=\u001B[39m make_tensor(torch\u001B[38;5;241m.\u001B[39mtensor([_[\u001B[38;5;241m1\u001B[39m] \u001B[38;5;28;01mfor\u001B[39;00m _ \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdata_label]),config)\n",
      "Cell \u001B[1;32mIn[31], line 35\u001B[0m, in \u001B[0;36mMydataset._get_contents\u001B[1;34m(self, config)\u001B[0m\n\u001B[0;32m     33\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m lin:\n\u001B[0;32m     34\u001B[0m     \u001B[38;5;28;01mcontinue\u001B[39;00m\n\u001B[1;32m---> 35\u001B[0m word,label \u001B[38;5;241m=\u001B[39m lin\u001B[38;5;241m.\u001B[39msplit()\n\u001B[0;32m     36\u001B[0m word_id \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mvocab\u001B[38;5;241m.\u001B[39mget(word,\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mvocab\u001B[38;5;241m.\u001B[39mget(UNK))\n\u001B[0;32m     37\u001B[0m label_id \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mlabel_dic\u001B[38;5;241m.\u001B[39mget(label)\n",
      "\u001B[1;31mValueError\u001B[0m: not enough values to unpack (expected 2, got 1)"
     ]
    }
   ],
   "source": [
    "# NOTE(review): rebinding `config` from the class to an instance shadows the\n",
    "# class object, so this cell cannot be safely re-run on a live kernel — keep\n",
    "# the class and its instance under different names.\n",
    "config =config()\n",
    "train_loader,dev_loader,test_loader = build_dataset(config)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from  torchcrf import  CRF"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "START_TAG = 'START'\n",
    "STOP_TAG = 'STOP'\n",
    "class Model(nn.Module):\n",
    "    def __init__(self,config):\n",
    "        super(Model,self).__init__()\n",
    "        if config.embedding_pretrained is not None:\n",
    "            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained,freeze=False)\n",
    "        else:\n",
    "            self.embedding =nn.Embedding(config.vocab_len,config.embedding_dim)\n",
    "\n",
    "        if config.bidirectional:\n",
    "            self.num_directions = 2\n",
    "        else:\n",
    "            self.num_directions = 1\n",
    "        self.config = config\n",
    "        self.rnn = nn.LSTM(config.embedding_dim,config.hidden_size,config.num_layer,batch_first=True,bidirectional=config.bidirectional)\n",
    "\n",
    "        self.hidden2tag = nn.Linear(config.hidden_size*self.num_directions,self.tagset_size)\n",
    "        self.tag_ls = self.getTagLs(config)\n",
    "        self.tag2idx = self.getTagDic()\n",
    "\n",
    "        self.tagset_size = len(self.tag2idx)\n",
    "        self.crf = CRF(self.tagset_size)\n",
    "\n",
    "\n",
    "    def _get_lstm_features(self,x):\n",
    "        x=self.embedding(x)\n",
    "        x=x.unsqueeze(1)\n",
    "        out,(hidden,c)=self.rnn(x)\n",
    "        out = self.hidden2tag(out)\n",
    "        out = out.transpose(0,1)\n",
    "        return out\n",
    "\n",
    "    def neg_log_likelihood(self,x,tags):\n",
    "        tags =tags.unqueeze(0)\n",
    "        feats = self._get_lstm_features(x)\n",
    "        return -self.crf(feats,tags)\n",
    "\n",
    "    def forward(self,x):\n",
    "        lstm_feats = self._get_lstm_features(x)\n",
    "        out = self.crf.decode(lstm_feats)\n",
    "        return out\n",
    "\n",
    "    def _make_tensor(self,tesor):\n",
    "        tesor_ret = tesor.to(self.config.device)\n",
    "        return  tesor_ret\n",
    "    def getTagLs(self,config):\n",
    "        tag_ls = config.class_ls\n",
    "        tag_ls.append(START_TAG)\n",
    "        tag_ls.append(STOP_TAG)\n",
    "        return tag_ls\n",
    "    def getTagDic(self):\n",
    "        tag_dic = {}\n",
    "        for idx,label in enumerate(self.tag_ls):\n",
    "            tag_dic[label]=idx\n",
    "        return tag_dic\n",
    "    def idx2Tag(self,idx):\n",
    "        return  self.tag_ls[idx]"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def build_corpus(filepath):\n",
    "    word_lists = []\n",
    "    tag_lists = []\n",
    "    with open(filepath,'r',encoding='utf-8') as f:\n",
    "        word_list = []\n",
    "        tag_list = []\n",
    "        for line in f:\n",
    "            lin = line.strip()\n",
    "            if lin:\n",
    "                word,tag = line.strip().split()\n",
    "                word_list.append(word)\n",
    "                tag_list.append(tag)\n",
    "            else:\n",
    "                word_lists.append(word_list)\n",
    "                tag_lists.append(tag_list)\n",
    "\n",
    "    word_lists = sorted(word_lists,key=lambda x:len(x),reverse=True)\n",
    "    tag_lists = sorted(tag_lists,key=lambda x:len(x),reverse=True)\n",
    "    return  word_lists,tag_lists"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def collate_fn(data):\n",
    "    \"\"\"Pad a batch of (sentence_tensor, tag_tensor) pairs to the batch max length.\n",
    "\n",
    "    Sentences are padded with the module-level `word_pad_id` and tag rows with\n",
    "    `label_pad_id`.  Returns (inputs, labels, sent_lengths), longest first.\n",
    "    \"\"\"\n",
    "    # Sort in place so the longest sentence comes first.\n",
    "    data.sort(key=lambda pair: len(pair[0]), reverse=True)\n",
    "    sentences, tags = zip(*data)\n",
    "\n",
    "    sent_lengths = [len(sent) for sent in sentences]\n",
    "    batch_size = len(sentences)\n",
    "    max_len = max(sent_lengths)\n",
    "    inputs = torch.full((batch_size, max_len), word_pad_id).long()\n",
    "    labels = torch.full((batch_size, max_len), label_pad_id).long()\n",
    "\n",
    "    # Copy each unpadded sequence into the left part of its padded row.\n",
    "    for row, (sent, lab) in enumerate(zip(sentences, tags)):\n",
    "        length = sent_lengths[row]\n",
    "        inputs[row, :length] = sent[:length]\n",
    "        labels[row, :length] = lab[:length]\n",
    "    return inputs, labels, sent_lengths"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class NERDataset(Dataset):\n",
    "    def __init__(self, sentences, tags, vocab, config):\n",
    "        self.sentences = sentences\n",
    "        self.tags = tags\n",
    "        self.label_dic = self._getLabelDic(config)\n",
    "        self.vocab = vocab\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.sentences)\n",
    "\n",
    "    def __getitem__(self, item):\n",
    "        sentence = self.sentences[item]\n",
    "        tag = self.tags[item]\n",
    "        seq_len = len(sentence)\n",
    "\n",
    "        # convert the sentences and tags into numerical format\n",
    "        word_tokens = [self.vocab.get(word, self.vocab.get(UNK)) for word in sentence]\n",
    "        tag_tokens = [self.label_dic[t] for t in tag]\n",
    "\n",
    "\n",
    "        return torch.LongTensor(word_tokens),  torch.LongTensor(tag_tokens)\n",
    "\n",
    "    def _getLabelDic(self, config):\n",
    "        label_dic ={}\n",
    "        with open(config.class_ls_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                label = line.strip()\n",
    "                label_dic[label] = idx\n",
    "        return label_dic\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Build per-sentence word/tag lists from the training file.\n",
    "# NOTE(review): hardcoded absolute Windows path — prefer config.train_path.\n",
    "word_lists, tag_lists = build_corpus(r'D:\\DeskS\\NLP\\NER\\ner.train')"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# NOTE(review): `config` was already rebound to an instance in an earlier cell,\n",
    "# so on a linear Restart-and-Run-All this call raises TypeError ('config' object\n",
    "# is not callable) — keep the class and the instance under different names.\n",
    "config = config()  # instantiate all parameters\n",
    "train_dataset = NERDataset(word_lists, tag_lists, vocab_dic, config)\n",
    "\n",
    "train_data_loader =  DataLoader(train_dataset, 128, shuffle=True, collate_fn=collate_fn)\n",
    "next(iter(train_data_loader))"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
