{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from multiprocessing import cpu_count\n",
    "import numpy as np\n",
    "import shutil\n",
    "import paddle\n",
    "import paddle.fluid as fluid"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 数据预处理，生成数据字典"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "data": {
      "text/plain": "True"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sanity check: confirm this Paddle build has CUDA support before GPU training\n",
     "paddle.fluid.is_compiled_with_cuda()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据字典生成完成！\n",
      "类Id及其类别名称: {'0': '财经', '1': '彩票', '2': '房产', '3': '股票', '4': '家居', '5': '教育', '6': '科技', '7': '社会', '8': '时尚', '9': '时政', '10': '体育', '11': '星座', '12': '游戏', '13': '娱乐'}\n"
     ]
    }
   ],
   "source": [
     "# Build the character-level vocabulary from the training data\n",
     "data_root_path='./data/'\n",
     "def create_dict(data_path, dict_path):\n",
     "   \"\"\"Scan the tab-separated training file and write a {char: id} vocabulary to dict_path.\n",
     "\n",
     "   Each input line looks like: classId <tab> className <tab> ... <tab> title.\n",
     "   The vocabulary is saved as the repr() of a Python dict on a single line.\n",
     "   \"\"\"\n",
     "   dict_set = set()\n",
     "   # Record each class id and its human-readable name, shown for later use at prediction time\n",
     "   id_and_className={}\n",
     "\n",
     "   # Read the whole downloaded dataset\n",
     "   with open(data_path, 'r', encoding='utf-8') as f:\n",
     "       lines = f.readlines()\n",
     "   # Collect every distinct character of every title\n",
     "   for line in lines:\n",
     "       lineList=line.split('\\t')\n",
     "       title = lineList[-1].replace('\\n','')\n",
     "       classId=lineList[0]\n",
     "       if classId not in id_and_className.keys():\n",
     "           id_and_className[classId]=lineList[1]\n",
     "\n",
     "       for s in title:\n",
     "           dict_set.add(s)\n",
     "   # Turn the set into a dict mapping each character to a unique integer id\n",
     "   dict_list = []\n",
     "   i = 0\n",
     "   for s in dict_set:\n",
     "       dict_list.append([s, i])\n",
     "       i += 1\n",
     "   # Add a sentinel id for unknown characters\n",
     "   dict_txt = dict(dict_list)\n",
     "   end_dict = {\"<unk>\": i}\n",
     "   dict_txt.update(end_dict)\n",
     "   # Persist the vocabulary to disk (as the repr of a Python dict)\n",
     "   with open(dict_path, 'w', encoding='utf-8') as f:\n",
     "       f.write(str(dict_txt))\n",
     "   print(\"数据字典生成完成！\")\n",
     "   print('类Id及其类别名称:',id_and_className)\n",
     "\n",
     "# Return the number of entries in the saved vocabulary\n",
     "def get_dict_len(dict_path):\n",
     "   # NOTE(review): eval() on file contents executes arbitrary code; acceptable only\n",
     "   # because the file is produced locally by create_dict above.\n",
     "   with open(dict_path, 'r', encoding='utf-8') as f:\n",
     "       line = eval(f.readlines()[0])\n",
     "\n",
     "   return len(line.keys())\n",
     "\n",
     "if __name__ == '__main__':\n",
     "    # Build the vocabulary from the training split\n",
     "    data_path = os.path.join(data_root_path, 'Train.txt')\n",
     "    dict_path = os.path.join(data_root_path, \"dict_txt.txt\")\n",
     "    # Create the data dictionary\n",
     "    create_dict(data_path, dict_path)\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "# 生成数据列表\n",
    "data_root_path='./data/'\n",
    "\n",
    "def create_data_list(data_root_path):\n",
    "   with open(data_root_path + 'test_list.txt', 'w') as f:\n",
    "       pass\n",
    "   with open(data_root_path + 'train_list.txt', 'w') as f:\n",
    "       pass\n",
    "\n",
    "   with open(os.path.join(data_root_path, 'dict_txt.txt'), 'r', encoding='utf-8') as f_data:\n",
    "       dict_txt = eval(f_data.readlines()[0])\n",
    "\n",
    "   with open(os.path.join(data_root_path, 'Train.txt'), 'r', encoding='utf-8') as f_data:\n",
    "       lines = f_data.readlines()\n",
    "   i = 0\n",
    "   for line in lines:\n",
    "       title = line.split('\\t')[-1].replace('\\n', '')\n",
    "       l = line.split('\\t')[0]\n",
    "       labs = \"\"\n",
    "       if i % 10 == 0:\n",
    "           with open(os.path.join(data_root_path, 'test_list.txt'), 'a', encoding='utf-8') as f_test:\n",
    "               for s in title:\n",
    "                   lab = str(dict_txt[s])\n",
    "                   labs = labs + lab + ','\n",
    "               labs = labs[:-1]\n",
    "               labs = labs + '\\t' + l + '\\n'\n",
    "               f_test.write(labs)\n",
    "       else:\n",
    "           with open(os.path.join(data_root_path, 'train_list.txt'), 'a', encoding='utf-8') as f_train:\n",
    "               for s in title:\n",
    "                   lab = str(dict_txt[s])\n",
    "                   labs = labs + lab + ','\n",
    "               labs = labs[:-1]\n",
    "               labs = labs + '\\t' + l + '\\n'\n",
    "               f_train.write(labs)\n",
    "       i += 1\n",
    "   print(\"数据列表生成完成！\")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据列表生成完成！\n"
     ]
    }
   ],
   "source": [
     "# Generate the train/test data lists\n",
     "create_data_list(data_root_path)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 网络模型设置"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
     "def data_mapper(sample):\n",
     "   \"\"\"Map one (ids_csv, label) record to ([int64 ids], int label).\n",
     "\n",
     "   Empty tokens arise from records whose title was empty (ids string is '');\n",
     "   they are reported and skipped rather than converted.\n",
     "   \"\"\"\n",
     "   data, label = sample\n",
     "   dataList=[]\n",
     "   for e in data.split(','):\n",
     "        if e=='':\n",
     "           print('meet blank')\n",
     "        else:\n",
     "            dataList.append(np.int64(e))\n",
     "   return dataList, int(label)\n",
     "\n",
     "# Training reader: yields raw (ids_csv, label) pairs, decoded in parallel by data_mapper\n",
     "def train_reader(train_list_path):\n",
     "   def reader():\n",
     "       with open(train_list_path, 'r') as f:\n",
     "           lines = f.readlines()\n",
     "           # Shuffle samples once per pass over the file\n",
     "           np.random.shuffle(lines)\n",
     "           # label keeps its trailing newline here; int() in data_mapper tolerates it\n",
     "           for line in lines:\n",
     "               data, label = line.split('\\t')\n",
     "               yield data, label\n",
     "   return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 1024)\n",
     "# Test reader: same as train_reader but without shuffling\n",
     "def test_reader(test_list_path):\n",
     "\n",
     "   def reader():\n",
     "       with open(test_list_path, 'r') as f:\n",
     "           lines = f.readlines()\n",
     "           for line in lines:\n",
     "               data, label = line.split('\\t')\n",
     "               yield data, label\n",
     "\n",
     "   return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 1024)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "def CNN_net(data,dict_dim, class_dim=14, emb_dim=128, hid_dim=128,hid_dim2=98):\n",
    "       emb = fluid.layers.embedding(input=data,\n",
    "                                size=[dict_dim, emb_dim])\n",
    "       conv_3 = fluid.nets.sequence_conv_pool(\n",
    "                                                input=emb,\n",
    "                                                num_filters=hid_dim,\n",
    "                                                filter_size=3,\n",
    "                                                act=\"tanh\",\n",
    "                                                pool_type=\"sqrt\")\n",
    "       conv_4 = fluid.nets.sequence_conv_pool(\n",
    "                                                input=emb,\n",
    "                                                num_filters=hid_dim2,\n",
    "                                                filter_size=4,\n",
    "                                                act=\"tanh\",\n",
    "                                                pool_type=\"sqrt\")\n",
    "\n",
    "       output = fluid.layers.fc(\n",
    "           input=[conv_3, conv_4], size=class_dim, act='softmax')\n",
    "       return output"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 训练模型"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "meet blank\n",
      "Test:0, Cost:0.51921, ACC:0.84122\n",
      "Test:0, Cost:0.43008, ACC:0.86608\n",
      "meet blank\n",
      "Test:1, Cost:0.40929, ACC:0.87126\n",
      "Test:1, Cost:0.40870, ACC:0.87170\n",
      "meet blank\n",
      "Test:2, Cost:0.39102, ACC:0.87676\n",
      "Test:2, Cost:0.39938, ACC:0.87486\n",
      "meet blank\n",
      "Test:3, Cost:0.38109, ACC:0.87951\n",
      "Test:3, Cost:0.39417, ACC:0.87648\n",
      "meet blank\n",
      "Test:4, Cost:0.37448, ACC:0.88132\n",
      "Test:4, Cost:0.39051, ACC:0.87761\n",
      "meet blank\n",
      "Test:5, Cost:0.36962, ACC:0.88286\n",
      "Test:5, Cost:0.38799, ACC:0.87833\n",
      "meet blank\n",
      "Test:6, Cost:0.36582, ACC:0.88386\n",
      "Test:6, Cost:0.38587, ACC:0.87873\n",
      "meet blank\n",
      "Test:7, Cost:0.36273, ACC:0.88488\n",
      "Test:7, Cost:0.38420, ACC:0.87921\n",
      "meet blank\n",
      "Test:8, Cost:0.36015, ACC:0.88556\n",
      "Test:8, Cost:0.38299, ACC:0.87959\n",
      "meet blank\n",
      "Test:9, Cost:0.35791, ACC:0.88616\n",
      "Test:9, Cost:0.38180, ACC:0.88011\n"
     ]
    }
   ],
   "source": [
     "paddle.enable_static()\n",
     "# Input placeholders: a variable-length sequence of char ids and an int label\n",
     "words = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)\n",
     "label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n",
     "# Vocabulary size (including <unk>)\n",
     "dict_dim = get_dict_len('./data/dict_txt.txt')\n",
     "# Build the CNN classifier\n",
     "# model = CNN_net(words, dict_dim, 15)\n",
     "model = CNN_net(words, dict_dim)\n",
     "# Loss and accuracy metrics\n",
     "cost = fluid.layers.cross_entropy(input=model, label=label)\n",
     "avg_cost = fluid.layers.mean(cost)\n",
     "# paddle.static.accuracy()\n",
     "acc = fluid.layers.accuracy(input=model, label=label)\n",
     "# Clone an inference-only program before the optimizer adds backward ops\n",
     "test_program = fluid.default_main_program().clone(for_test=True)\n",
     "# Optimizer\n",
     "optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.002)\n",
     "opt = optimizer.minimize(avg_cost)\n",
     "# Executor; CPU training is much slower than GPU\n",
     "#place = fluid.CPUPlace()\n",
     "place = fluid.CUDAPlace(0)\n",
     "exe = fluid.Executor(place)\n",
     "# Initialize parameters\n",
     "exe.run(fluid.default_startup_program())\n",
     "\n",
     "# NOTE(review): these assignments shadow the train_reader/test_reader functions,\n",
     "# so this cell cannot be re-run without re-defining them first\n",
     "train_reader = paddle.batch(reader=train_reader('./data/train_list.txt'), batch_size=128)\n",
     "test_reader = paddle.batch(reader=test_reader('./data/test_list.txt'), batch_size=128)\n",
     "feeder = fluid.DataFeeder(place=place, feed_list=[words, label])\n",
     "\n",
     "\n",
     "EPOCH_NUM=10\n",
     "model_save_dir = './work/infer_model/'\n",
     "# Training loop\n",
     "\n",
     "for pass_id in range(EPOCH_NUM):\n",
     "\n",
     "   # Train one epoch, accumulating per-batch cost/accuracy\n",
     "   tr_c = []\n",
     "   tr_a = []\n",
     "   for batch_id, data in enumerate(train_reader()):\n",
     "       train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n",
     "                            feed=feeder.feed(data),\n",
     "                            fetch_list=[avg_cost, acc])\n",
     "       tr_c.append(train_cost[0])\n",
     "       tr_a.append(train_acc[0])\n",
     "   c = (sum(tr_c) / len(tr_c))\n",
     "   a = (sum(tr_a) / len(tr_a))\n",
     "   print('Train:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, c, a))\n",
     "       # if batch_id % 100 == 0:\n",
     "       #\n",
     "       #     print('Train Pass:%d, Batch:%d, Cost:%0.5f, Acc:%0.5f' % (pass_id, batch_id, train_cost[0], train_acc[0]))\n",
     "\n",
     "\n",
     "   # Evaluate on the held-out test split\n",
     "   test_costs = []\n",
     "   test_accs = []\n",
     "   for batch_id, data in enumerate(test_reader()):\n",
     "       test_cost, test_acc = exe.run(program=test_program,\n",
     "                                             feed=feeder.feed(data),\n",
     "                                             fetch_list=[avg_cost, acc])\n",
     "       #if pass_id=0 & batch_id%100==0:\n",
     "           #print('Test Pass:%d, Batch:%d, Cost:%0.5f, Acc:%0.5f' % (pass_id, batch_id, test_cost[0], test_acc[0]))\n",
     "\n",
     "       test_costs.append(test_cost[0])\n",
     "       test_accs.append(test_acc[0])\n",
     "   # Average test loss and accuracy over all batches\n",
     "   test_cost = (sum(test_costs) / len(test_costs))\n",
     "   test_acc = (sum(test_accs) / len(test_accs))\n",
     "   print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 保存模型"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练模型保存完成！\n"
     ]
    }
   ],
   "source": [
     "# Persist the inference program (input: words; output: softmax probabilities)\n",
     "if not os.path.exists(model_save_dir):\n",
     "   os.makedirs(model_save_dir)\n",
     "fluid.io.save_inference_model(model_save_dir,\n",
     "                           feeded_var_names=[words.name],\n",
     "                           target_vars=[model],\n",
     "                           executor=exe)\n",
     "print('训练模型保存完成！')"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 生成结果文件"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "place = fluid.CUDAPlace(1)\n",
    "exe = fluid.Executor(place)\n",
    "exe.run(fluid.default_startup_program())\n",
    "\n",
    "save_path = './work/infer_model/'\n",
    "\n",
    "# 从模型中获取预测程序、输入数据名称列表、分类器\n",
    "[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n",
    "\n",
    "\n",
    "def create_eval_data(data_root_path):\n",
    "    # 读取数据字典\n",
    "    with open('./data/dict_txt.txt', 'r', encoding='utf-8') as f_data:\n",
    "        dict_txt = eval(f_data.readlines()[0])\n",
    "    dict_txt = dict(dict_txt)\n",
    "    # 把字符串数据转换成列表数据\n",
    "    keys = dict_txt.keys()\n",
    "    datas = []\n",
    "\n",
    "    with open(os.path.join(data_root_path, 'Test.txt'), 'r', encoding='utf-8') as f_data:\n",
    "        lines = f_data.readlines()\n",
    "\n",
    "    for line in lines:\n",
    "        #print(line)\n",
    "        data=[]\n",
    "        for s in line:\n",
    "            if not s in keys:\n",
    "                s='<unk>'\n",
    "            data.append(np.int64(dict_txt[s]))\n",
    "        datas.append(data)\n",
    "    return datas\n",
    "\n",
    "\n",
    "eval_data=create_eval_data(data_root_path)\n",
    "# 获取每句话的单词数量\n",
    "base_shape = [[len(c) for c in eval_data]]\n",
    "\n",
    "# 生成预测数据\n",
    "tensor_words = fluid.create_lod_tensor(eval_data, base_shape, place)\n",
    "\n",
    "# 执行预测\n",
    "result = exe.run(program=infer_program,\n",
    "                 feed={feeded_var_names[0]: tensor_words},\n",
    "                 fetch_list=target_var)\n",
    "\n",
    "# 分类名称\n",
    "names = [ '财经', '彩票', '房产', '股票','家居', '教育', '科技', '社会', '时尚', '时政','体育','星座','游戏','娱乐']\n",
    "\n",
    "# 获取结果概率最大的label\n",
    "print(len(eval_data))\n",
    "# with open(os.path.join(data_root_path, 'Test.txt'), 'r', encoding='utf-8') as f_data:\n",
    "#     lines = f_data.readlines()\n",
    "with open(os.path.join(data_root_path,'result.txt'),'a',encoding='utf-8') as f_result:\n",
    "    for i in range(len(eval_data)):\n",
    "        lab = np.argsort(result)[0][i][-1]\n",
    "\n",
    "    #print('标题为:%s,  预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lines[i],lab, names[lab], result[0][i][lab]))\n",
    "\n",
    "        f_result.write(names[lab]+'\\n')\n",
    "        if i%1000==0:\n",
    "            print('已生成%d行数据，总共有%d行数据'%(i,len(eval_data)))\n",
    "print('已生成结果文件')\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}