{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d6cef21e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "from seq2seq_v101 import Seq2Seq\n",
    "\n",
    "from data_process.sort_process import SortProcess\n",
    "from data_process.data_iter import DataIter\n",
    "\n",
    "\n",
    "# Paths to the toy letter-sorting dataset (source / target sequences).\n",
    "src_path = \"data/sort/letters_source.txt\"\n",
    "trg_path = \"data/sort/letters_target.txt\"\n",
    "\n",
    "# Build id sequences, then keep only the first 1000 samples as numpy arrays.\n",
    "sp = SortProcess(src_path, trg_path)\n",
    "src_ids, trg_ids, label_ids = sp.dataset(max_len=10, to_numpy=False)\n",
    "src_ids = np.array(src_ids[:1000])\n",
    "trg_ids = np.array(trg_ids[:1000])\n",
    "label_ids = np.array(label_ids[:1000])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8d8f268c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Mylosscallback(tf.keras.callbacks.Callback):\n",
    "    \"\"\"Log the per-batch training loss to TensorBoard.\n",
    "\n",
    "    Uses the TF2 `tf.summary` API: the TF1-style `tf.Summary()` /\n",
    "    `add_summary()` calls used previously do not exist in TF2, and a\n",
    "    writer created with `tf.summary.create_file_writer` has no\n",
    "    `add_summary` method, so `on_batch_end` raised AttributeError.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, log_dir):\n",
    "        super(Mylosscallback, self).__init__()\n",
    "        self.val_writer = tf.summary.create_file_writer(log_dir)\n",
    "        self.num = 0  # running batch counter, used as the summary step\n",
    "\n",
    "    def on_train_begin(self, logs=None):\n",
    "        # logs=None instead of a mutable {} default; keras passes logs itself.\n",
    "        self.losses = []\n",
    "\n",
    "    def on_batch_end(self, batch, logs=None):\n",
    "        logs = logs or {}\n",
    "        self.num += 1\n",
    "        loss = logs.get('loss')\n",
    "        if loss is None:\n",
    "            return  # nothing to record for this batch\n",
    "        # TF2 replacement for Summary()/add_summary(): write a scalar\n",
    "        # inside the writer's context, keyed by the running batch count.\n",
    "        with self.val_writer.as_default():\n",
    "            tf.summary.scalar('loss', loss, step=self.num)\n",
    "        self.val_writer.flush()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5b449a8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 记录训练数据 每个周期记录\n",
    "history = tf.keras.callbacks.History() \n",
    "\n",
    "# 提前结束\n",
    "early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2) \n",
    "\n",
    "# 日志记录\n",
    "logger = tf.keras.callbacks.CSVLogger(filename='data/train.csv') \n",
    "\n",
    "\n",
    "# 保存最终模型\n",
    "check_final = tf.keras.callbacks.ModelCheckpoint(filepath='./data/model/final/checkpoint_final', \n",
    "                                                 monitor='val_loss',\n",
    "#                                                  save_best_only=True,\n",
    "                                                 save_weights_only=True,\n",
    "                                                ) \n",
    "# 保存最佳模型\n",
    "check_best = tf.keras.callbacks.ModelCheckpoint(filepath='./data/model/best/checkpoint_best', \n",
    "                                                 monitor='val_loss',\n",
    "#                                                  save_best_only=True,\n",
    "                                                 save_weights_only=True,\n",
    "                                                ) \n",
    "\n",
    "\n",
    "\n",
    "callbacks_ = [history, early_stop, logger, check_final, check_best]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e4f1fb19",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100\n",
      "165/250 [==================>...........] - ETA: 0s - loss: 16630.9199 "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-04 22:12:58.673628: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "250/250 [==============================] - 0s 566us/step - loss: 16206.7734 - val_loss: 53898.7344\n",
      "Epoch 2/100\n",
      "250/250 [==============================] - 0s 397us/step - loss: 13495.2334 - val_loss: 44549.2148\n",
      "Epoch 3/100\n",
      "250/250 [==============================] - 0s 376us/step - loss: 11136.4941 - val_loss: 36421.1562\n",
      "Epoch 4/100\n",
      "250/250 [==============================] - 0s 362us/step - loss: 9093.7578 - val_loss: 29440.5156\n",
      "Epoch 5/100\n",
      "250/250 [==============================] - 0s 362us/step - loss: 7336.4136 - val_loss: 23472.0566\n",
      "Epoch 6/100\n",
      "250/250 [==============================] - 0s 357us/step - loss: 5838.5293 - val_loss: 18408.8613\n",
      "Epoch 7/100\n",
      "250/250 [==============================] - 0s 370us/step - loss: 4575.0293 - val_loss: 14194.7168\n",
      "Epoch 8/100\n",
      "250/250 [==============================] - 0s 389us/step - loss: 3522.7124 - val_loss: 10701.7725\n",
      "Epoch 9/100\n",
      "250/250 [==============================] - 0s 371us/step - loss: 2658.6599 - val_loss: 7879.6777\n",
      "Epoch 10/100\n",
      "250/250 [==============================] - 0s 377us/step - loss: 1961.0895 - val_loss: 5634.5107\n",
      "Epoch 11/100\n",
      "250/250 [==============================] - 0s 370us/step - loss: 1410.0619 - val_loss: 3898.5508\n",
      "Epoch 12/100\n",
      "250/250 [==============================] - 0s 362us/step - loss: 985.5301 - val_loss: 2591.7397\n",
      "Epoch 13/100\n",
      "250/250 [==============================] - 0s 365us/step - loss: 668.1376 - val_loss: 1644.9698\n",
      "Epoch 14/100\n",
      "250/250 [==============================] - 0s 410us/step - loss: 439.2586 - val_loss: 989.4841\n",
      "Epoch 15/100\n",
      "250/250 [==============================] - 0s 501us/step - loss: 281.0535 - val_loss: 557.8835\n",
      "Epoch 16/100\n",
      "250/250 [==============================] - 0s 476us/step - loss: 177.1147 - val_loss: 293.5206\n",
      "Epoch 17/100\n",
      "250/250 [==============================] - 0s 451us/step - loss: 112.8443 - val_loss: 146.1237\n",
      "Epoch 18/100\n",
      "250/250 [==============================] - 0s 448us/step - loss: 75.9351 - val_loss: 73.4551\n",
      "Epoch 19/100\n",
      "250/250 [==============================] - 0s 424us/step - loss: 56.4647 - val_loss: 43.8683\n",
      "Epoch 20/100\n",
      "250/250 [==============================] - 0s 376us/step - loss: 47.0573 - val_loss: 35.7675\n",
      "Epoch 21/100\n",
      "250/250 [==============================] - 0s 372us/step - loss: 42.9962 - val_loss: 36.5379\n",
      "Epoch 22/100\n",
      "250/250 [==============================] - 0s 368us/step - loss: 41.4553 - val_loss: 39.3955\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x106fe16d0>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Synthetic linear data: y = 3x + 7 plus Gaussian noise.\n",
    "n = 10000\n",
    "x = np.linspace(0, 100, n)\n",
    "y = 3 * x + 7 + np.random.randn(n) * 6\n",
    "\n",
    "# A single Dense unit on 1-D input is exactly a linear regression.\n",
    "model = tf.keras.models.Sequential([\n",
    "    tf.keras.layers.Dense(1, input_dim=1),\n",
    "])\n",
    "model.compile(optimizer='adam', loss='mse')\n",
    "model.fit(x, y, epochs=100, validation_split=0.2, callbacks=callbacks_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b331aa45",
   "metadata": {},
   "outputs": [],
   "source": [
    "class History(tf.keras.callbacks.Callback):\n",
    "    \"\"\"Callback that records events into a `History` object.\n",
    "\n",
    "    Reference copy of the Keras implementation. The base class is\n",
    "    written as `tf.keras.callbacks.Callback` because the bare name\n",
    "    `Callback` is never imported in this notebook and would raise\n",
    "    NameError when this cell runs.\n",
    "\n",
    "    This callback is automatically applied to every Keras model.\n",
    "    The `History` object gets returned by the `fit` method of models.\n",
    "\n",
    "    Example:\n",
    "\n",
    "    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n",
    "    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')\n",
    "    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),\n",
    "    ...                     epochs=10, verbose=1)\n",
    "    >>> print(history.params)\n",
    "    {'verbose': 1, 'epochs': 10, 'steps': 1}\n",
    "    >>> # check the keys of history object\n",
    "    >>> print(history.history.keys())\n",
    "    dict_keys(['loss'])\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(History, self).__init__()\n",
    "        self.history = {}  # metric name -> list of per-epoch values\n",
    "\n",
    "    def on_train_begin(self, logs=None):\n",
    "        self.epoch = []  # reset the epoch list for every new fit() call\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None):\n",
    "        logs = logs or {}\n",
    "        self.epoch.append(epoch)\n",
    "        for k, v in logs.items():\n",
    "            self.history.setdefault(k, []).append(v)\n",
    "\n",
    "        # Set the history attribute on the model after the epoch ends. This\n",
    "        # makes sure that the state which is set is the latest one.\n",
    "        self.model.history = self"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6a608745",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "08d22750",
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "\n",
    "DEBUG = logging.DEBUG   \n",
    "INFO = logging.INFO    \n",
    "WARNING = logging.WARNING   \n",
    "ERROR = logging.ERROR   \n",
    "CRITICAL = logging.CRITICAL \n",
    "\n",
    "\n",
    "def get_logger(path, to_file=True, stream=False, leve=INFO, formatter=None):\n",
    "    logger = logging.getLogger(path)\n",
    "    logger.handlers.clear()\n",
    "    logger.setLevel(leve)\n",
    "    \n",
    "    if formatter is None:\n",
    "        formatter = logging.Formatter('%(asctime)s-%(levelname)s : %(message)s', '%Y-%m-%d %H:%M:%S')\n",
    "    \n",
    "    if stream:\n",
    "        # 创建一个StreamHandler,用于输出到控制台\n",
    "        stream_handler = logging.StreamHandler()\n",
    "        stream_handler.setLevel(leve)\n",
    "        stream_handler.setFormatter(formatter)\n",
    "        logger.addHandler(stream_handler) \n",
    "    \n",
    "    if to_file:\n",
    "        # 创建一个FileHandler，用于写到本地 使用RotatingFileHandler类，滚动备份日志\n",
    "        fh = logging.handlers.RotatingFileHandler(filename=path,\n",
    "                                                  mode='a', \n",
    "                                                  maxBytes=1024 * 1024 * 1000, \n",
    "                                                  backupCount=5,\n",
    "                                                  encoding='utf-8')\n",
    "        fh.setLevel(leve)\n",
    "        fh.setFormatter(formatter)\n",
    "        logger.addHandler(fh)\n",
    "    return logger "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b2b3ac99",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two independent loggers, one per file (the logger name is the path).\n",
    "log1 = get_logger(path=\"data/log2.log\")\n",
    "log2 = get_logger(path=\"data/log3.log\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c710d52",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8434b5f2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each message goes only to its own logger's file.\n",
    "log1.info(\"sss\")\n",
    "\n",
    "log2.info(\"333\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3fe098b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "\n",
    "# IPython introspection: show the docstring of the logging module.\n",
    "logging?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "604d1fb9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: http://mirrors.aliyun.com/pypi/simple/\n",
      "Collecting jieba\n",
      "  Downloading http://mirrors.aliyun.com/pypi/packages/c6/cb/18eeb235f833b726522d7ebed54f2278ce28ba9438e3135ab0278d9792a2/jieba-0.42.1.tar.gz (19.2 MB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.2/19.2 MB\u001b[0m \u001b[31m325.0 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:02\u001b[0m\n",
      "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25hBuilding wheels for collected packages: jieba\n",
      "  Building wheel for jieba (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for jieba: filename=jieba-0.42.1-py3-none-any.whl size=19314459 sha256=a62781a4d19984f7ab289715bb11cdfca0d4bc37a8eb93821d6f34fc34d9ac69\n",
      "  Stored in directory: /Users/st/Library/Caches/pip/wheels/e1/4b/67/93eb050406bb5774e9537785446258da898a98e92e7e1bbfae\n",
      "Successfully built jieba\n",
      "Installing collected packages: jieba\n",
      "Successfully installed jieba-0.42.1\n"
     ]
    }
   ],
   "source": [
    "# %pip (unlike !pip) targets the running kernel's environment, and the\n",
    "# version pin (taken from the recorded install output) keeps the\n",
    "# notebook reproducible.\n",
    "%pip install jieba==0.42.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c7668e63",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
