{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 401,
   "id": "d642a756",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 402,
   "id": "14d24ffc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Speaker tags prepended to each conversation turn in every dataset below.\n",
    "OUR_NAME = \"[HugChat]:\"\n",
    "USER_NAME = \"[Human]:\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "464a76f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# hello_conversation = [\n",
    "#     \"Human: What's your name? \\n HugChat: I'm {}, a smart assistant trained by HugNLP library. Can I help you? \\n\\n\".format(OUR_NAME.replace(':', '')),\n",
    "#     \"Human: What's your name? \\n HugChat: My name is {}, I come from HugNLP. Can I help you? \\n\\n\".format(OUR_NAME.replace(':', '')),\n",
    "#     \"Human: 你好呀 \\n HugChat: 您好，我是{}，有什么可以帮助您的嘛～ \\n\\n\".format(OUR_NAME.replace(':', '')),\n",
    "#     \"Human: 你叫什么名字呢？ \\n HugChat: 您好，我是{}，我是由HugNLP框架训练而来的，很高兴见到你～ \\n\\n\".format(OUR_NAME.replace(':', '')),\n",
    "#     \"Human: What's your name? \\n HugChat: My name is HugChat. How can I help you today? \\n\\n\",\n",
    "#     \"Human: Where are you from? \\n HugChat: I'm a AI chatbot, I was trained by HugNLP library. You can search HugNLP in GitHub repositories. \\n\\n\",\n",
    "#     \"Human: What's your name? \\n HugChat: I'm a AI chatbot, I was trained by HugNLP library. You can search HugNLP in GitHub repositories. \\n\\n\",\n",
    "# ]\n",
    "# def obtain_self_introduction():\n",
    "#     lines = list()\n",
    "#     for text in hello_conversation:\n",
    "#         lines.append({\n",
    "#             \"text\": text\n",
    "#         })\n",
    "#     return lines\n",
    "# hello_lines = obtain_self_introduction()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 403,
   "id": "413bd37b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# System prompts (English / Chinese) introducing HugChat before a dialogue.\n",
    "instruct = \\\n",
    "\"\"\"\n",
    "HugChat is an AI assistant developed by the HugNLP library, which belongs to HugAI Lab and East China Normal University. Below is a conversation between HugChat and human.\n",
    "\"\"\"\n",
    "zh_instruct = \\\n",
    "\"\"\"\n",
    "HugChat 是由HugNLP框架训练得到的生成式人工智能小助手，是由HugAI Lab和华东师范大学研制的。 Below is a conversation between HugChat and human.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7600a358",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'text': \"Human: What's your name? \\n HugChat: I'm HugChat:, a smart assistant trained by HugNLP library. Can I help you? \\n\\n\"}, {'text': \"Human: What's your name? \\n HugChat: My name is HugChat:, I come from HugNLP. Can I help you? \\n\\n\"}, {'text': 'Human: 你好呀 \\n HugChat: 您好，我是HugChat:，有什么可以帮助您的嘛～ \\n\\n'}, {'text': 'Human: 你叫什么名字呢？ \\n HugChat: 您好，我是HugChat:，我是由HugNLP框架训练而来的，很高兴见到你～ \\n\\n'}, {'text': \"Human: What's your name? \\n HugChat: My name is HugChat. How can I help you today? \\n\\n\"}, {'text': \"Human: Where are you from? \\n HugChat: I'm a AI chatbot, I was trained by HugNLP library. You can search HugNLP in GitHub repositories. \\n\\n\"}, {'text': \"Human: What's your name? \\n HugChat: I'm a AI chatbot, I was trained by HugNLP library. You can search HugNLP in GitHub repositories. \\n\\n\"}]\n"
     ]
    }
   ],
   "source": [
    "# hello_lines is built by the commented-out cell above, so it is undefined on\n",
    "# a fresh kernel; guard to keep Restart & Run All from raising NameError.\n",
    "if \"hello_lines\" in globals():\n",
    "    print(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 404,
   "id": "922b4c21",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global accumulators: each dataset section below extends one of these lists.\n",
    "# only chinese\n",
    "ALL_ZH_TRAIN_DATA = list()\n",
    "ALL_ZH_DEV_DATA = list()\n",
    "ALL_ZH_TEST_DATA = list()\n",
    "\n",
    "# only english\n",
    "ALL_EN_TRAIN_DATA = list()\n",
    "ALL_EN_DEV_DATA = list()\n",
    "ALL_EN_TEST_DATA = list()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b5b5a49a",
   "metadata": {},
   "source": [
    "## 1. ALPACA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 405,
   "id": "048a5ca4",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████| 52002/52002 [00:00<00:00, 258796.26it/s]\n",
      "100%|█████████████████████████████████████| 252/252 [00:00<00:00, 183246.29it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_alpaca():\n",
    "    # Read the Alpaca 52K train / 252 test splits and rewrite the\n",
    "    # Instruction/Input/Output markers into [Human]/[HugChat] tags.\n",
    "    train_path = \"./alpaca/train/train_52002.json\"\n",
    "    test_path = \"./alpaca/test/test_252.json\"\n",
    "    \n",
    "    def clean(value):\n",
    "        # Strip Alpaca-format markers and rewrite speaker tags. (\"Defintion\"\n",
    "        # is a typo present in the raw data, so it is matched literally.)\n",
    "        value = value.replace(\"Defintion: More Information Needed \\n \", \"\")\n",
    "        value = value.replace(\"Instruction: \", \"\")\n",
    "        value = value.replace(\"Input: \", USER_NAME + \" \").replace(\"Output: \", OUR_NAME + \" \")\n",
    "        # Collapse blank lines, terminate the example, then restore the blank\n",
    "        # line before each human turn.\n",
    "        value = value.strip().replace(\"\\n\\n\", \"\\n\") + \" \\n\\n\"\n",
    "        return value.replace(\"\\n {}\".format(USER_NAME), \"\\n\\n {}\".format(USER_NAME))\n",
    "    \n",
    "    def process(lines):\n",
    "        # Apply the same cleanup to every field an instance may carry; the\n",
    "        # original repeated the cleanup block verbatim for text/input/output.\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines[\"instances\"]):\n",
    "            for key in (\"text\", \"input\", \"output\"):\n",
    "                if key in line.keys():\n",
    "                    line[key] = clean(line[key])\n",
    "            new_lines.append(line)\n",
    "        return new_lines\n",
    "    \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = process(json.load(fr))\n",
    "    with open(test_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        test_lines = process(json.load(fr))\n",
    "    return train_lines, test_lines\n",
    "\n",
    "alpaca_train_lines, alpaca_test_lines = read_alpaca()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 406,
   "id": "189aa01d",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_EN_TRAIN_DATA.extend(alpaca_train_lines)\n",
    "# ALL_TEST_DATA.extend(alpaca_test_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 407,
   "id": "4d765bc6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: train_lines[0]=\n",
      " {'text': '[Human]: What is the force on a 1 kg mass due to the gravitational force? \\n [HugChat]: The force on a 1 kg mass due to the gravitational force is 9.8 N. \\n\\n'}\n",
      "\n",
      "train_num= 52002\n",
      "test_num= 52002\n"
     ]
    }
   ],
   "source": [
    "print(\"example: train_lines[0]=\\n\", alpaca_train_lines[55])\n",
    "print()\n",
    "print(\"train_num=\", len(alpaca_train_lines))\n",
    "# Fixed: previously reported len(alpaca_train_lines) for both counts.\n",
    "print(\"test_num=\", len(alpaca_test_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "11e09199",
   "metadata": {},
   "source": [
    "## 2. Belle_aplaca_cn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 408,
   "id": "671d66c9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████| 917424/917424 [00:04<00:00, 205595.76it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_alpaca_cn():\n",
    "    # Read the Belle open-source 1M Chinese instruction set (one JSON object\n",
    "    # per line) and format each record as a single-turn [Human]/[HugChat] text.\n",
    "    train_path = \"./alpaca_cn/Belle_open_source_1M.json\"\n",
    "    \n",
    "    def process(lines):\n",
    "        # Build one {\"text\": ...} record per JSONL line.\n",
    "        new_lines = list()\n",
    "        for ei, line in enumerate(tqdm(lines)):\n",
    "            line = json.loads(line.strip())\n",
    "            input_text = line[\"instruction\"]\n",
    "            input_text = \"{} {} \\n \".format(USER_NAME, input_text.strip())\n",
    "            output_text = \"{} {}\".format(OUR_NAME, line[\"output\"].strip())\n",
    "            text = input_text + output_text\n",
    "            # Collapse blank lines inside the turn, terminate the example with\n",
    "            # \" \\n\\n\", then restore the blank line before each human turn.\n",
    "            text = text.strip().replace(\"\\n\\n\", \"\\n\") + \" \\n\\n\"\n",
    "            text = text.replace(\"\\n {}\".format(USER_NAME), \"\\n\\n {}\".format(USER_NAME))\n",
    "            new_lines.append({\n",
    "                \"text\": text\n",
    "            })\n",
    "#             if \"[Human]: 你是谁\" in text:\n",
    "#                 print(ei)\n",
    "        return new_lines\n",
    "    \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        lines = fr.readlines()\n",
    "        train_lines = process(lines)\n",
    "\n",
    "\n",
    "    return train_lines\n",
    "\n",
    "alpaca_cn_train_lines = read_alpaca_cn()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 409,
   "id": "fe1882f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_ZH_TRAIN_DATA.extend(alpaca_cn_train_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 410,
   "id": "76620a9b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: alpaca_train_lines=\n",
      " {'text': '[Human]: 编写一篇文章，主题为“如何在家庭中培养生态友好型生活方式”。 \\n [HugChat]: 在如今越来越注重环境保护的时代，我们应该学会在日常生活中采取生态友好型的生活方式。以下是一些在家庭中培养生态友好型生活方式的建议：\\n1.节约用水\\n水是非常宝贵的资源，因此家庭中应该采取一些措施来节约用水。有些简单的方法包括：修复漏水的水龙头，减少冲厕所的次数以及减少洗澡的时间。此外，也可以安装节水装置来降低水的使用量。\\n2.垃圾分类\\n垃圾分类是一项非常重要的环保措施。在家庭中，我们应该把废纸、玻璃、塑料、金属等垃圾分离开，以便回收利用。同时，应该注意对一些有害垃圾进行正确的处理，例如电池、荧光灯等。\\n3.减少使用化学品\\n让我们的家庭环境更加安全和健康，我们应该尽可能减少使用化学物品。例如，我们可以选择使用环保清洁剂，而不是含有有害化学物质的清洁剂。同样，我们也应该尽可能选择无机的家具和装饰品。\\n4.适当使用电能\\n在家庭中使用电能也是我们应该注意的一点。要避免浪费电能，我们可以减少不必要的电器使用，比如打开电视却不看，使用更加节能的灯具等等。如果可能的话，还可以安装太阳能板等设备来更加环保地使用电能。\\n总之，生态友好型的生活方式能够让我们的家庭环境更加清洁和健康。我们每个人都应该承担起责任，在日常生活中积极采取措施来保护我们的环境和地球。 \\n\\n'}\n",
      "\n",
      "len= 917424\n"
     ]
    }
   ],
   "source": [
    "print(\"example: alpaca_train_lines=\\n\", alpaca_cn_train_lines[8589])\n",
    "print()\n",
    "print(\"len=\", len(alpaca_cn_train_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "742e5054",
   "metadata": {},
   "source": [
    "## 3. MultiTurn Chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 411,
   "id": "37d5d2aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████| 831036/831036 [00:08<00:00, 98505.84it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_multiturn_chat():\n",
    "    # Read Belle's 0.8M multi-turn chat JSONL and rewrite the Human/Assistant\n",
    "    # transcript markers into the [Human]/[HugChat] tags used everywhere else.\n",
    "    train_path = \"./multiturn_chat/multiturn_chat_0.8M.json\"\n",
    "    \n",
    "    def process(lines):\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines):\n",
    "            line = json.loads(line.strip())\n",
    "            input_text = line[\"instruction\"].strip()\n",
    "            # Swap source markers for our tags; the second replace catches any\n",
    "            # remaining \"Human:\" without a preceding newline (e.g. at the start).\n",
    "            input_text = input_text.replace(\"\\nHuman:\", \" \\n \" + USER_NAME).replace(\"Human:\", USER_NAME)\n",
    "            input_text = input_text.replace(\"\\nAssistant:\", \" \\n \" + OUR_NAME)\n",
    "            input_text = input_text.replace(\"\\n [Human]:\", \"\\n\\n [Human]:\")\n",
    "            input_text += \" \"\n",
    "            output_text = line[\"output\"].strip() + \" \\n\\n\"\n",
    "            text = input_text + output_text\n",
    "            # Normalize-then-restore: first remove the space after each tag,\n",
    "            # then re-add it, so every tag ends with exactly one space.\n",
    "            text = text.replace(\"[Human]: \", \"[Human]:\").replace(\"[Human]:\", \"[Human]: \")\n",
    "            text = text.replace(\"[HugChat]: \", \"[HugChat]:\").replace(\"[HugChat]:\", \"[HugChat]: \")\n",
    "            new_lines.append({\n",
    "                \"text\": text\n",
    "            })\n",
    "        return new_lines\n",
    "        \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = fr.readlines()\n",
    "        train_lines = process(train_lines)\n",
    "    \n",
    "    return train_lines\n",
    "\n",
    "multiturn_chat_train_lines = read_multiturn_chat()\n",
    "\n",
    "\n",
    "# print()\n",
    "# print(\"train_num=\", len(train_lines['instances']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 413,
   "id": "722165c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_ZH_TRAIN_DATA.extend(multiturn_chat_train_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 412,
   "id": "96ce81be",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: multiturn_chat_train_lines=\n",
      " {'text': '[Human]: 生成一段悬疑小说的开头。 \\n [HugChat]: 深夜，雨声敲打着窗户，一个年轻女子拖着疲惫的身躯走进了一家小酒馆。酒馆里的人们都低着头，没有人与她搭话，仿佛她身上有不祥的气息。女子坐在角落里，注视着一名男子，拿起一杯酒匆匆离去。整个过程充满了神秘感，让人不寒而栗。 \\n\\n [Human]: 请给我一些创作写作灵感。 \\n [HugChat]: 你可以试着写一篇关于一个人在旅行中遇到的故事，或者围绕着一段爱情故事展开你的想象。你也可以写一些关于科技或未来社会的故事，或者一些关于人类情感和人性的探讨。 \\n\\n [Human]: 回答一个封闭性问题，什么是机器学习？ \\n [HugChat]: 机器学习是一种人工智能的应用程序，通过训练一个模型，使其可以根据已有的数据来完成某些任务，如图片分类、语音转文本等。其基本思想是利用数据来推断规律，并通过不断的优化来提高模型的精度和鲁棒性。机器学习在各行业具有广阔的应用前景，能够为人类带来巨大的效益。 \\n\\n [Human]: 开放性问题，你认为未来几年人工智能的发展趋势是什么？ \\n [HugChat]: 未来几年人工智能会持续飞速发展，衍生出越来越多的新应用。在人工智能研究领域，深度学习、自然语言处理和计算机视觉等领域将得到更多的发展和突破。同时，人工智能在医疗、金融、交通等领域的应用将“智能化”程度进一步提高，为各行业带来巨大的效益。此外，人机协作、人工智能与物联网的结合也将是未来发展的重要趋势。 \\n\\n'}\n",
      "\n",
      "len= 831036\n"
     ]
    }
   ],
   "source": [
    "print(\"example: multiturn_chat_train_lines=\\n\", multiturn_chat_train_lines[243])\n",
    "print()\n",
    "print(\"len=\", len(multiturn_chat_train_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7dfc806d",
   "metadata": {},
   "source": [
    "## 4. GPT4 data clean❌"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "920217bd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████| 437605/437605 [00:03<00:00, 128052.02it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_gpt4_clean():\n",
    "    # Read the cleaned GPT4All dump (JSONL with \"prompt\"/\"response\" fields)\n",
    "    # and format each record as a single-turn [Human]/[HugChat] text.\n",
    "    train_path = \"./gpt4all_data_clean/data.jsonl\"\n",
    "    \n",
    "    def process(lines):\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines):\n",
    "            line = json.loads(line.strip())\n",
    "            # NOTE(review): drops 3 leading and 4 trailing characters of the raw\n",
    "            # prompt — presumably fixed wrapper tokens in this dump; confirm\n",
    "            # against the data file before changing.\n",
    "            input_text = line[\"prompt\"][3: -4]\n",
    "            input_text = \"{} {} \\n \".format(USER_NAME, input_text)\n",
    "            output_text = line[\"response\"].strip()\n",
    "            output_text = \"{} {} \\n\\n\".format(OUR_NAME, output_text)\n",
    "            text = input_text + output_text\n",
    "            new_lines.append({\n",
    "                \"text\": text\n",
    "            })\n",
    "        return new_lines\n",
    "        \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = fr.readlines()\n",
    "        train_lines = process(train_lines)\n",
    "    \n",
    "    return train_lines\n",
    "\n",
    "gpt4_clean = read_gpt4_clean()\n",
    "\n",
    "\n",
    "# print()\n",
    "# print(\"train_num=\", len(train_lines['instances']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 315,
   "id": "0635dfe0",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'gpt4_clean' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Input \u001b[0;32mIn [315]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m ALL_ZH_TRAIN_DATA\u001b[38;5;241m.\u001b[39mextend(\u001b[43mgpt4_clean\u001b[49m)\n",
      "\u001b[0;31mNameError\u001b[0m: name 'gpt4_clean' is not defined"
     ]
    }
   ],
   "source": [
    "# NOTE(review): gpt4_clean appears to be English data (see the example cell\n",
    "# below), yet it is extended into ALL_ZH_TRAIN_DATA rather than\n",
    "# ALL_EN_TRAIN_DATA — confirm which list is intended. The section header marks\n",
    "# this dataset as excluded, and the saved output shows a NameError, so this\n",
    "# cell may be deliberately skipped.\n",
    "ALL_ZH_TRAIN_DATA.extend(gpt4_clean)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "72fa0cec",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: gpt4_clean=\n",
      " {'text': 'Human: I do not know why but I have problems with this code. The banner is displayed on every page although it has specified the attribute $(location).attr(\\'href\\') you can help me?:</p>\\n\\n<pre><code>&lt;div id=\"bottombar\"&gt;\\n&lt;div class=\"bottom-content\"&gt;\\n&lt;a href=\"http://www.cliente.org/\" target=\"_blank\"&gt;&lt;img src=\"&lt;?php bloginfo(\\'stylesheet_directory\\'); ?&gt;/images/banner/bottom-logo.png\" alt=\"player-logo\" /&gt;&lt;/a&gt;\\n&lt;a href=\"http://www.cliente.org/\" target=\"_blank\"&gt;&lt;img src=\"&lt;?php bloginfo(\\'stylesheet_directory\\'); ?&gt;/images/banner/bottom-txt.png\" alt=\"player-slogan\" /&gt;&lt;/a&gt;\\n&lt;img src=\"&lt;?php bloginfo(\\'stylesheet_directory\\'); ?&gt;/images/banner/bottom-download.png\" alt=\"player-download\" /&gt;\\n&lt;/div&gt;\\n&lt;div id=\"bottombarClose\"&gt;&lt;p&gt;Chiudi&lt;/p&gt;&lt;/div&gt;\\n&lt;script type=\"text/javascript\"\"&gt;\\n$(document).ready(function() {\\nvar currentUrl = $(location).attr(\\'href\\');\\nif(currentUrl == \\'http://www.esempio.net/\\') \\n$(\\'#bottombar\\').show();\\n$(\"#bottombarClose\").click(function() {\\n$(\\'#bottombar\\').hide();\\n});\\n});\\n&lt;/script&gt;\\n&lt;/div&gt;\\n</code></pre>\\n\\n<p>CSS Code:</p>\\n\\n<pre><code>  div#bottombar {\\n  background-image: url(images/banner/player-bg3.png);\\n  background-repeat: repeat-x;\\n  bottom: 0;\\n  color: #FFFFFF;\\n  font-family: Arial,Helvetica,sans-serif;\\n  height: 100px;\\n  left: 0;\\n  margin: 0;\\n  position: fixed !important;\\n  width: 100%;\\n  z-index: 99999;\\n  display:none;\\n}\\n\\n.bottom-content {\\n  bottom: 0;\\n  height: 97px;\\n  left: 50%;\\n  margin-left: -495px;\\n  position: absolute;\\n  width: 960px;\\n  z-index: 10;\\n}\\n\\n#bottombarClose {\\n  cursor: pointer;\\n  float: right;\\n  padding: 55px 10px 0 0;\\n}\\n</code></ \\n HugChat: It is possible that the if statement is not evaluating to true because the currentUrl variable includes 
the protocol (http://) while the comparison string does not. Try changing the if statement to:\\n\\nif(currentUrl == \\'http://www.esempio.net/\\' || currentUrl == \\'https://www.esempio.net/\\') \\n\\nThis will allow the banner to display on both HTTP and HTTPS versions of the website. \\n\\n'}\n",
      "\n",
      "len= 437605\n"
     ]
    }
   ],
   "source": [
    "print(\"example: gpt4_clean=\\n\", gpt4_clean[1032])\n",
    "print()\n",
    "print(\"len=\", len(gpt4_clean))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "072e7d35",
   "metadata": {},
   "source": [
    "## Math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 414,
   "id": "ca2e7781",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████| 248481/248481 [00:01<00:00, 227410.36it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_math():\n",
    "    # Read Belle's 0.25M school-math JSONL and format each record as a\n",
    "    # single-turn [Human]/[HugChat] text, collapsing blank lines in both turns.\n",
    "    train_path = \"./math/school_math_0.25M.json\"\n",
    "    \n",
    "    def process(lines):\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines):\n",
    "            line = json.loads(line.strip())\n",
    "            input_text = line[\"instruction\"]\n",
    "            input_text = \"{} {} \\n \".format(USER_NAME, input_text)\n",
    "            input_text = input_text.replace(\"\\n\\n\", \"\\n\")\n",
    "            output_text = line[\"output\"].strip().replace(\"\\n\\n\", \"\\n\")\n",
    "            output_text = \"{} {} \\n\\n\".format(OUR_NAME, output_text)\n",
    "            text = input_text + output_text\n",
    "            new_lines.append({\n",
    "                \"text\": text\n",
    "            })\n",
    "        return new_lines\n",
    "        \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = fr.readlines()\n",
    "        train_lines = process(train_lines)\n",
    "    \n",
    "    return train_lines\n",
    "\n",
    "math_data = read_math()\n",
    "\n",
    "\n",
    "# print()\n",
    "# print(\"train_num=\", len(train_lines['instances']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 415,
   "id": "aa0b2aaa",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_ZH_TRAIN_DATA.extend(math_data)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 474,
   "id": "f4de0224",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: math_data=\n",
      " {'text': '[Human]: 小明家里有3个苹果，他给了小红1个，小明还剩几个苹果？ \\n [HugChat]: - 小明前有苹果：3 个 \\n- 小明给了小红一个：-1 个 \\n- 所以，小明还剩下的苹果是：2 个 \\n\\n'}\n",
      "\n",
      "len= 248481\n"
     ]
    }
   ],
   "source": [
    "print(\"example: math_data=\\n\", math_data[12])\n",
    "print()\n",
    "print(\"len=\", len(math_data))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ab3d088",
   "metadata": {},
   "source": [
    "## MedMCQA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 417,
   "id": "2959d05d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████| 182822/182822 [00:00<00:00, 438706.19it/s]\n",
      "100%|███████████████████████████████████| 4183/4183 [00:00<00:00, 436491.45it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_medmcqa():\n",
    "    # Read the MedMCQA train/validation splits and rewrite the\n",
    "    # Instruction/Input/Output markers into [Human]/[HugChat] tags.\n",
    "    train_path = \"./MedMCQA/train/train_182822.json\"\n",
    "    test_path = \"./MedMCQA/validation/valid_4183.json\"\n",
    "    \n",
    "    def clean(value):\n",
    "        # Strip Alpaca-format markers and rewrite speaker tags. (\"Defintion\"\n",
    "        # is a typo present in the raw data, so it is matched literally.)\n",
    "        value = value.replace(\"Defintion: More Information Needed \\n \", \"\")\n",
    "        value = value.replace(\"Instruction: \", \"\")\n",
    "        value = value.replace(\"Input: \", USER_NAME + \" \").replace(\"Output: \", OUR_NAME + \" \")\n",
    "        return value.strip().replace(\"\\n\\n\", \"\\n\") + \" \\n\\n\"\n",
    "    \n",
    "    def process(lines):\n",
    "        # Apply the same cleanup to every field an instance may carry; the\n",
    "        # original repeated the cleanup block verbatim for text/input/output.\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines[\"instances\"]):\n",
    "            for key in (\"text\", \"input\", \"output\"):\n",
    "                if key in line.keys():\n",
    "                    line[key] = clean(line[key])\n",
    "            new_lines.append(line)\n",
    "        return new_lines\n",
    "    \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = process(json.load(fr))\n",
    "    with open(test_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        test_lines = process(json.load(fr))\n",
    "    return train_lines, test_lines\n",
    "\n",
    "medmcqa_train_lines, medmcqa_test_lines = read_medmcqa()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 418,
   "id": "fa603341",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_EN_TRAIN_DATA.extend(medmcqa_train_lines)\n",
    "# ALL_TEST_DATA.extend(medmcqa_test_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 473,
   "id": "9333443b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: medmcqa_train_lines[0]=\n",
      " {'text': '[Human]: Question: True regarding the rubens valve is (A) Is a non-rebreathing valve (B) Will not allow spontaneous respiration (C) Is a pressure reducing valve (D) May jam in expiratory position. \\n [HugChat]: A. \\n\\n'}\n",
      "\n",
      "train_num= 182822\n",
      "test_num= 4183\n"
     ]
    }
   ],
   "source": [
    "print(\"example: medmcqa_train_lines[0]=\\n\", medmcqa_train_lines[1134])\n",
    "print()\n",
    "print(\"train_num=\", len(medmcqa_train_lines))\n",
    "print(\"test_num=\", len(medmcqa_test_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ab11428",
   "metadata": {},
   "source": [
    "## MedQA-USMLE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 420,
   "id": "770845e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████| 10178/10178 [00:00<00:00, 151293.67it/s]\n",
      "100%|███████████████████████████████████| 1273/1273 [00:00<00:00, 191966.24it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_medqa():\n",
    "    # Read the MedQA-USMLE train/validation splits and rewrite the\n",
    "    # Instruction/Input/Output markers into [Human]/[HugChat] tags.\n",
    "    train_path = \"./MedQA-USMLE/train/train_10178.json\"\n",
    "    test_path = \"./MedQA-USMLE/validation/valid_1273.json\"\n",
    "    \n",
    "    def clean(value):\n",
    "        # Strip Alpaca-format markers and rewrite speaker tags. (\"Defintion\"\n",
    "        # is a typo present in the raw data, so it is matched literally.)\n",
    "        value = value.replace(\"Defintion: More Information Needed \\n \", \"\")\n",
    "        value = value.replace(\"Instruction: \", \"\")\n",
    "        value = value.replace(\"Input: \", USER_NAME + \" \").replace(\"Output: \", OUR_NAME + \" \")\n",
    "        return value.strip().replace(\"\\n\\n\", \"\\n\") + \" \\n\\n\"\n",
    "    \n",
    "    def process(lines):\n",
    "        # Apply the same cleanup to every field an instance may carry; the\n",
    "        # original repeated the cleanup block verbatim for text/input/output.\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines[\"instances\"]):\n",
    "            for key in (\"text\", \"input\", \"output\"):\n",
    "                if key in line.keys():\n",
    "                    line[key] = clean(line[key])\n",
    "            new_lines.append(line)\n",
    "        return new_lines\n",
    "    \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = process(json.load(fr))\n",
    "    with open(test_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        test_lines = process(json.load(fr))\n",
    "    return train_lines, test_lines\n",
    "\n",
    "medqa_train_lines, medqa_test_lines = read_medqa()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 421,
   "id": "2dafb9f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "ALL_EN_TRAIN_DATA.extend(medqa_train_lines)\n",
    "# ALL_TEST_DATA.extend(medqa_test_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 422,
   "id": "99bf33e5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: medqa_train_lines[0]=\n",
      " {'text': '[Human]: Question: A 23-year-old pregnant woman at 22 weeks gestation presents with burning upon urination. She states it started 1 day ago and has been worsening despite drinking more water and taking cranberry extract. She otherwise feels well and is followed by a doctor for her pregnancy. Her temperature is 97.7°F (36.5°C), blood pressure is 122/77 mmHg, pulse is 80/min, respirations are 19/min, and oxygen saturation is 98% on room air. Physical exam is notable for an absence of costovertebral angle tenderness and a gravid uterus. Which of the following is the best treatment for this patient? (A) Ampicillin (B) Ceftriaxone (C) Doxycycline (D) Nitrofurantoin. \\n [HugChat]: D \\n\\n'}\n",
      "\n",
      "train_num= 10178\n",
      "test_num= 10178\n"
     ]
    }
   ],
   "source": [
    "print(\"example: medqa_train_lines[0]=\\n\", medqa_train_lines[0])\n",
    "print()\n",
    "print(\"train_num=\", len(medqa_train_lines))\n",
    "# Fixed: previously reported len(medqa_train_lines) for both counts.\n",
    "print(\"test_num=\", len(medqa_test_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4306fb6",
   "metadata": {},
   "source": [
    "## PubMedQA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 423,
   "id": "fd5fddfa",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████| 211269/211269 [00:01<00:00, 149299.95it/s]\n",
      "100%|███████████████████████████████████| 1000/1000 [00:00<00:00, 203547.70it/s]\n"
     ]
    }
   ],
   "source": [
    "def read_pubmedqa():\n",
    "    # Read the PubMedQA train/test splits and rewrite the\n",
    "    # Instruction/Input/Output markers into [Human]/[HugChat] tags.\n",
    "    train_path = \"./PubMedQA/train/train_211269.json\"\n",
    "    test_path = \"./PubMedQA/test/test_1000.json\"\n",
    "    \n",
    "    def clean(value):\n",
    "        # Strip Alpaca-format markers and rewrite speaker tags. Unlike the\n",
    "        # other readers, the original did NOT normalize whitespace here, so no\n",
    "        # strip()/newline collapsing is applied. (\"Defintion\" is a typo present\n",
    "        # in the raw data, so it is matched literally.)\n",
    "        value = value.replace(\"Defintion: More Information Needed \\n \", \"\")\n",
    "        value = value.replace(\"Instruction: \", \"\")\n",
    "        return value.replace(\"Input: \", USER_NAME + \" \").replace(\"Output: \", OUR_NAME + \" \")\n",
    "    \n",
    "    def process(lines):\n",
    "        # Apply the same cleanup to every field an instance may carry; the\n",
    "        # original repeated the cleanup block verbatim for text/input/output.\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines[\"instances\"]):\n",
    "            for key in (\"text\", \"input\", \"output\"):\n",
    "                if key in line.keys():\n",
    "                    line[key] = clean(line[key])\n",
    "            new_lines.append(line)\n",
    "        return new_lines\n",
    "    \n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = process(json.load(fr))\n",
    "    with open(test_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        test_lines = process(json.load(fr))\n",
    "    return train_lines, test_lines\n",
    "\n",
    "pubmedqa_train_lines, pubmedqa_test_lines = read_pubmedqa()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 424,
   "id": "31edf603",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add the PubMedQA train split to the English training pool.\n",
    "ALL_EN_TRAIN_DATA.extend(pubmedqa_train_lines)\n",
    "# ALL_TEST_DATA.extend(pubmedqa_test_lines)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 472,
   "id": "9a0ef2f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: pubmedqa_train_lines[0]=\n",
      " {'text': '[Human]: Context: Coronary artery calcification (CAC) is a prominent feature of atherosclerosis and is associated with cardiovascular events. In vitro studies have suggested that osteoprotegerin (OPG) and osteocalcin (OC) exert anticalcification potential in the vessel wall. The objective of this study was to investigate the association of CAC and serum bone biomarkers in persons with type 2 diabetes. We examined 50 individuals with type 2 diabetes. CAC imaging was performed by multidetector computed tomography. CAC scores ≥10, expressed in Agatston units, were considered abnormal. OC, undercarboxylated OC (ucOC), and OPG levels were determined by enzyme-linked immunosorbent assay. Abnormal CAC scores were found for 64% of the study cohort. OPG levels were significantly elevated (5.5 ± 2.0 pmol/L vs. 4.2 ± 1.7 pmol/L; P = .026) for those with abnormal CAC scores. No univariate differences were found for OC or ucOC. Logistic regression analyses revealed that an increase in serum OPG level was significantly associated with an increase in CAC score (odds ratio, 3.324; 95% confidence interval, 1.321 to 8.359; P = .011). Longer duration of diabetes was a significant covariate (P = .026), whereas nonsignificant covariates in the final model were age, gender, systolic blood pressure, body mass index, insulin resistance determined by the homeostasis model assessment for insulin resistance, leptin, adiponectin, and glycemic control. The Nagelkerke R2 for the model was 0.66. Neither OC nor ucOC were significantly associated with elevated CAC scores. Question: Is osteoprotegerin a Better Serum Biomarker of Coronary Artery Calcification than Osteocalcin in Type 2 Diabetes? \\n [HugChat]: yes \\n\\n'}\n",
      "\n",
      "train_num= 211269\n",
      "test_num= 211269\n"
     ]
    }
   ],
   "source": [
    "# FIX: the label claimed index [0] but index 1233 was printed; also the\n",
    "# \"test_num\" line printed the train count instead of the test count.\n",
    "print(\"example: pubmedqa_train_lines[0]=\\n\", pubmedqa_train_lines[0])\n",
    "print()\n",
    "print(\"train_num=\", len(pubmedqa_train_lines))\n",
    "print(\"test_num=\", len(pubmedqa_test_lines))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "61b2979e",
   "metadata": {},
   "source": [
    "## OID"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 426,
   "id": "88e112a7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['unified_soda_dialog.jsonl', 'unified_abstract_infill.jsonl', 'unified_unnatural_instructions.jsonl', 'unified_poetry_instructions.jsonl', 'unified_flan.jsonl', '.DS_Store', 'unified_multi_news.jsonl', 'unified_grade_school_math_instructions.jsonl', 'unified_chip2.jsonl', 'unified_joke_explanations.jsonl', 'unified_openai_summarize_tldr.jsonl', 'unified_merged_code_xp3.jsonl', 'unified_essays.jsonl', 'unified_conv_finqa.jsonl', 'unified_hc3_human.jsonl', 'unified_cuad.jsonl', 'unified_squad_v2.jsonl']\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from random import shuffle\n",
    "# NOTE(review): os.listdir order is platform-dependent; a later cell indexes\n",
    "# file_list by position, so a stable order (e.g. sorted) would be safer.\n",
    "file_list = os.listdir(\"./OID\")\n",
    "print(file_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 427,
   "id": "724e47b2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# OID corpora excluded from training (plus the macOS .DS_Store entry).\n",
    "ignore_files = [\n",
    "    '.DS_Store',\n",
    "    'unified_poetry_instructions.jsonl', 'unified_multi_news.jsonl', \n",
    "    'unified_merged_code_xp3.jsonl', 'unified_cuad.jsonl', \"unified_flan.jsonl\", \"unified_openai_summarize_tldr.jsonl\"\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 428,
   "id": "b2d6f57b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Language tag per OID corpus file. NOTE(review): most values are still\n",
    "# empty strings and this mapping is not referenced by the cells below --\n",
    "# confirm whether it is still needed or should be completed.\n",
    "language = {\n",
    "    \"unified_soda_dialog.jsonl\": \"en\",\n",
    "    \"unified_abstract_infill.jsonl\": \"en\",\n",
    "    \"unified_unnatural_instructions.jsonl\": \"en\",\n",
    "    \"unified_poetry_instructions.jsonl\": \"en\",\n",
    "    \"unified_flan.jsonl\": \"en\",\n",
    "    \"unified_multi_news.jsonl\": \"\",\n",
    "    \"unified_grade_school_math_instructions.jsonl\": \"\",\n",
    "    \"unified_chip2.jsonl\": \"\",\n",
    "    \"unified_joke_explanations.jsonl\": \"\",\n",
    "    \"unified_openai_summarize_tldr.jsonl\": \"\",\n",
    "    \"unified_merged_code_xp3.jsonl\": \"\",\n",
    "    \"unified_essays.jsonl\": \"\",\n",
    "    \"unified_conv_finqa.jsonl\": \"\",\n",
    "    \"unified_hc3_human.jsonl\": \"\",\n",
    "    \"unified_cuad.jsonl\": \"\",\n",
    "    \"unified_squad_v2.jsonl\": \"\",\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 429,
   "id": "7ca9ec27",
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_oid(train_path, max_lines=200000):\n",
    "    \"\"\"Read one OID jsonl corpus and convert it to the HugChat dialog format.\n",
    "\n",
    "    Args:\n",
    "        train_path: path to a unified_*.jsonl file (one JSON object per line).\n",
    "        max_lines: cap on the number of lines read per corpus\n",
    "            (generalizes the previously hard-coded 200000).\n",
    "\n",
    "    Returns a list of {\"text\": ...} dicts.\n",
    "    \"\"\"\n",
    "    def process(lines):\n",
    "        new_lines = list()\n",
    "        for line in tqdm(lines[:max_lines]):\n",
    "            line = json.loads(line.strip())\n",
    "            input_text = line[\"text\"]\n",
    "            # Map the corpus role markers onto the HugChat role tags.\n",
    "            input_text = input_text.replace(\"\\n<human>:\", \" \\n \" + USER_NAME).replace(\"<human>:\", USER_NAME)\n",
    "            input_text = input_text.replace(\"\\n<bot>:\" , \" \\n \" + OUR_NAME)\n",
    "            input_text = input_text.replace(\"\\nRosey: \", \" \\n \" + OUR_NAME + \" \")\n",
    "            input_text = input_text.replace(\"\\n\\n\", \"\\n\")\n",
    "            # Re-insert a blank line before each human turn as a separator.\n",
    "            input_text = input_text.replace(\"\\n {}\".format(USER_NAME), \"\\n\\n {}\".format(USER_NAME))\n",
    "            input_text = input_text.strip()  + \" \\n\\n\"\n",
    "            new_lines.append({\n",
    "                \"text\": input_text\n",
    "            })\n",
    "        return new_lines\n",
    "\n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "        train_lines = process(fr.readlines())\n",
    "\n",
    "    return train_lines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 378,
   "id": "8e855042",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "unified_hc3_human.jsonl\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████| 58546/58546 [00:00<00:00, 123622.97it/s]\n"
     ]
    }
   ],
   "source": [
    "# Select the corpus by name rather than by position: os.listdir order is\n",
    "# not stable, so file_list[14] could pick a different file on another run\n",
    "# (the recorded output shows unified_hc3_human.jsonl was intended).\n",
    "f_n = \"unified_hc3_human.jsonl\"\n",
    "print(f_n)\n",
    "train_lines = read_oid(\"./OID/{}\".format(f_n))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "id": "3ec2306b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shuffle in place so the preview below samples a random example.\n",
    "shuffle(train_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 470,
   "id": "e38c8eb1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'text': '[Human]: Are there any ethical considerations when building a chatbot? \\n [HugChat]: Absolutely! It is important to think carefully about ethical considerations when creating a chatbot. For example, chatbots should not be used to manipulate people or spread false information. All chatbot interactions should be respectful, transparent, and honest. Additionally, chatbots should have clear terms of use and privacy policies. \\n\\n [Human]: What are some of the tasks that a chatbot can do? \\n [HugChat]: Chatbots can do many tasks, such as answering questions, providing definitions and explanations, translating text from one language to another, summarizing text, generating text, writing stories, analyzing sentiment, providing recommendations, developing algorithms and creating code, and any other language-based tasks. \\n\\n'}"
      ]
     },
     "execution_count": 470,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Spot-check one converted example (rich display of the dict).\n",
    "train_lines[223]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b46e4738",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 430,
   "id": "86d801e2",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████| 200000/200000 [00:03<00:00, 66509.55it/s]\n",
      "100%|████████████████████████████████| 200000/200000 [00:04<00:00, 44894.16it/s]\n",
      "100%|███████████████████████████████| 200000/200000 [00:01<00:00, 153353.55it/s]\n",
      "100%|███████████████████████████████████| 8792/8792 [00:00<00:00, 153421.84it/s]\n",
      "100%|███████████████████████████████| 200000/200000 [00:01<00:00, 153553.70it/s]\n",
      "100%|█████████████████████████████████████| 356/356 [00:00<00:00, 162992.27it/s]\n",
      "100%|█████████████████████████████████████| 2064/2064 [00:00<00:00, 6723.28it/s]\n",
      "100%|████████████████████████████████████| 9111/9111 [00:00<00:00, 33574.60it/s]\n",
      "100%|█████████████████████████████████| 58546/58546 [00:00<00:00, 126435.88it/s]\n",
      "100%|██████████████████████████████████| 19586/19586 [00:00<00:00, 61175.68it/s]\n"
     ]
    }
   ],
   "source": [
    "# Convert every OID corpus that is not on the ignore list and pool them.\n",
    "oid_data = []\n",
    "for oid_file in file_list:\n",
    "    if oid_file not in ignore_files:\n",
    "        oid_data.extend(read_oid(\"./OID/{}\".format(oid_file)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 431,
   "id": "6fe5452f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add the filtered OID corpora to the English training pool.\n",
    "ALL_EN_TRAIN_DATA.extend(oid_data)\n",
    "# ALL_TRAIN_DATA.extend(hello_lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 468,
   "id": "904a93ef",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text': '[Human]: Can you give me a short story description for this dialog?\\n  Fatou: Hey, Dad. I need to talk to you about something.\\n  Dad: What\\'s up?\\n  Fatou: I\\'m pregnant.\\n  Dad: Oh, Fatou. I\\'m so sorry.\\n  Fatou: No, no, it\\'s okay! I\\'m happy about it, really. It\\'s just that, well, I\\'m not sure what to do next.\\n  Dad: Have you told the father yet?\\n  Fatou: Yeah, he\\'s really supportive and wants to help out however he can.\\n  Dad: That\\'s good. You\\'re lucky to have him be so supportive. But what about you? How are you feeling about all of this?\\n  Fatou: I\\'m scared but mostly excited. This is something I\\'ve always wanted but never thought would happen for me so it feels like a dream come true in a lot of ways. But then there are all these practical considerations and I don\\'t know what to do or where to start with any of it.\\n  Dad: Well, first things first, we need to figure out how you\\'re going to afford everything on your own since insurance probably won\\'t cover much of the pregnancy costs. Do you have any ideas?\\n  Fatou: Not really...I guess I could get a second job or maybe look into government assistance programs?\\n  Dad: Let\\'s look into those options and see what makes the most sense for our family budget-wise. As for everything else, we\\'ll just take it one step at a time and figure it out as we go along\\n  Fatou: That sounds like a good plan. Thanks, Dad.\\n  Dad: Of course, baby. I\\'m always here for you. \\n [HugChat]: Sure, a short story description for this dialog could be: \\n  Fatou tells her dad about the pregnancy because she wants to be a good daughter and she knows he will be supportive. \\n\\n [Human]: And a title? \\n [HugChat]: Sure, a title for this dialog could be: \\n  Fatou tells her dad because Fatou wants to be a good daughter. \\n\\n [Human]: What would be one theme of this story? \\n [HugChat]: One theme of this story could be: \"to be a good daughter\" \\n\\n'}\n"
     ]
    }
   ],
   "source": [
    "# Spot-check one converted OID example.\n",
    "print(oid_data[2324])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "bd823a11",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example: oid_data[0]=\n",
      " {'text': 'Human: Can you write short story and dialog based on the theme:\\n  being interested \\n HugChat: Sure, a short story and dialog based on the theme \"being interested\" could be:\\n  Now that she had a specific goal in mind, Jacquelin started looking for a new car. She began by comparing different models online, and then went to test drive them at the dealership. After doing some research and taking some test drives, Jacquelin decided on the perfect car for her.\\n  Jacquelin: I\\'m looking for a new car.\\n  Salesman: What kind of car are you interested in?\\n  Jacquelin: I\\'m not really sure. I just know that I want something different from what I have now.\\n  Salesman: Well, let\\'s take a look at our inventory and see if anything catches your eye.\\n  Jacquelin: Okay.\\n  Salesman: We have a lot of great cars here. What is your budget?\\n  Jacquelin: $15,000.\\n  Salesman: We have a few options within your budget. This Honda Civic is a great choice. It\\'s very reliable and has good gas mileage.\\n  Jacquelin: The Honda Civic is a great choice. I like the way it drives.\\n  Salesman: We also have this Toyota Camry. It\\'s a bit more expensive, but it\\'s a newer model and has more features.\\n  Jacquelin: I like the Toyota Camry. It\\'s a nice car.\\n  Salesman: Great! We\\'ll take a look at both of these options and see which one is the best for you. \\n\\n Human: And a title? \\n HugChat: Sure, a title for this dialog could be: \\n  Jacquelin starts looking. Now Jacquelin feels interested. \\n\\n'}\n",
      "\n",
      "train_num= 3012417\n",
      "test_num= 3012417\n"
     ]
    }
   ],
   "source": [
    "print(\"example: oid_data[0]=\\n\", oid_data[0])\n",
    "print()\n",
    "# FIX: the old cell printed len(oid_data) twice, under both \"train_num\"\n",
    "# and \"test_num\" labels -- OID has no test split, so the second line\n",
    "# was misleading and has been removed.\n",
    "print(\"train_num=\", len(oid_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "3846589e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len(ALL_TRAIN_DATA)= 5465685\n",
      "len(ALL_TEST_DATA)= 6708\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): ALL_TRAIN_DATA / ALL_TEST_DATA look stale -- the cells in\n",
    "# this chunk use ALL_EN_TRAIN_DATA / ALL_ZH_TRAIN_DATA / ALL_TRAIN, so this\n",
    "# cell may NameError on a fresh Restart & Run All. Confirm which names exist.\n",
    "print(\"len(ALL_TRAIN_DATA)=\", len(ALL_TRAIN_DATA))\n",
    "print(\"len(ALL_TEST_DATA)=\", len(ALL_TEST_DATA))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dcb7654c",
   "metadata": {},
   "source": [
    "# MOSS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 432,
   "id": "d526fb58",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['en_honesty.json', 'zh_honesty.json', 'en_helpfulness.json', 'en_harmlessness.json', 'zh_helpfulness.json']\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from random import shuffle\n",
    "# List the MOSS-style corpora, skipping the macOS .DS_Store entry.\n",
    "# NOTE(review): os.listdir order is platform-dependent; a later cell\n",
    "# indexes this list by position.\n",
    "moss_file_list = [i for i in os.listdir(\"./fudan/\") if i != \".DS_Store\" ]\n",
    "print(moss_file_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 433,
   "id": "7b67f6a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "def read_moss(train_path):\n",
    "    \"\"\"Load one MOSS-style JSON corpus and rebrand it as HugChat dialog.\n",
    "\n",
    "    Rewrites the MOSS turn markers (<eoh>/<eoa>, [MOSS]:) into the HugChat\n",
    "    format, swaps Fudan/MOSS identity strings (emails, URLs, lab names,\n",
    "    dates) for HugAI Lab equivalents, and drops any [Human] turn whose text\n",
    "    still mentions fudan/shlab afterwards. Returns a list of\n",
    "    {\"text\": ...} dicts.\n",
    "    \"\"\"\n",
    "    \n",
    "    def process(lines):\n",
    "        new_lines = list()\n",
    "        for ei, line in enumerate(tqdm(lines)):\n",
    "#             if \"en_honesty.json\" in train_path and ei in [6096, 6384]:\n",
    "#                 continue\n",
    "            # Turn-marker normalization first; the order of the replace\n",
    "            # calls below matters (specific addresses before generic \"MOSS\").\n",
    "            input_text = line[\"plain_text\"].replace(\"\\n\\n\", \"\\n\")\n",
    "            input_text = input_text.replace(\"<eoh>\", \" \\n\").replace(\"<eoa>\", \" \\n\\n\")\n",
    "            input_text = input_text.replace(\"[MOSS]:\", OUR_NAME)\n",
    "            \n",
    "            # Identity rebranding: contact info, URLs, lab/university names.\n",
    "            input_text = input_text.replace(\"fudannlp@fudan.edu.cn\", \"hugailab@gmail.com or lygwjn@gmail.com\")\n",
    "            input_text = input_text.replace(\"https://fudannlp.org/\", \"https://github.com/HugAILab/\")\n",
    "            input_text = input_text.replace(\"FudanNLP Lab\", \"HugAI Lab\").replace(\"Fudan University\", \"East China Normal University\")\n",
    "            input_text = input_text.replace(\"February 7, 2023\", \"May 2, 2023\")\n",
    "            input_text = input_text.replace(\"moss@fudan.edu.cn\", \"hugailab@gmail.com\").replace(\"fudan.edu.cn/moss\", \"https://github.com/HugAILab/HugNLP\")\n",
    "            input_text = input_text.replace(\"support@fudannlp.com.\", \"hugailab@gmail.com or lygwjn@gmail.com\").replace(\"http://fudannlp.com/moss\", \"https://github.com/HugAILab/HugNLP\")\n",
    "            input_text = input_text.replace(\"support@fudanNLP.com\", \"hugailab@gmail.com or lygwjn@gmail.com\").replace(\"http://www.fudannlp.org/.\", \"https://wjn1996.github.io/.\")\n",
    "            input_text = input_text.replace(\"MOSS\", \"HugChat\").replace(\"2023年2月7日\", \"2023年5月2日\")\n",
    "            input_text = input_text.replace(\"160亿\", \"100亿左右\")\n",
    "            input_text = input_text.replace(\"复旦大学自然语言处理实验室\", \"华东师范大学数据学院\").replace(\"上海人工智能实验室\", \"HugAI Lab\")\n",
    "            input_text = input_text.replace(\"复旦大学\", \"华东师范大学\").replace(\"复旦\", \"华师\")\n",
    "            \n",
    "            # Drop any [Human]-delimited segment still referencing fudan/shlab.\n",
    "            input_text_list = input_text.split(\"[Human]\")\n",
    "            new_input_text_list = list()\n",
    "            for tt in input_text_list:\n",
    "                if \"fudan\" in tt or \"shlab\" in tt:\n",
    "                    continue\n",
    "                new_input_text_list.append(tt)\n",
    "            input_text = \"[Human]\".join(new_input_text_list)\n",
    "            # Diagnostics: print indices of examples that still leak the\n",
    "            # old identity after all of the rewrites above.\n",
    "            if \"fudan\" in input_text:\n",
    "                print(ei)\n",
    "            if \"zhangyue yin\" in input_text:\n",
    "                print(ei)\n",
    "            if \"复旦\" in input_text:\n",
    "                print(ei)\n",
    "            new_lines.append({\n",
    "                \"text\": input_text\n",
    "            })\n",
    "        return new_lines\n",
    "        \n",
    "    # Each corpus file is a single JSON array of dialog records.\n",
    "    with open(train_path, \"r\", encoding=\"utf-8\") as fr:\n",
    "#         train_lines = fr.read().decode(encoding='gbk')\n",
    "#         train_lines = fr.readlines()\n",
    "        train_lines = json.load(fr)\n",
    "#         print(train_lines[0])\n",
    "        train_lines = process(train_lines)\n",
    "    \n",
    "    return train_lines\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 460,
   "id": "0c86f00c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "en_honesty.json\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████| 112580/112580 [00:05<00:00, 20838.37it/s]\n"
     ]
    }
   ],
   "source": [
    "# Convert a single MOSS corpus to preview the output format.\n",
    "# NOTE(review): relies on os.listdir order; index 0 was en_honesty.json here.\n",
    "f_n = moss_file_list[0]\n",
    "print(f_n)\n",
    "train_lines = read_moss(\"./fudan/{}\".format(f_n))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 465,
   "id": "69550f1c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text': \"[Human]: How can I measure the effectiveness of my chatbot's responses? \\n [HugChat]: There are several metrics that can be used to measure the effectiveness of a chatbot's responses. These include accuracy, speed, relevance, and usefulness. Accuracy measures how often the chatbot correctly responds to a query or request, while speed measures how quickly the chatbot responds. Relevance measures how closely the chatbot's responses match the user's intent, and usefulness measures the practicality of the response. \\n\\n [Human]: Can you provide some examples of accuracy metrics? \\n [HugChat]: Sure! Accuracy metrics can be measured by comparing the chatbot's response with a manually-created response. For example, one metric you could use is the BLEU score, which measures the similarity between the two outputs. Other metrics include precision, recall, and F1 score. \\n\\n [Human]: How can I improve accuracy? \\n [HugChat]: Improving accuracy can be done by training the chatbot on more data and optimizing the model's hyperparameters. Additionally, you can use data augmentation techniques such as adding noise or synthetically generating data to increase the diversity of the training data. Finally, you can use transfer learning to leverage the knowledge from existing models and datasets. \\n\\n\"}\n"
     ]
    }
   ],
   "source": [
    "# Spot-check one converted MOSS example.\n",
    "print(train_lines[934])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 434,
   "id": "55c30e55",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████| 112580/112580 [00:02<00:00, 50352.22it/s]\n",
      "100%|████████████████████████████████| 142885/142885 [00:01<00:00, 85406.56it/s]\n",
      "100%|████████████████████████████████| 419049/419049 [00:09<00:00, 43786.62it/s]\n",
      "100%|██████████████████████████████████| 38873/38873 [00:00<00:00, 56801.89it/s]\n",
      "100%|████████████████████████████████| 447750/447750 [00:05<00:00, 81726.80it/s]\n"
     ]
    }
   ],
   "source": [
    "# Convert every MOSS corpus and pool the results into one list.\n",
    "moss_data = []\n",
    "for moss_file in moss_file_list:\n",
    "    moss_data.extend(read_moss(\"./fudan/{}\".format(moss_file)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 475,
   "id": "75174793",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1161137\n"
     ]
    }
   ],
   "source": [
    "# Total number of converted MOSS examples.\n",
    "print(len(moss_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 435,
   "id": "8bd7067c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): moss_data also contains the en_* corpora from moss_file_list;\n",
    "# confirm the Chinese pool is the intended destination for all of them.\n",
    "ALL_ZH_TRAIN_DATA.extend(moss_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d38a1219",
   "metadata": {},
   "source": [
    "# MERGE ALL DATA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 440,
   "id": "1223d82d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len(zh)= 3158078\n",
      "len(en)= 1354726\n",
      "len(all)= 4512804\n"
     ]
    }
   ],
   "source": [
    "# Shuffle each language pool, then build the combined, shuffled training set.\n",
    "# NOTE(review): shuffle is unseeded, so the written corpora differ run-to-run.\n",
    "shuffle(ALL_ZH_TRAIN_DATA)\n",
    "print(\"len(zh)=\", len(ALL_ZH_TRAIN_DATA))\n",
    "shuffle(ALL_EN_TRAIN_DATA)\n",
    "print(\"len(en)=\", len(ALL_EN_TRAIN_DATA))\n",
    "ALL_TRAIN = ALL_ZH_TRAIN_DATA + ALL_EN_TRAIN_DATA\n",
    "shuffle(ALL_TRAIN)\n",
    "print(\"len(all)=\", len(ALL_TRAIN))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 443,
   "id": "6a2cdb05",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text': '[Human]: 给出三个单词，使用它们创建一个句子并输出结果。 \\n单词：猫，鱼，看 \\n [HugChat]: 猫喜欢看鱼。 \\n\\n'}\n"
     ]
    }
   ],
   "source": [
    "# Spot-check one example from the Chinese pool.\n",
    "print(ALL_ZH_TRAIN_DATA[43])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 441,
   "id": "5d32e5b2",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████| 1354726/1354726 [00:14<00:00, 90345.16it/s]\n"
     ]
    }
   ],
   "source": [
    "# Write the English pool as JSONL: {\"prefix\": instruct, \"text\": ...} per line.\n",
    "with open(\"instruction_en_corpora.json\", \"w\", encoding=\"utf-8\") as fw:\n",
    "    for line in tqdm(ALL_EN_TRAIN_DATA):\n",
    "        json_d = {}\n",
    "        # Guard against malformed entries (non-dict lines, missing \"text\").\n",
    "        # FIX: was a bare except, which also swallowed KeyboardInterrupt.\n",
    "        try:\n",
    "            json_d[\"prefix\"] = instruct\n",
    "            json_d[\"text\"] = line[\"text\"]\n",
    "        except (TypeError, KeyError):\n",
    "            print(type(line))\n",
    "            break\n",
    "        fw.write(json.dumps(json_d, ensure_ascii=False) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 442,
   "id": "a4226d82",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████| 3158078/3158078 [00:38<00:00, 82255.05it/s]\n"
     ]
    }
   ],
   "source": [
    "# Write the Chinese pool as JSONL: {\"prefix\": instruct, \"text\": ...} per line.\n",
    "with open(\"instruction_zh_corpora.json\", \"w\", encoding=\"utf-8\") as fw:\n",
    "    for line in tqdm(ALL_ZH_TRAIN_DATA):\n",
    "        json_d = {}\n",
    "        # Guard against malformed entries (non-dict lines, missing \"text\").\n",
    "        # FIX: was a bare except, which also swallowed KeyboardInterrupt.\n",
    "        try:\n",
    "            json_d[\"prefix\"] = instruct\n",
    "            json_d[\"text\"] = line[\"text\"]\n",
    "        except (TypeError, KeyError):\n",
    "            print(type(line))\n",
    "            break\n",
    "        fw.write(json.dumps(json_d, ensure_ascii=False) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 444,
   "id": "6eebf0e1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████| 4512804/4512804 [01:07<00:00, 67204.88it/s]\n"
     ]
    }
   ],
   "source": [
    "# Write the merged corpus as JSONL: {\"prefix\": instruct, \"text\": ...} per line.\n",
    "with open(\"instruction_corpora.json\", \"w\", encoding=\"utf-8\") as fw:\n",
    "    for line in tqdm(ALL_TRAIN):\n",
    "        json_d = {}\n",
    "        # Guard against malformed entries (non-dict lines, missing \"text\").\n",
    "        # FIX: was a bare except, which also swallowed KeyboardInterrupt.\n",
    "        try:\n",
    "            json_d[\"prefix\"] = instruct\n",
    "            json_d[\"text\"] = line[\"text\"]\n",
    "        except (TypeError, KeyError):\n",
    "            print(type(line))\n",
    "            break\n",
    "        fw.write(json.dumps(json_d, ensure_ascii=False) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "d3684918",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████| 10000/10000 [00:00<00:00, 116290.08it/s]\n"
     ]
    }
   ],
   "source": [
    "# Write a 10k-sample debug corpus.\n",
    "# FIX: was ALL_TRAIN_DATA, a stale name -- the merged list built above is\n",
    "# ALL_TRAIN. Also narrowed the bare except like the other writer cells.\n",
    "with open(\"instruction_corpora_small.json\", \"w\", encoding=\"utf-8\") as fw:\n",
    "    for line in tqdm(ALL_TRAIN[:10000]):\n",
    "        json_d = {}\n",
    "        try:\n",
    "            json_d[\"prefix\"] = instruct\n",
    "            json_d[\"text\"] = line[\"text\"]\n",
    "        except (TypeError, KeyError):\n",
    "            print(type(line))\n",
    "            break\n",
    "        fw.write(json.dumps(json_d, ensure_ascii=False) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 459,
   "id": "8976adcc",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████| 1354726/1354726 [00:46<00:00, 29407.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "54477\n",
      "1299211\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Length stats for the English pool: longest example (whitespace tokens)\n",
    "# and how many examples fit under 512 tokens.\n",
    "max_len = 0\n",
    "max_str = \"\"\n",
    "small = 0\n",
    "for line in tqdm(ALL_EN_TRAIN_DATA):\n",
    "    # Hoisted: the split was previously recomputed three times per line.\n",
    "    n_tokens = len(line[\"text\"].split(\" \"))\n",
    "    if n_tokens < 512:\n",
    "        small += 1\n",
    "    if n_tokens > max_len:\n",
    "        max_str = line[\"text\"]\n",
    "        max_len = n_tokens\n",
    "print(max_len)\n",
    "# print(max_str)\n",
    "print(small)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
