{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "refined-burton",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "continent-angola",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_file_name_list_from_dir_path(path):\n",
    "    \"\"\"Return the entry names in directory ``path``.\n",
    "\n",
    "    Skips editor backup files (names ending with \"~\") and empty names.\n",
    "    Bug fix: the original combined the two checks with ``or``, which is\n",
    "    always true (os.listdir never yields \"\"), so \"~\" backup files were\n",
    "    never actually filtered out.\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for f in os.listdir(path):\n",
    "        if not f.endswith(\"~\") and f != \"\":\n",
    "            res.append(f)\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "concrete-humanitarian",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_json_data_from_file_name(file_name,path_name='word_data'):\n",
    "    \"\"\"Load and parse the JSON document at ./<path_name>/<file_name>.\n",
    "\n",
    "    Raises ValueError for non-.json file names (the original signalled\n",
    "    this with a deliberate ZeroDivisionError via ``return 1/0``; the\n",
    "    caller's broad except still catches ValueError).\n",
    "    \"\"\"\n",
    "    # expected file names look like: word_1.json\n",
    "    if \".json\" not in file_name:\n",
    "        raise ValueError(\"not a .json file name: {}\".format(file_name))\n",
    "    # The files are written as a single JSON line, but read the whole\n",
    "    # file instead of readline() to be robust against extra newlines.\n",
    "    with open(\"./{}/{}\".format(path_name,file_name),\"r\",encoding='utf-8') as f:\n",
    "        return json.loads(f.read())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "criminal-footage",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_json_data_main_sx(json_data):\n",
    "    \"\"\"Extract the main attributes of one word from its raw JSON record.\n",
    "\n",
    "    Returns a dict with the word name, its ids, the phonetic-symbol\n",
    "    list, the meanings list, and the word type.\n",
    "    \"\"\"\n",
    "    res = {}\n",
    "\n",
    "    word_info = json_data[\"bookWordInfo\"]\n",
    "\n",
    "    # the word itself\n",
    "    res[\"wordName\"] = word_info[\"wordName\"]\n",
    "\n",
    "    # identifiers: word / group / unit / chapter\n",
    "    res[\"wordId\"] = json_data[\"wordId\"]\n",
    "    res[\"groupId\"] = json_data[\"groupId\"]\n",
    "    res[\"unitId\"] = json_data[\"unitId\"]\n",
    "    res[\"chapterId\"] = json_data[\"chapterId\"]\n",
    "\n",
    "    # phonetic symbols\n",
    "    res[\"enSymbols_list\"] = [s[\"enSymbols\"] for s in word_info[\"bookWordSymbolsList\"]]\n",
    "\n",
    "    # meanings\n",
    "    res[\"means_list\"] = [p[\"means\"] for p in word_info[\"bookWordPartsList\"]]\n",
    "\n",
    "    # word type -- standalone vs. derived:\n",
    "    # \"WORD\"   => standalone word, nothing extra needed\n",
    "    # \"DERIVE\" => derived word; must find its parent word and inherit\n",
    "    #             the parent's other attributes\n",
    "    # \"EXPAND\" => standalone word, nothing extra needed\n",
    "    res[\"wordType\"] = word_info[\"wordType\"]\n",
    "\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "painful-disorder",
   "metadata": {},
   "outputs": [],
   "source": [
    "def _extract_word_summary(word):\n",
    "    \"\"\"Return [wordName, enSymbols list, means list] for one bookWordInfoVo dict.\"\"\"\n",
    "    en_symbols = [s[\"enSymbols\"] for s in word[\"bookWordSymbolsList\"]]\n",
    "    means = [p[\"means\"] for p in word[\"bookWordPartsList\"]]\n",
    "    return [word[\"wordName\"], en_symbols, means]\n",
    "\n",
    "\n",
    "def _extract_sentence(item):\n",
    "    \"\"\"Return [english sentence, chinese translation, source description].\"\"\"\n",
    "    return [item[\"enSentences\"], item[\"chTranslation\"], item[\"sourceDesc\"]]\n",
    "\n",
    "\n",
    "def get_json_data_other_sx(json_data):\n",
    "    \"\"\"Extract the optional attributes of one word from its raw JSON record.\n",
    "\n",
    "    Each of the derives / exams / mnemonics / examples / expands sections\n",
    "    appears in the result only when the corresponding key exists in\n",
    "    json_data[\"bookWordInfo\"].  (The original re-used the loop index ``i``\n",
    "    at three nesting levels and copy-pasted the word-summary extraction;\n",
    "    both are factored into the helpers above.)\n",
    "    \"\"\"\n",
    "    word_info = json_data[\"bookWordInfo\"]\n",
    "\n",
    "    res = {}\n",
    "\n",
    "    # derived words\n",
    "    if \"bookWordDerives\" in word_info:\n",
    "        res[\"derives_list\"] = [\n",
    "            _extract_word_summary(d[\"bookWordInfoVo\"])\n",
    "            for d in word_info[\"bookWordDerives\"]\n",
    "        ]\n",
    "\n",
    "    # exam sentences\n",
    "    if \"bookWordExams\" in word_info:\n",
    "        res[\"exams_list\"] = [_extract_sentence(e) for e in word_info[\"bookWordExams\"]]\n",
    "\n",
    "    # mnemonics: charNumber is a 1-based \"start\" or \"start-end\" range into\n",
    "    # the word; it is replaced here by the substring it points at\n",
    "    if \"bookWordMnemonics\" in word_info:\n",
    "        word_name = word_info[\"wordName\"]\n",
    "        mnemonics_list = []\n",
    "        for m in word_info[\"bookWordMnemonics\"]:\n",
    "            parts = m[\"charNumber\"].split(\"-\")\n",
    "            start_index = int(parts[0]) - 1\n",
    "            end_index = start_index + 1 if len(parts) == 1 else int(parts[1])\n",
    "            mnemonics_list.append([word_name[start_index:end_index], m[\"mnemonicDesc\"]])\n",
    "        res[\"mnemonics_list\"] = mnemonics_list\n",
    "\n",
    "    # example sentences\n",
    "    if \"bookWordExamples\" in word_info:\n",
    "        res[\"examples_list\"] = [_extract_sentence(e) for e in word_info[\"bookWordExamples\"]]\n",
    "\n",
    "    # expansions: each entry is either a full word record or a plain\n",
    "    # description string\n",
    "    if \"bookWordExpands\" in word_info:\n",
    "        expands_list = []\n",
    "        for item in word_info[\"bookWordExpands\"]:\n",
    "            if \"bookWordInfoVo\" in item:\n",
    "                expands_list.append(_extract_word_summary(item[\"bookWordInfoVo\"]))\n",
    "            else:\n",
    "                expands_list.append(item[\"expandDesc\"])\n",
    "        res[\"expands_list\"] = expands_list\n",
    "\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "typical-gather",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_word_map_from_file_name(file_name):\n",
    "    \"\"\"Build {\"main_sx\": ..., \"other_sx\": ...} for one word file.\n",
    "\n",
    "    Derived words (wordType != \"WORD\") do not carry the optional\n",
    "    attributes themselves; they inherit them from the parent word when\n",
    "    the parent can be found, otherwise other_sx stays empty.  (The\n",
    "    original forced this fallback with ``a = 1/0`` inside a bare\n",
    "    ``except:``; the logic below is equivalent but explicit.)\n",
    "    \"\"\"\n",
    "    json_data = get_json_data_from_file_name(file_name=file_name)\n",
    "    main_sx = get_json_data_main_sx(json_data)\n",
    "\n",
    "    if main_sx[\"wordType\"] == \"WORD\":\n",
    "        other_sx = get_json_data_other_sx(json_data)\n",
    "    else:\n",
    "        # Best effort: any missing key / index / file / parse problem\n",
    "        # simply leaves other_sx empty, as before.\n",
    "        other_sx = {}\n",
    "        try:\n",
    "            parent = json_data[\"bookWordInfo\"][\"bookWordFamilyVos\"][0][\"bookWordInfoVo\"]\n",
    "            if parent[\"wordType\"] == \"WORD\":\n",
    "                parent_json_data = get_json_data_from_file_name(\n",
    "                    file_name=\"word_{}.json\".format(parent[\"wordId\"]))\n",
    "                other_sx = get_json_data_other_sx(parent_json_data)\n",
    "        except Exception:\n",
    "            other_sx = {}\n",
    "\n",
    "    return {\"main_sx\": main_sx, \"other_sx\": other_sx}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "peaceful-priest",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-save each raw data file under its wordId:\n",
    "# ./data/<name> -> ./word_data/word_<wordId>.json\n",
    "\n",
    "data_dir = \"./data/\"\n",
    "file_name_list = get_file_name_list_from_dir_path(data_dir)\n",
    "\n",
    "for i in file_name_list:\n",
    "    temp_i = get_json_data_from_file_name(file_name=i,path_name='data')\n",
    "    wordId = temp_i[\"wordId\"]\n",
    "\n",
    "    json_str_utf8 = json.dumps(temp_i,ensure_ascii=False)\n",
    "    # context manager guarantees the file is closed even on error\n",
    "    with open('./word_data/word_{}.json'.format(wordId),'w',encoding='utf-8') as f2:\n",
    "        f2.write(json_str_utf8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "alternative-addiction",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect the per-word JSON file names that will be processed below.\n",
    "word_data_dir = \"./word_data/\"\n",
    "word_data_file_name_list = get_file_name_list_from_dir_path(word_data_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "disturbed-portsmouth",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse every per-word file into its extracted word map.\n",
    "zong_data_list = [\n",
    "    get_word_map_from_file_name(file_name=name)\n",
    "    for name in word_data_file_name_list\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "respiratory-partner",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "4721"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# total number of words extracted\n",
    "len(zong_data_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "independent-staff",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-key the list by word name: wordName -> full word map\n",
    "# (later duplicates overwrite earlier ones, same as the loop version).\n",
    "zong_data_dic = {\n",
    "    item[\"main_sx\"][\"wordName\"]: item for item in zong_data_list\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "continued-practitioner",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save everything as one file (ASCII-escaped): zong_data_json.json\n",
    "json_str = json.dumps(zong_data_dic,sort_keys =True)\n",
    "# context manager guarantees the file is closed even on error\n",
    "with open('zong_data_json.json','w',encoding='utf-8') as f2:\n",
    "    f2.write(json_str)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "external-israel",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save everything as one UTF-8 file (readable CJK): zong_data_json_utf8.json\n",
    "json_str_utf8 = json.dumps(zong_data_dic,sort_keys =True,ensure_ascii=False)\n",
    "# context manager guarantees the file is closed even on error\n",
    "with open('zong_data_json_utf8.json','w',encoding='utf-8') as f2:\n",
    "    f2.write(json_str_utf8)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
