{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d6c422b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c361f9b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import jieba"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6b959afb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import fasttext"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "bb41b67d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Warning : `load_model` does not return WordVectorModel or SupervisedModel any more, but a `FastText` object which is very similar.\n"
     ]
    }
   ],
   "source": [
    "# Load the pretrained Chinese fastText vectors (cc.zh.300.bin; '300' in the\n",
    "# filename indicates 300-dimensional embeddings). Slow: loads a large binary.\n",
    "fasttext_model = fasttext.load_model('../../dataset/fastText/cc.zh.300.bin')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c42f76fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the Uyghur-to-Chinese category keyword mapping table\n",
    "# (columns: keywords_uy, keywords_cn).\n",
    "# NOTE(review): this path uses '../datasets/' while the fastText model cell\n",
    "# uses '../../dataset/' - confirm which directory layout is correct.\n",
    "cat_uy2cn_df = pd.read_csv('../datasets/商品分类关键词_维汉对照.csv', encoding='utf-8-sig', header=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e71c61cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the Chinese-only category keyword table (column: keywords).\n",
    "cat_cn_df = pd.read_csv('../datasets/商品分类关键词_汉语.csv', encoding='utf-8-sig', header=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "b6d1a47a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>keywords_uy</th>\n",
       "      <th>keywords_cn</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>ماللار تۈرى</td>\n",
       "      <td>货品</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>ئۆي جابدۇقلىرى</td>\n",
       "      <td>家具</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>شەخسىي ساغلاملىق</td>\n",
       "      <td>个人健康</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>كىيىم-زىبۇزىننەت</td>\n",
       "      <td>服饰</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>ئېلېكتىر سايمانلىرى</td>\n",
       "      <td>电器</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "           keywords_uy keywords_cn\n",
       "0          ماللار تۈرى          货品\n",
       "1       ئۆي جابدۇقلىرى          家具\n",
       "2     شەخسىي ساغلاملىق        个人健康\n",
       "3     كىيىم-زىبۇزىننەت          服饰\n",
       "4  ئېلېكتىر سايمانلىرى          电器"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cat_uy2cn_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "82ca956a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build parallel keyword lists from the Uyghur-Chinese mapping table;\n",
    "# element i of each list belongs to the same row.\n",
    "keywords_uy = cat_uy2cn_df['keywords_uy'].tolist()\n",
    "keywords_cn = cat_uy2cn_df['keywords_cn'].tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "424784f3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['ماللار تۈرى',\n",
       " 'ئۆي جابدۇقلىرى',\n",
       " 'شەخسىي ساغلاملىق',\n",
       " 'كىيىم-زىبۇزىننەت',\n",
       " 'ئېلېكتىر سايمانلىرى']"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "keywords_uy[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "5adf2293",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['货品', '家具', '个人健康', '服饰', '电器']"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "keywords_cn[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "12be9b6e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the Chinese-only keyword list; used below as the pool of\n",
    "# match candidates for each keywords_cn entry.\n",
    "keywords_cn2 = cat_cn_df['keywords'].tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "169d0c8d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['游戏话费', '服装鞋包', '手机数码', '家用电器', '美妆饰品']"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "keywords_cn2[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "0fd88538",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Look up the embedding of a single word in the loaded fastText model.\n",
    "def get_word_vector(word, model=fasttext_model):\n",
    "    \"\"\"Return the fastText embedding vector for `word`.\"\"\"\n",
    "    return model.get_word_vector(word)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "8be4dd8e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a sentence vector by averaging the word vectors of its jieba tokens.\n",
    "def get_sentence_vector(sentence):\n",
    "    \"\"\"Return the mean fastText vector over the jieba tokens of `sentence`.\n",
    "\n",
    "    Returns an all-zero vector when the sentence produces no tokens,\n",
    "    instead of the old `None / 0` failure path.\n",
    "    \"\"\"\n",
    "    cut_words = jieba.lcut(sentence)\n",
    "\n",
    "    # Guard: jieba yields no tokens for an empty/whitespace string; the old\n",
    "    # code then divided None (or 0-length sums) by zero.\n",
    "    if not cut_words:\n",
    "        return np.zeros(fasttext_model.get_dimension(), dtype=np.float32)\n",
    "\n",
    "    word_vectors = [get_word_vector(word) for word in cut_words]\n",
    "    return np.mean(word_vectors, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "b5abb8c1",
   "metadata": {},
   "outputs": [],
   "source": [
    "def cos_sim(vector_a, vector_b):\n",
    "    \"\"\"\n",
    "    Cosine similarity between two vectors, rescaled from [-1, 1] to [0, 1].\n",
    "    :param vector_a: vector a\n",
    "    :param vector_b: vector b\n",
    "    :return: similarity in [0, 1]; 0.0 if either vector has zero norm\n",
    "    \"\"\"\n",
    "    # np.mat is deprecated; plain 1-D arrays with np.dot are equivalent here.\n",
    "    vector_a = np.asarray(vector_a, dtype=np.float64).ravel()\n",
    "    vector_b = np.asarray(vector_b, dtype=np.float64).ravel()\n",
    "    denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b)\n",
    "    # A zero-norm vector previously caused 0/0 -> NaN (the RuntimeWarning\n",
    "    # 'invalid value encountered in true_divide'); treat it as no similarity.\n",
    "    if denom == 0:\n",
    "        return 0.0\n",
    "    cos = float(np.dot(vector_a, vector_b)) / denom\n",
    "    return 0.5 + 0.5 * cos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "5c8ce0be",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accumulators for the matching loop below: pairs whose best similarity\n",
    "# reaches 0.8 go to uy_cn_keymap, the rest to uy_cn_unkeymap.\n",
    "uy_cn_keymap = []\n",
    "uy_cn_unkeymap = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f047f2e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Dumping model to file cache C:\\Users\\Alexpad\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 1.403 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "D:\\envs\\Python\\Python37\\lib\\site-packages\\ipykernel_launcher.py:12: RuntimeWarning: invalid value encountered in true_divide\n",
      "  if sys.path[0] == '':\n"
     ]
    }
   ],
   "source": [
    "# For each Uyghur/Chinese keyword pair, find the most similar Chinese\n",
    "# category keyword and split matches at similarity threshold 0.8.\n",
    "for uy, cn in zip(keywords_uy, keywords_cn):\n",
    "    a = get_sentence_vector(cn)\n",
    "    max_sim = 0\n",
    "    most_sim_word = ''\n",
    "    for cn2 in keywords_cn2:\n",
    "        b = get_sentence_vector(cn2)\n",
    "        sim = cos_sim(a, b)  # compute once instead of twice per candidate\n",
    "        if sim > max_sim:\n",
    "            most_sim_word = cn2\n",
    "            max_sim = sim\n",
    "    # Decide once per keyword pair, after scanning all candidates. The old\n",
    "    # code ran this check inside the inner loop, so a matched pair was\n",
    "    # appended/printed once for every remaining candidate.\n",
    "    if max_sim >= 0.8:\n",
    "        uy_cn_keymap.append([uy, cn, most_sim_word, max_sim])\n",
    "        print(uy, cn, most_sim_word, max_sim)\n",
    "    else:\n",
    "        uy_cn_unkeymap.append([uy, cn, most_sim_word, max_sim])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70622d90",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "8f6ab731",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the matched keyword map. `keywords_uy2cn_df` was never defined\n",
    "# anywhere above (NameError on a fresh run); build it from `uy_cn_keymap`,\n",
    "# which the matching loop fills with [uy, cn, matched_cn, similarity] rows.\n",
    "keywords_uy2cn_df = pd.DataFrame(uy_cn_keymap, columns=['keywords_uy', 'keywords_cn', 'matched_cn', 'similarity'])\n",
    "keywords_uy2cn_df.to_csv('../datasets/123.csv', encoding='utf-8-sig', header=True, index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
