{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import findspark\n",
    "findspark.init()\n",
    "import pyspark\n",
    "from pyspark.sql import SparkSession\n",
    "from pyspark.sql.functions import col, locate, regexp_extract, array_position, udf, expr, when\n",
    "from pyspark.sql.types import StructType, StructField, StringType, ArrayType"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start (or reuse) a Spark session with a large driver heap and the\n",
    "# Spark NLP assembly jar on the classpath.\n",
    "spark = SparkSession.builder \\\n",
    "    .appName(\"Spark NLP\")\\\n",
    "    .config(\"spark.driver.memory\",\"20g\")\\\n",
    "    .config(\"spark.driver.maxResultSize\",\"20g\")\\\n",
    "    .config(\"spark.jars\", \"spark-nlp-assembly-3.3.4.jar\")\\\n",
    "    .getOrCreate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------------------------+--------------------+-------------------------+--------------------+----------------------+\n",
      "|                             sentences| pinyin_without_tone|pinyin_without_tone_split|    pinyin_with_tone|pinyin_with_tone_split|\n",
      "+--------------------------------------+--------------------+-------------------------+--------------------+----------------------+\n",
      "|              就这样两个人一起度过的话|'ou', 'e', 'an', ...|     [4, 9, 15, 21, 26...|'ou4', 'e4', 'an4...|  [5, 11, 18, 25, 3...|\n",
      "|                                  重复|          'ong', 'u'|                      [5]|        'ong2', 'u4'|                   [6]|\n",
      "|              问交通指示灯一切会好转吗|'en', 'ao', 'ong'...|     [4, 10, 17, 23, 2...|'en4', 'ao1', 'on...|  [5, 12, 20, 27, 3...|\n",
      "|                    天呐现在才早上七点|'ian', 'a', 'ian'...|     [5, 10, 17, 23, 2...|'ian1', 'a4', 'ia...|  [6, 12, 20, 27, 3...|\n",
      "|                  先生请坐我来帮您介绍|'ian', 'en', 'in'...|     [5, 11, 17, 22, 2...|'ian1', 'en', 'in...|  [6, 12, 19, 25, 3...|\n",
      "|                              春告風吹|'en', 'ao', 'en',...|              [4, 10, 16]|'en1', 'ao4', 'en...|           [5, 12, 19]|\n",
      "|                  风在等着蝴蝶平息怒火|'en', 'ai', 'en',...|     [4, 10, 16, 22, 2...|'en1', 'ai4', 'en...|  [5, 12, 19, 26, 3...|\n",
      "|                                  大切|           'a', 'ie'|                      [3]|         'a4', 'ie1'|                   [4]|\n",
      "|                    我想去到安静的地方|'o', 'an', 'i', '...|     [3, 9, 14, 20, 26...|'o3', 'an3', 'i4'...|  [4, 11, 17, 24, 3...|\n",
      "|                      给我全身心投入啊|'ei', 'o', 'an', ...|     [4, 9, 15, 21, 27...|'ei3', 'o3', 'an2...|  [5, 11, 18, 25, 3...|\n",
      "|                            真叫人安心|'en', 'ao', 'en',...|          [4, 10, 16, 22]|'en1', 'ao4', 'en...|       [5, 12, 19, 26]|\n",
      "|                            要是我的话|'ao', 'II', 'o', ...|          [4, 10, 15, 20]|'ao4', 'II', 'o3'...|       [5, 11, 17, 22]|\n",
      "|            剪断天使之翼现在我无比残破|'ian', 'an', 'ian...|     [5, 11, 18, 24, 3...|'ian3', 'an4', 'i...|  [6, 13, 21, 28, 3...|\n",
      "|                        感受不到我自己|'an', 'ou', 'u', ...|     [4, 10, 15, 21, 2...|'an3', 'ou4', 'u4...|  [5, 12, 18, 25, 3...|\n",
      "|                题为爱情但满篇只有伤痛|'i', 'ei', 'ai', ...|     [3, 9, 15, 21, 27...|'i2', 'ei2', 'ai4...|  [4, 11, 18, 25, 3...|\n",
      "|            全都翘掉以后发行的首张唱片|'an', 'ou', 'ao',...|     [4, 10, 16, 22, 2...|'an2', 'ou1', 'ao...|  [5, 12, 19, 26, 3...|\n",
      "|在你进入我生命之前一切都只是单调的黑白|'ai', 'i', 'in', ...|     [4, 9, 15, 20, 25...|'ai4', 'i3', 'in4...|  [5, 11, 18, 24, 3...|\n",
      "|                最好的东西不过是你的吻|'ei', 'ao', 'e', ...|     [4, 10, 15, 22, 2...|'ei4', 'ao3', 'e'...|  [5, 12, 17, 25, 3...|\n",
      "|                                君何好|     'vn', 'e', 'ao'|                   [4, 9]|  'vn1', 'e2', 'ao3'|               [5, 11]|\n",
      "|                                君届音|    'vn', 'ie', 'in'|                  [4, 10]| 'vn1', 'ie4', 'in1'|               [5, 12]|\n",
      "+--------------------------------------+--------------------+-------------------------+--------------------+----------------------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Load the precomputed sentence/rhyme lookup table (schema visible in the\n",
    "# output below) and cache it, since every lookup filters this frame.\n",
    "# NOTE(review): 304 partitions is presumably tuned to the machine -- confirm.\n",
    "df = spark.read.format(\"orc\").load(\"rhyme/sentences.orc\").repartition(304).cache()\n",
    "df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pypinyin import lazy_pinyin, Style\n",
    "\n",
    "from typing import List, Tuple\n",
    "#from phase import word_parser\n",
    "import time\n",
    "import re\n",
    "def word_parser(sentence: str) -> Tuple[List[str], List[str]]:\n",
    "    \"\"\"Return (rhymes, rhymes_with_tone) for each syllable of ``sentence``.\"\"\"\n",
    "    pinyins = lazy_pinyin(sentence)\n",
    "    rhys = pinyin_parser(pinyins)\n",
    "    rhys_tone = add_tone(sentence, rhys)\n",
    "    return rhys, rhys_tone\n",
    "\n",
    "def add_tone(sentence, ryhs):\n",
    "    \"\"\"Append each syllable's tone digit (1-4) to its rhyme.\n",
    "\n",
    "    Tones come from pypinyin's FINALS_TONE3 style, which suffixes the\n",
    "    digit to the final; neutral-tone syllables carry no digit and their\n",
    "    rhyme is kept unchanged.\n",
    "    \"\"\"\n",
    "    pinyins_tone = lazy_pinyin(sentence, style=Style.FINALS_TONE3)\n",
    "    result = []\n",
    "    for i, ryh in enumerate(ryhs):\n",
    "        tone = pinyins_tone[i][-1]\n",
    "        # membership test replaces the former four-way '==' chain\n",
    "        result.append(ryh + tone if tone in '1234' else ryh)\n",
    "    return result\n",
    "\n",
    "\n",
    "def pinyin_parser(pinyins: List[str]) -> List[str]:\n",
    "    \"\"\"Reduce each pinyin syllable to its (weakened) rhyme final.\n",
    "\n",
    "    Fixed the return annotation: each element is a plain string produced\n",
    "    by weak_rhyme(), not a list.\n",
    "    \"\"\"\n",
    "    parsed_pinyins = []\n",
    "    for pinyin in pinyins:\n",
    "        pinyin = pinyin_correction(pinyin)\n",
    "        _, vowel = split_cv(pinyin)  # the initial consonant is irrelevant for rhyming\n",
    "        parsed_pinyins.append(weak_rhyme(vowel_parser(vowel)))\n",
    "    return parsed_pinyins\n",
    "\n",
    "\n",
    "def pinyin_correction(pinyin: str) -> str:\n",
    "    \"\"\"Normalise a pypinyin syllable to a canonical full spelling.\n",
    "\n",
    "    Undoes Hanyu Pinyin orthographic abbreviations (y-/w- spellings and\n",
    "    the iu/ui/un contractions) and marks the apical vowels after z/c/s\n",
    "    as 'I' and after zh/ch/sh/r as 'II' so split_cv() cannot confuse\n",
    "    them with the true vowel 'i'.\n",
    "    \"\"\"\n",
    "\n",
    "    # z/c/s + i -> z/c/s + I\n",
    "    if re.match(r'[zcs]i$', pinyin):\n",
    "        return pinyin.replace('i', 'I')\n",
    "\n",
    "    # zh/ch/sh/r + i -> zh/ch/sh/r + II\n",
    "    elif re.match(r'(?:zh|ch|sh|r)i$', pinyin):\n",
    "        return pinyin.replace('i', 'II')\n",
    "\n",
    "    # j/q/x/y + u/ue/un/uan -> j/q/x + v/ve/vn/van | v/ve/vn/van\n",
    "    # yu == yi\n",
    "    elif re.match(r'[jqxy]u', pinyin):\n",
    "        if pinyin.endswith('u'):\n",
    "            return re.sub(r'y*(.+)', r'\\1', pinyin.replace('u', 'i'))\n",
    "        return re.sub(r'y*(.+)', r'\\1', pinyin.replace('u', 'v'))\n",
    "\n",
    "    # y + a/e/ao/ou/an/ in/iang/ing/iong -> ia/ie/iao/iou/ian/ in/iang/ing/iong\n",
    "    elif pinyin.startswith(\"y\"):\n",
    "        return re.sub(r'yi*(.*)', r'i\\1', pinyin)\n",
    "\n",
    "    # w + u/a/o/ai/ei/an/en/ang/eng        -> u/ua/uo/uai/uei/uan/uen/uang/ueng\n",
    "    elif pinyin.startswith(\"w\"):\n",
    "        return re.sub(r'wu*(.*)', r'u\\1', pinyin)\n",
    "\n",
    "    # qiu -> qiou\n",
    "    elif pinyin.endswith('iu'):\n",
    "        return pinyin.replace('iu', 'iou')\n",
    "\n",
    "    # cui -> cuei\n",
    "    elif pinyin.endswith('ui'):\n",
    "        return pinyin.replace('ui', 'uei')\n",
    "\n",
    "    # lun -> luen\n",
    "    elif pinyin.endswith('un'):\n",
    "        return pinyin.replace('un', 'uen')\n",
    "\n",
    "\n",
    "    return pinyin\n",
    "\n",
    "\n",
    "def split_cv(pinyin):\n",
    "    # Split into (initial consonant, final): the repeated first group eats\n",
    "    # zh/ch/sh or single non-vowel letters, the second group captures the\n",
    "    # rest from the first vowel (a/e/i/I/o/u/v) onwards. With a repeated\n",
    "    # group, re keeps only the last repetition, which is fine here since\n",
    "    # only the final (second element) is used downstream.\n",
    "    return re.findall(r'(ch|zh|sh|[^aeiIouv])*(.+)', pinyin)[0]\n",
    "\n",
    "\n",
    "def vowel_parser(vowel: str) -> str:\n",
    "    \"\"\"Drop a leading medial u/i where it does not change the rhyme.\"\"\"\n",
    "    if vowel == 'van':\n",
    "        return 'an'\n",
    "\n",
    "    if len(vowel) > 1:\n",
    "        head, rest = vowel[0], vowel[1:]\n",
    "        if head == 'u':\n",
    "            return rest\n",
    "        # keep 'in'/'ing' (a single vowel) and 'ie'/'ian' (these sound\n",
    "        # different from bare 'e' and 'an')\n",
    "        if head == 'i' and rest[0] not in ('n', 'e') and rest != 'an':\n",
    "            return rest\n",
    "\n",
    "    return vowel\n",
    "\n",
    "def weak_rhyme(pinyin):\n",
    "    \"\"\"Merge finals that rhyme loosely.\n",
    "\n",
    "    The nasal codas eng/ang/ing are reduced to en/an/in, and the\n",
    "    ui/ie/ue finals are all mapped onto ei.\n",
    "    \"\"\"\n",
    "    if pinyin.endswith('eng'):\n",
    "        return pinyin.replace('eng', 'en')\n",
    "\n",
    "    elif pinyin.endswith('ang'):\n",
    "        return pinyin.replace('ang', 'an')\n",
    "\n",
    "    elif pinyin.endswith('ing'):\n",
    "        return pinyin.replace('ing', 'in')\n",
    "\n",
    "    elif pinyin.endswith('ui'):\n",
    "        return pinyin.replace('ui', 'ei')\n",
    "\n",
    "    elif pinyin.endswith('ie'):\n",
    "        # Bug fix: this branch previously called replace('ui', 'ei'),\n",
    "        # which is a no-op for a string ending in 'ie', so 'ie' was\n",
    "        # returned unchanged.\n",
    "        # NOTE(review): the precomputed ORC table appears to have been\n",
    "        # built with the old behaviour (its sample rows contain 'ie'\n",
    "        # finals) -- regenerate it so lookups stay consistent.\n",
    "        return pinyin.replace('ie', 'ei')\n",
    "\n",
    "    elif pinyin.endswith('ue'):\n",
    "        return pinyin.replace('ue', 'ei')\n",
    "\n",
    "    return pinyin\n",
    "\n",
    "\n",
    "def find_rhyme_without_tone(word):\n",
    "    \"\"\"Return the 100 most frequent same-length snippets rhyming with ``word``.\n",
    "\n",
    "    Matches the toneless rhyme sequence of ``word`` against the\n",
    "    precomputed pinyin_without_tone column of the global ``df``.\n",
    "    \"\"\"\n",
    "    # str(list)[1:-1] reproduces the comma-separated, quoted text format\n",
    "    # stored in the pinyin_without_tone column.\n",
    "    word_pinyin = str(word_parser(word)[0])[1:-1]\n",
    "    filtered = df.filter(df.pinyin_without_tone.contains(word_pinyin))\n",
    "    sentences = filtered.select(df.sentences, locate(word_pinyin, df.pinyin_without_tone).alias('position'),\n",
    "                                df.pinyin_without_tone_split)\n",
    "\n",
    "    # Map the 1-based match offset back to a character index in the original\n",
    "    # sentence via the precomputed split-position array.\n",
    "    # NOTE(review): the position - 3 offset presumably compensates for the\n",
    "    # quote/separator characters before a match -- confirm against the\n",
    "    # generator of the ORC table.\n",
    "    sentences = sentences.withColumn('find_result', expr(\n",
    "        \"IF(position==1, 0, array_position(pinyin_without_tone_split, position - 3) + 1)\"))\n",
    "    sentences = sentences.withColumn('rhyme', expr(f\"substring(sentences, find_result, {len(word)})\"))\n",
    "    # start = time.time()\n",
    "    # sentences.show()\n",
    "    return sentences.groupBy(\"rhyme\").count().sort('count', ascending=False).limit(100)\n",
    "\n",
    "\n",
    "def find_rhyme_with_tone(word):\n",
    "    \"\"\"Rank same-length snippets whose tonal rhyme matches ``word``.\n",
    "\n",
    "    Like find_rhyme_without_tone(), but matches the tone-marked\n",
    "    pinyin_with_tone column and keeps only snippets seen more than 5 times.\n",
    "    \"\"\"\n",
    "    # str(list)[1:-1] reproduces the comma-separated, quoted text format\n",
    "    # stored in the pinyin_with_tone column.\n",
    "    word_pinyin = str(word_parser(word)[1])[1:-1]\n",
    "    filtered = df.filter(df.pinyin_with_tone.contains(word_pinyin))\n",
    "    sentences = filtered.select(df.sentences, locate(word_pinyin, df.pinyin_with_tone).alias('position'),\n",
    "                                df.pinyin_with_tone_split)\n",
    "\n",
    "    # NOTE(review): position - 3 presumably compensates for the quote and\n",
    "    # separator characters before a match -- confirm against the generator.\n",
    "    sentences = sentences.withColumn('find_result', expr(\n",
    "        \"IF(position==1, 0, array_position(pinyin_with_tone_split, position - 3) + 1)\"))\n",
    "    sentences = sentences.withColumn('rhyme', expr(f\"substring(sentences, find_result, {len(word)})\"))\n",
    "    return sentences.groupBy(\"rhyme\").count().filter(\"count > 5\").sort('count', ascending=False)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "71.4 ms ± 4.27 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
     ]
    }
   ],
   "source": [
    "# Time a full tonal-rhyme lookup; collect() forces the lazy Spark plan\n",
    "# to actually execute.\n",
    "word = \"哭泣\"\n",
    "rhyme = find_rhyme_with_tone(word)\n",
    "%timeit rhyme.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sparknlp\n",
    "from sparknlp.base import *\n",
    "from sparknlp.annotator import *\n",
    "from pyspark.ml import Pipeline\n",
    "# Build a Spark NLP embedding pipeline over the 'rhyme' column:\n",
    "# document assembly -> sentence detection -> tokenisation -> BERT\n",
    "# embeddings loaded from the local 'bert_wwm' model directory.\n",
    "documentAssembler = DocumentAssembler() \\\n",
    "    .setInputCol(\"rhyme\") \\\n",
    "    .setOutputCol(\"document\")\n",
    "\n",
    "sentence = SentenceDetector() \\\n",
    "    .setInputCols([\"document\"]) \\\n",
    "    .setOutputCol(\"sentence\")\n",
    "\n",
    "tokenizer = Tokenizer() \\\n",
    "    .setInputCols([\"sentence\"]) \\\n",
    "    .setOutputCol(\"token\")\n",
    "\n",
    "embeddings = BertEmbeddings.load('bert_wwm')\\\n",
    "    .setInputCols(['document', 'token']) \\\n",
    "    .setOutputCol('embeddings')\n",
    "\n",
    "\n",
    "pipeline = Pipeline().setStages([\n",
    "    documentAssembler,\n",
    "    sentence,\n",
    "    tokenizer,\n",
    "    embeddings,\n",
    "])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('哭泣', 1.000000000000001),\n",
       " ('哭去', 0.9999999999998415),\n",
       " ('孤寂', 0.9106094062519385),\n",
       " ('孤立', 0.9106094062519385),\n",
       " ('孤僻', 0.9106094062519385),\n",
       " ('诛地', 0.8984363049143628),\n",
       " ('污吏', 0.8863447884969013),\n",
       " ('污迹', 0.8863447648824224),\n",
       " ('屋去', 0.8842628276471044),\n",
       " ('茱莉', 0.8790351474640392),\n",
       " ('茱丽', 0.879035125328728),\n",
       " ('估计', 0.8529082271893977),\n",
       " ('乎预', 0.8522491218551917),\n",
       " ('乎意', 0.8522491123630471),\n",
       " ('呼气', 0.82932505634988),\n",
       " ('呼吁', 0.82932505634988),\n",
       " ('夫系', 0.8171877899472931),\n",
       " ('粗气', 0.8067973078284715),\n",
       " ('初遇', 0.7960032460182322),\n",
       " ('朱丽', 0.7948772779205953),\n",
       " ('朱蒂', 0.7948772779205953),\n",
       " ('朱莉', 0.794877236506355),\n",
       " ('出気', 0.7754349559718157),\n",
       " ('出细', 0.7754349234799179),\n",
       " ('出狱', 0.775434882237714),\n",
       " ('出第', 0.775434882237714),\n",
       " ('出气', 0.775434882237714),\n",
       " ('出戏', 0.775434882237714),\n",
       " ('出去', 0.7754348612590334),\n",
       " ('出必', 0.7754348612590334),\n",
       " ('出力', 0.7754348612590334),\n",
       " ('出意', 0.7754348612590334),\n",
       " ('出地', 0.7754348612590334),\n",
       " ('出巨', 0.7754348612590334),\n",
       " ('出记', 0.7754348435102866)]"
      ]
     },
     "execution_count": 96,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Embed every candidate rhyme and the query word with the same pipeline,\n",
    "# then rank the candidates by cosine similarity of their BERT embeddings.\n",
    "result = pipeline.fit(rhyme).transform(rhyme)\n",
    "embedding = result.select(result.rhyme, result.embeddings).collect()\n",
    "\n",
    "word_df = spark.createDataFrame([[word]]).toDF(\"rhyme\")\n",
    "result = pipeline.fit(word_df).transform(word_df)\n",
    "word_embedding = result.select(result.rhyme, result.embeddings).collect()\n",
    "\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "def get_cosine_similarity(feature_vec_1, feature_vec_2):\n",
    "    return cosine_similarity([feature_vec_1], [feature_vec_2])[0][0]\n",
    "\n",
    "# Hoist the query vector out of the loop and build the mapping directly.\n",
    "target_vec = word_embedding[0].embeddings[0].embeddings\n",
    "similarities_dict = {\n",
    "    row.rhyme: get_cosine_similarity(row.embeddings[0].embeddings, target_vec)\n",
    "    for row in embedding\n",
    "}\n",
    "similarities_dict = sorted(similarities_dict.items(), key=lambda item: item[1], reverse=True)\n",
    "similarities_dict"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
