{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "from text_former import CleanedText\n",
    "import gensim\n",
    "from gensim import models,corpora\n",
    "import matplotlib.pyplot as plt\n",
    "import pyLDAvis.gensim_models\n",
    "from tqdm import tqdm\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def KeepProperpy(text:str, property_dict:dict, target_property:list, word_seperator:str=' ', judge_func=None) -> str:\n",
    "    try:\n",
    "        words = text.split(word_seperator)\n",
    "    except AttributeError as err:\n",
    "        print('Using text %s\\n'%text)\n",
    "        raise err\n",
    "    ret_words = []\n",
    "    for wd in words:\n",
    "        if wd == '':\n",
    "            continue\n",
    "        if wd not in property_dict:\n",
    "            raise KeyError('Word \"%s\" not in given dict.'%wd)\n",
    "        if cmp_func is None:\n",
    "            if property_dict[wd] in target_property:\n",
    "                ret_words.append(wd)\n",
    "        else:\n",
    "            if judge_func(property_dict[wd]):\n",
    "                ret_words.append(wd)\n",
    "    if ret_words == []:\n",
    "        return ''\n",
    "    return ' '.join(ret_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def keepWord(text:str, target_words:list, word_seperator:str=' ') -> str:\n",
    "    try:\n",
    "        words = text.split(word_seperator)\n",
    "    except AttributeError as err:\n",
    "        print('Using text %s\\n'%text)\n",
    "        raise err\n",
    "    ret_words = [wd for wd in words if wd in target_words]\n",
    "    return ' '.join(ret_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "def LDAFunc(cleaned_corpus:list, num_topics:int, word_seperator:str=' '):\n",
    "    \"\"\"Train a gensim LDA topic model on pre-cleaned documents.\n",
    "\n",
    "    Args:\n",
    "        cleaned_corpus: documents, each a string of words joined by word_seperator.\n",
    "        num_topics: number of LDA topics to fit.\n",
    "        word_seperator: separator between words in each document.\n",
    "\n",
    "    Returns:\n",
    "        (lda, corpus, dct): trained LdaModel, BoW corpus and Dictionary --\n",
    "        the argument order expected by pyLDAvis.gensim_models.prepare(*result).\n",
    "    \"\"\"\n",
    "    words = [text.split(word_seperator) for text in cleaned_corpus]\n",
    "    # Drop empty tokens produced by consecutive separators.\n",
    "    words = [[i for i in l if i != ''] for l in words]\n",
    "    \n",
    "    dct = corpora.Dictionary(words)\n",
    "    corpus = [dct.doc2bow(wds) for wds in words]\n",
    "    # Use `models.ldamodel` (already imported; consistent with the cells below);\n",
    "    # fixed random_state for reproducible topics.\n",
    "    lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dct, num_topics=num_topics, random_state=2022)\n",
    "    \n",
    "    return lda, corpus, dct"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _first_tokens(path:str) -> list:\n",
    "    \"\"\"Read a keyword file and return the first space-separated token of each line.\"\"\"\n",
    "    with open(path, 'r', encoding='utf-8') as f:\n",
    "        return [line.strip(' \\n').split(' ')[0] for line in f]\n",
    "\n",
    "# Four curated keyword lists (one token per line, extra columns ignored).\n",
    "artists = _first_tokens('./input_data/artists.txt')\n",
    "repertory = _first_tokens('./input_data/repertory.txt')\n",
    "cliques = _first_tokens('./input_data/cliques.txt')\n",
    "vocations = _first_tokens('./input_data/vocations.txt')\n",
    "\n",
    "key_words = { 'artists':artists, 'repertory':repertory, 'cliques':cliques, 'vocations':vocations }\n",
    "\n",
    "# Pooled domain vocabulary (used for the term-restricted LDA at the end).\n",
    "terms = artists + repertory + cliques + vocations\n",
    "\n",
    "# POS/NER tags to keep: noun subtypes plus person/location/organization entities.\n",
    "target_tags = [\n",
    "    'n', 'nr', 'ns', 'nt',\n",
    "    'nw', 'nz', 'PER', 'LOC', 'ORG'\n",
    "]\n",
    "\n",
    "paths = {\n",
    "    'artist_info':'./input_data/BaiduBaike/artists_info.csv', \n",
    "    'repertory_info':'./input_data/BaiduBaike/repertory_info.csv',\n",
    "    'bilibili_comment':'./input_data/Bilibili/jingju_comment.csv'\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2337"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Total number of curated domain keywords across the four lists.\n",
    "len(artists + repertory + cliques + vocations)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['rpid', 'oid', 'type', 'mid', 'count', 'ctime', 'like', 'uname', 'sex',\n",
       "       'current_level', 'vipType', 'vipStatus', 'message', 'has_folded',\n",
       "       'is_folded'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the raw Bilibili comment table and inspect its columns.\n",
    "df = pd.read_csv(paths['bilibili_comment'])\n",
    "df.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only the needed columns, then split the comments two ways:\n",
    "# by video id (oid) and by commenter username (uname).\n",
    "df = df[['oid', 'ctime', 'like', 'uname', 'sex', 'message']]\n",
    "df_by_oid = dict(tuple(df.groupby('oid')))\n",
    "df_by_uname = dict(tuple(df.groupby('uname')))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a word -> POS-tag mapping over the entire comment corpus.\n",
    "# NOTE(review): CleanedText is project code; assumes element [1] of its\n",
    "# result is the tag dict consumed by KeepProperpy -- confirm in text_former.\n",
    "word_property = CleanedText(' '.join(list(df['message'])))[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One document per video / per user: concatenate each group's messages.\n",
    "message_by_oid = [' '.join(i['message']) for i in df_by_oid.values()]\n",
    "message_by_uname = [' '.join(i['message']) for i in df_by_uname.values()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload previously cleaned (tokenized) messages from cache CSVs\n",
    "# written by an earlier run, skipping the expensive cleaning step.\n",
    "temp_df = pd.read_csv('./bili_cleaned_msg_by_oid.csv')\n",
    "cleaned_by_oid = list(temp_df['message'])\n",
    "\n",
    "temp_df = pd.read_csv('./bili_cleaned_msg_by_uname.csv')\n",
    "cleaned_by_uname = list(temp_df['message'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# POS-filter each document down to nouns / named entities (target_tags),\n",
    "# drop documents that became empty, then tokenize on single spaces\n",
    "# (removing empty tokens left by consecutive separators).\n",
    "cleaned_by_oid = [KeepProperpy(i, word_property, target_tags) for i in cleaned_by_oid]\n",
    "cleaned_by_uname = [KeepProperpy(i, word_property, target_tags) for i in cleaned_by_uname]\n",
    "\n",
    "cleaned_by_oid = [i for i in cleaned_by_oid if i != '']\n",
    "cleaned_by_uname = [i for i in cleaned_by_uname if i != '']\n",
    "\n",
    "words_oid = [i.split(' ') for i in cleaned_by_oid]\n",
    "words_uname = [i.split(' ') for i in cleaned_by_uname]\n",
    "\n",
    "words_oid = [[i for i in l if i != ''] for l in words_oid]\n",
    "words_uname = [[i for i in l if i != ''] for l in words_uname]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Adjective-only variant: keep tags a/ad/an, then fit 2-topic LDA models\n",
    "# for the per-video and per-user corpora.\n",
    "cleaned_by_oid_a = [KeepProperpy(i, word_property, ['a', 'ad', 'an']) for i in cleaned_by_oid]\n",
    "cleaned_by_uname_a = [KeepProperpy(i, word_property, ['a', 'ad', 'an']) for i in cleaned_by_uname]\n",
    "\n",
    "para_oid = LDAFunc(cleaned_by_oid_a, 2)\n",
    "para_uname = LDAFunc(cleaned_by_uname_a, 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Iridesent\\AppData\\Roaming\\Python\\Python38\\site-packages\\pyLDAvis\\_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only.\n",
      "  default_term_info = default_term_info.sort_values(\n"
     ]
    }
   ],
   "source": [
    "# Interactive pyLDAvis page for the per-video adjective model.\n",
    "# NOTE(review): filename spells 'bililibi' -- kept as-is for consistency\n",
    "# with the other output files in this notebook.\n",
    "v_oid_a = pyLDAvis.gensim_models.prepare(*para_oid)\n",
    "pyLDAvis.save_html(v_oid_a, 'bililibi_by-oid_a_2.html')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Iridesent\\AppData\\Roaming\\Python\\Python38\\site-packages\\pyLDAvis\\_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only.\n",
      "  default_term_info = default_term_info.sort_values(\n"
     ]
    }
   ],
   "source": [
    "# Interactive pyLDAvis page for the per-user adjective model.\n",
    "v_uname_a = pyLDAvis.gensim_models.prepare(*para_uname)\n",
    "pyLDAvis.save_html(v_uname_a, 'bililibi_by-uname_a_2.html')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------- by oid --------\n",
      "\n",
      "(0, '0.016*\"京剧\" + 0.013*\"热词\" + 0.010*\"文化\" + 0.009*\"视频\" + 0.008*\"感觉\"')\n",
      "(1, '0.015*\"京剧\" + 0.010*\"感觉\" + 0.008*\"文化\" + 0.008*\"老板\" + 0.007*\"戏曲\"')\n",
      "(2, '0.014*\"京剧\" + 0.014*\"感觉\" + 0.013*\"文化\" + 0.010*\"戏曲\" + 0.008*\"原神\"')\n",
      "(3, '0.017*\"热词\" + 0.015*\"文化\" + 0.012*\"原神\" + 0.012*\"京剧\" + 0.011*\"眼睛\"')\n",
      "\n",
      "--------- by uname --------\n",
      "\n",
      "(0, '0.023*\"热词\" + 0.014*\"眼睛\" + 0.014*\"视频\" + 0.012*\"游戏\" + 0.012*\"辣\"')\n",
      "(1, '0.024*\"文化\" + 0.023*\"京剧\" + 0.015*\"感觉\" + 0.013*\"藏狐\" + 0.010*\"戏曲\"')\n"
     ]
    }
   ],
   "source": [
    "# Noun/NER-token LDA: 4 topics for per-video documents, 2 for per-user\n",
    "# documents; fixed random_state for reproducibility. Prints the top-5\n",
    "# words of each topic.\n",
    "dict_oid = corpora.Dictionary(words_oid)\n",
    "dict_uname = corpora.Dictionary(words_uname)\n",
    "\n",
    "corpus_oid = [dict_oid.doc2bow(words) for words in words_oid]\n",
    "corpus_uname = [dict_uname.doc2bow(words) for words in words_uname]\n",
    "\n",
    "LDA_oid = models.ldamodel.LdaModel(corpus=corpus_oid, id2word=dict_oid, num_topics=4, random_state=2022)\n",
    "LDA_uname = models.ldamodel.LdaModel(corpus=corpus_uname, id2word=dict_uname, num_topics=2, random_state=2022)\n",
    "\n",
    "print('--------- by oid --------\\n')\n",
    "for topic in LDA_oid.print_topics(num_words=5):\n",
    "    print(topic)\n",
    "print('\\n--------- by uname --------\\n')\n",
    "for topic in LDA_uname.print_topics(num_words=5):\n",
    "    print(topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hard topic assignment: take the argmax over each document's\n",
    "# topic-weight row from LdaModel.inference.\n",
    "infer_oid = LDA_oid.inference(corpus_oid)[0]\n",
    "infer_uname = LDA_uname.inference(corpus_uname)[0]\n",
    "\n",
    "labels_oid = [np.argmax(values) for values in infer_oid]\n",
    "labels_uname = [np.argmax(values) for values in infer_uname]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Iridesent\\AppData\\Roaming\\Python\\Python38\\site-packages\\pyLDAvis\\_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only.\n",
      "  default_term_info = default_term_info.sort_values(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Serving to http://127.0.0.1:8889/    [Ctrl-C to exit]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "127.0.0.1 - - [28/Mar/2022 16:52:43] \"GET / HTTP/1.1\" 200 -\n",
      "127.0.0.1 - - [28/Mar/2022 16:52:45] code 404, message Not Found\n",
      "127.0.0.1 - - [28/Mar/2022 16:52:45] \"GET /favicon.ico HTTP/1.1\" 404 -\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "stopping Server...\n"
     ]
    }
   ],
   "source": [
    "# Interactive pyLDAvis pages for the noun/NER models.\n",
    "v_oid = pyLDAvis.gensim_models.prepare(LDA_oid, corpus_oid, dict_oid)\n",
    "pyLDAvis.save_html(v_oid, 'bililibi_by-oid_n.html')\n",
    "\n",
    "v_uname = pyLDAvis.gensim_models.prepare(LDA_uname, corpus_uname, dict_uname)\n",
    "pyLDAvis.save_html(v_uname, 'bililibi_by-uname_n.html')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restrict tokens to the curated domain vocabulary, then retrain LDA.\n",
    "# Hoist `terms` into a set: `tok in list` scans ~2337 entries per token,\n",
    "# while set membership is O(1); the kept tokens are identical.\n",
    "terms_set = set(terms)\n",
    "words_terms_oid = [[i for i in l if i in terms_set] for l in words_oid]\n",
    "words_terms_uname = [[i for i in l if i in terms_set] for l in words_uname]\n",
    "\n",
    "dict_terms_oid = corpora.Dictionary(words_terms_oid)\n",
    "dict_terms_uname = corpora.Dictionary(words_terms_uname)\n",
    "\n",
    "corpus_terms_oid = [dict_terms_oid.doc2bow(words) for words in words_terms_oid]\n",
    "corpus_terms_uname = [dict_terms_uname.doc2bow(words) for words in words_terms_uname]\n",
    "\n",
    "LDA_terms_oid = models.ldamodel.LdaModel(corpus=corpus_terms_oid, id2word=dict_terms_oid, num_topics=4, random_state=2022)\n",
    "LDA_terms_uname = models.ldamodel.LdaModel(corpus=corpus_terms_uname, id2word=dict_terms_uname, num_topics=2, random_state=2022)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Iridesent\\AppData\\Roaming\\Python\\Python38\\site-packages\\pyLDAvis\\_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only.\n",
      "  default_term_info = default_term_info.sort_values(\n",
      "C:\\Users\\Iridesent\\AppData\\Roaming\\Python\\Python38\\site-packages\\pyLDAvis\\_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only.\n",
      "  default_term_info = default_term_info.sort_values(\n"
     ]
    }
   ],
   "source": [
    "# Interactive pyLDAvis pages for the domain-term-restricted models.\n",
    "v_terms_oid = pyLDAvis.gensim_models.prepare(LDA_terms_oid, corpus_terms_oid, dict_terms_oid)\n",
    "pyLDAvis.save_html(v_terms_oid, 'bililibi_by-oid_terms.html')\n",
    "\n",
    "v_terms_uname = pyLDAvis.gensim_models.prepare(LDA_terms_uname, corpus_terms_uname, dict_terms_uname)\n",
    "pyLDAvis.save_html(v_terms_uname, 'bililibi_by-uname_terms.html')"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "2bf03f580add3df3b73aa432a034fa0d1684914ab36a2c2b375a4b7444952bf0"
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
