{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2023-11-13T16:03:44.126862300Z",
     "start_time": "2023-11-13T16:03:44.059659200Z"
    }
   },
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "import re\n",
     "import os\n",
     "import gensim\n",
     "from gensim.corpora import Dictionary\n",
     "\n",
     "import glob\n",
     "from warnings import filterwarnings\n",
     "from pandas import DataFrame, Series\n",
     "from paddlenlp import Taskflow\n",
     "import spacy\n",
     "import logging\n",
     "import enchant\n",
     "import nltk\n",
     "from nltk import word_tokenize, pos_tag, ne_chunk\n",
     "\n",
     "# US-English spell checker. NOTE(review): `checker` is not referenced anywhere\n",
     "# else in this notebook — confirm it is still needed before removing.\n",
     "checker = enchant.Dict(\"en_US\")\n",
     "# Silence noisy DeprecationWarnings raised by the NLP libraries.\n",
     "filterwarnings('ignore', category=DeprecationWarning)\n",
     "# Only surface ERROR-level log records from library loggers.\n",
     "logging.basicConfig(level=logging.ERROR)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [],
   "source": [
    "class EnDataPreprocessingAndBuildCorpus:\n",
    "    title_texts = list()\n",
    "    title_corpus = list()\n",
    "    desc_text = list()\n",
    "    desc_corpus = list()\n",
    "    # nlp = spacy.load(\"en_core_web_sm\") # 有点不太理想\n",
    "    nlp = spacy.load(\"en_core_web_lg\") # 比上一个要稍好\n",
    "    # nlp = spacy.load(\"en_core_web_md\")  # 这个相比之下是最好的\n",
    "    # nlp = spacy.load(\"en_core_web_trf\") # 差劲\n",
    "\n",
    "    def __init__(self, data_frame: DataFrame):\n",
    "        # 删除重复行\n",
    "        data_frame = data_frame.drop_duplicates(keep='last')\n",
    "        # 去掉缺失值\n",
    "        data_frame = data_frame.dropna(subset=['职位', '任职要求'])\n",
    "        self.df = data_frame\n",
    "        self.df.apply(func=self.run, axis=1)\n",
    "        self.title_texts, self.title_corpus = self.buildCorpus(self.df[\"职位\"])\n",
    "        self.desc_text, self.desc_corpus = self.buildCorpus(self.df[\"任职要求\"])\n",
    "    \n",
    "    # @staticmethod\n",
    "    # def preprocessing(text):\n",
    "    #     named_entities_list = []\n",
    "    # \n",
    "    #     # 分词\n",
    "    #     words = word_tokenize(text)\n",
    "    # \n",
    "    #     # 词性标注\n",
    "    #     tags = pos_tag(words)\n",
    "    # \n",
    "    #     # 使用NLTK的NERChunker进行命名实体识别\n",
    "    #     chunked = ne_chunk(tags)\n",
    "    # \n",
    "    #     # 打印命名实体\n",
    "    #     for subtree in chunked:\n",
    "    #         if type(subtree) == nltk.Tree:\n",
    "    #             entity_text = \" \".join([word for word, tag in subtree.leaves()])\n",
    "    #             if entity_text.strip().endswith(\".\") or entity_text.strip().endswith(\":\"):\n",
    "    #                 word = entity_text.strip()[:-1].strip()\n",
    "    #             else:\n",
    "    #                 word = entity_text.strip()\n",
    "    #             # word1 = self.nlp(word)\n",
    "    #             # if len(word1) == 1:\n",
    "    #             #     if word1[0].lemma_.strip() in my_stop_words and not word1[0].text.isdigit():\n",
    "    #             #         continue\n",
    "    #             # if len(word) <=2 and word.isdigit():\n",
    "    #             #     continue\n",
    "    #             # if word1[0].is_punct or word1[]:\n",
    "    #             #     continue\n",
    "    #             if word not in my_stop_words and word.lower() not in stop_word and word and not word.isdigit():\n",
    "    #                 named_entities_list.append(word)\n",
    "    #             elif word == \"R\":\n",
    "    #                 named_entities_list.append(word)\n",
    "    #     return named_entities_list\n",
    "    def preprocessing(self, text):\n",
    "        \"\"\"\n",
    "        spacy\n",
    "        :param text: \n",
    "        :return: \n",
    "        \"\"\"\n",
    "        # 命名实体识别\n",
    "        doc = self.nlp(text)\n",
    "        texts = list()\n",
    "        for word in doc.ents:\n",
    "            if word.text.strip().endswith(\".\") or word.text.strip().endswith(\":\"):\n",
    "                word = word.text.strip()[:-1].strip()\n",
    "            else:\n",
    "                word = word.text.strip()\n",
    "            if word not in my_stop_words and word.lower() not in stop_word and word and not word.isdigit():\n",
    "                texts.append(word)\n",
    "            elif word == \"R\":\n",
    "                texts.append(word)\n",
    "        return texts\n",
    "\n",
    "    @staticmethod\n",
    "    def buildCorpus(texts: Series):\n",
    "        # 创建二元语法模型\n",
    "        big_gram = gensim.models.phrases.Phrases(texts)\n",
    "        # 应用二元语法模型\n",
    "        texts = [big_gram[line] for line in texts]\n",
    "        # 词典将所有文本中的单词和短语映射到唯一的数字标识符\n",
    "        dictionary = Dictionary(texts)\n",
    "        # 创建文档-词袋\n",
    "        corpus = [dictionary.doc2bow(text) for text in texts]\n",
    "        return texts, corpus\n",
    "\n",
    "    def run(self, row):\n",
    "        row['任职要求'] = self.preprocessing(row['任职要求'])\n",
    "        row['职位'] = self.preprocessing(row['职位'])\n",
    "        return row\n",
    "\n",
    "    @staticmethod\n",
    "    def save_dfs_to_excel(data: dict):\n",
    "        path = \"../Data/语料库创建result\"\n",
    "        if not os.path.exists(path):\n",
    "            os.mkdir(path)\n",
    "        corpu = EnDataPreprocessingAndBuildCorpus(data['df'])\n",
    "        desc = Series(corpu.desc_text, name=\"任职要求\")\n",
    "        title = Series(corpu.title_texts, name=\"职位\")\n",
    "        df_gram = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        desc = Series(corpu.desc_corpus, name=\"任职要求\")\n",
    "        title = Series(corpu.title_corpus, name=\"职位\")\n",
    "        df_corpus = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        # 创建一个 ExcelWriter 对象\n",
    "        with pd.ExcelWriter(f\"{path}/{data.get('Origin_name')}.xlsx\", engine='xlsxwriter') as writer:\n",
    "            # 将每个 DataFrame 写入不同的工作表\n",
    "            corpu.df.to_excel(writer, sheet_name='命名实体', index=False)\n",
    "            df_gram.to_excel(writer, sheet_name='二元语法', index=False)\n",
    "            df_corpus.to_excel(writer, sheet_name='语料库', index=False)\n",
    "        print(f\"\\t结果已保存到文件 {path}/{data['Origin_name']}.xlsx\")\n",
    "\n",
    "\n",
    "class ZhDataPreprocessingAndBuildCorpus:\n",
    "    title_texts = list()\n",
    "    title_corpus = list()\n",
    "    desc_text = list()\n",
    "    desc_corpus = list()\n",
    "    # nlp = Taskflow(\"ner\", model=\"fast\") # 快速模式 精度下降\n",
    "    nlp = Taskflow(\"ner\")\n",
    "    spacy_nlp = EnDataPreprocessingAndBuildCorpus.nlp\n",
    "\n",
    "    def __init__(self, data_frame: DataFrame):\n",
    "        # 删除重复行\n",
    "        data_frame = data_frame.drop_duplicates(keep='last')\n",
    "        # 去掉缺失值\n",
    "        data_frame = data_frame.dropna(subset=['职位', '任职要求'])\n",
    "        self.df = data_frame\n",
    "        self.df.apply(func=self.run, axis=1)\n",
    "        self.title_texts, self.title_corpus = self.buildCorpus(self.df[\"职位\"])\n",
    "        self.desc_text, self.desc_corpus = self.buildCorpus(self.df[\"任职要求\"])\n",
    "\n",
    "    def preprocessingZh(self, text):\n",
    "        doc = self.nlp(text)\n",
    "        texts = list()\n",
    "        for index, item1 in enumerate(doc):\n",
    "            word, desc = tuple(item1)\n",
    "            word = word.strip()\n",
    "            # if re.search('[\\u4e00-\\u9fa5]+', word):  # 大部分中文\n",
    "            #     if word not in my_stop_words and word not in stop_word and word and len(word) > 2:\n",
    "            #         texts.append(word)\n",
    "            # else:  # 英文\n",
    "            #     if word not in my_stop_words and word.lower() not in stop_word and word and len(word) > 2:\n",
    "            #         texts.append(word)\n",
    "            if word not in my_stop_words and word.lower() not in stop_word and word and len(word) > 2:\n",
    "                    texts.append(word)\n",
    "        return texts\n",
    "    \n",
    "    def preprocessingEn(self, text):\n",
    "        \"\"\"\n",
    "        spacy\n",
    "        :param text: \n",
    "        :return: \n",
    "        \"\"\"\n",
    "        # 命名实体识别\n",
    "        doc = self.spacy_nlp(text)\n",
    "        texts = list()\n",
    "        for word in doc.ents:\n",
    "            if word.text.strip().endswith(\".\") or word.text.strip().endswith(\":\"):\n",
    "                word = word.text.strip()[:-1].strip()\n",
    "            else:\n",
    "                word = word.text.strip()\n",
    "            if word not in my_stop_words and word.lower() not in stop_word and word and not word.isdigit():\n",
    "                texts.append(word)\n",
    "            elif word == \"R\":\n",
    "                texts.append(word)\n",
    "        return texts\n",
    "\n",
    "    \n",
    "    @staticmethod\n",
    "    def buildCorpus(texts: Series):\n",
    "        # 创建二元语法模型\n",
    "        big_gram = gensim.models.phrases.Phrases(texts)\n",
    "        # 应用二元语法模型\n",
    "        texts = [big_gram[line] for line in texts]\n",
    "        # 词典将所有文本中的单词和短语映射到唯一的数字标识符\n",
    "        dictionary = Dictionary(texts)\n",
    "        # 创建文档-词袋\n",
    "        corpus = [dictionary.doc2bow(text) for text in texts]\n",
    "        return texts, corpus\n",
    "\n",
    "    def run(self, row):\n",
    "        if not len(re.findall('[A-Z]|[a-z]', row['任职要求'])) / len(row['任职要求']) > 0.8:  # 中文的\n",
    "            row['任职要求'] = self.preprocessingZh(row['任职要求'])\n",
    "        else:  # 英文的\n",
    "            row['任职要求'] = self.preprocessingEn(row['任职要求'])\n",
    "        row['职位'] = self.preprocessingZh(row['职位'])\n",
    "        return row\n",
    "\n",
    "    @staticmethod\n",
    "    def save_dfs_to_excel(data: dict):\n",
    "        path = \"../Data/语料库创建result\"\n",
    "        if not os.path.exists(path):\n",
    "            os.mkdir(path)\n",
    "        corpu = ZhDataPreprocessingAndBuildCorpus(data['df'])\n",
    "        desc = Series(corpu.desc_text, name=\"任职要求\")\n",
    "        title = Series(corpu.title_texts, name=\"职位\")\n",
    "        df_gram = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        desc = Series(corpu.desc_corpus, name=\"任职要求\")\n",
    "        title = Series(corpu.title_corpus, name=\"职位\")\n",
    "        df_corpus = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        # 创建一个 ExcelWriter 对象\n",
    "        with pd.ExcelWriter(f\"{path}/{data.get('Origin_name')}.xlsx\", engine='xlsxwriter') as writer:\n",
    "            # 将每个 DataFrame 写入不同的工作表\n",
    "            corpu.df.to_excel(writer, sheet_name='命名实体', index=False)\n",
    "            df_gram.to_excel(writer, sheet_name='二元语法', index=False)\n",
    "            df_corpus.to_excel(writer, sheet_name='语料库', index=False)\n",
    "        print(f\"\\t结果已保存到文件 {path}/{data['Origin_name']}.xlsx\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-13T16:03:50.306974700Z",
     "start_time": "2023-11-13T16:03:44.121862900Z"
    }
   },
   "id": "3a917bfb326da169"
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [],
   "source": [
     "# Custom stop words: Chinese and English job-posting boilerplate (section\n",
     "# headers and generic terms) filtered out before building the corpora.\n",
     "my_stop_words = [\"岗位\", \"职责\", \"描述\", \"福利\", \"任职要求\", \"工作职责\",\"岗位需求\",\"职责描述\",\"工作地点\",\"技能要求\",\"专业要求\",\"技能要求\",\"学历要求\",\"其他说明\",\"公司介绍\",\"上班地址\",\"上班时间\",\"性格方面\",\"技能方面\",\"经验方面\",\"公司简介\", \"岗位职责\", \"岗位要求\", \"职位福利\", \"福利待遇\",\"任职资格\", \"工作氛围\", \"薪酬福利\", \"任职资格\", \"日常工作\"] + ['role', 'work address', 'education requirement', 'work time', 'year', 'benefit', 'position', 'work atmosphere', 'skill', 'description', 'job requirement', 'responsibility', 'requisition', 'personality', 'professional requirement', 'post', 'salary', 'you', 'other instruction', 'work location', 'job description', 'recommend', 'daily work', 'overview', 'today', 'job', 'skill requirement', 'welfare benefit', 'category', 'job benefit', 'job qualification', 'job responsibility', 'company introduction',\"overview\"] + ['Position', 'Responsibilities', 'Description', 'Benefits', 'Job Requirements', 'Job Responsibilities', 'Job Requirements', 'Job Description', 'Work Location', 'Recommended Skills', 'Professional Requirements', 'Skill Requirements', 'Education Requirements', 'Other Instructions', 'Company Introduction', 'Work Address', 'Work Hours', 'Personality', 'Skills', 'Experience', 'Company Introduction', 'Job Responsibilities', 'Job Requirements', 'Job Benefits', 'Welfare Benefits', 'Qualification', 'Work Atmosphere', 'Salary Benefits', 'Qualification', 'Daily Work'] + [\"At least a\",\"Requisition ID\",\"Today\",\"Summary\",\"g\",\"JOB SUMMARY\",\"Job Description : JOB SUMMARY\",\"tomorrow\", \"years\"]"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-13T16:03:50.327491300Z",
     "start_time": "2023-11-13T16:03:50.313437600Z"
    }
   },
   "id": "5f4e51dac0caada9"
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前网站: boss直聘\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\tools_installed\\anaconda\\envs\\bishe\\lib\\site-packages\\paddlenlp\\transformers\\tokenizer_utils_base.py:1865: UserWarning: Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\t结果已保存到文件 ../Data/语料库创建result/boss直聘.xlsx\n",
      "当前网站: CareerBuilder\n",
      "\t结果已保存到文件 ../Data/语料库创建result/CareerBuilder.xlsx\n",
      "当前网站: CIA\n",
      "\t结果已保存到文件 ../Data/语料库创建result/CIA.xlsx\n",
      "当前网站: DNI\n",
      "\t结果已保存到文件 ../Data/语料库创建result/DNI.xlsx\n",
      "当前网站: linkedin\n",
      "\t结果已保存到文件 ../Data/语料库创建result/linkedin.xlsx\n",
      "当前网站: Simplyhired\n",
      "\t结果已保存到文件 ../Data/语料库创建result/Simplyhired.xlsx\n",
      "当前网站: 智联招聘\n",
      "\t结果已保存到文件 ../Data/语料库创建result/智联招聘.xlsx\n"
     ]
    }
   ],
   "source": [
    "stop_word = open(\"../Data/stop_words.txt\", encoding='utf-8').read().split(\"\\n\")\n",
    "\n",
    "files = glob.glob(\"../Data/数据清洗/*\")\n",
    "for file in files:\n",
    "    file_name = file.replace('\\\\', '/').split('/')[-1].split('.')[0]\n",
    "    item = dict()\n",
    "    print(f\"当前网站: {file_name}\")\n",
    "    df = pd.read_csv(file, index_col=0)\n",
    "    if re.search('[\\u4e00-\\u9fa5]', file_name):  # 中文网站\n",
    "        item[\"Origin_name\"] = file_name\n",
    "        item[\"df\"] = df\n",
    "        ZhDataPreprocessingAndBuildCorpus.save_dfs_to_excel(item)\n",
    "    else:  # 英文网站\n",
    "        item[\"Origin_name\"] = file_name\n",
    "        item[\"df\"] = df\n",
    "        EnDataPreprocessingAndBuildCorpus.save_dfs_to_excel(item)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-13T16:09:10.023083700Z",
     "start_time": "2023-11-13T16:03:50.326491Z"
    }
   },
   "id": "13cf4cbe529bd027"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
