{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-10-15T08:04:12.138300900Z",
     "start_time": "2023-10-15T08:04:12.069129700Z"
    }
   },
   "outputs": [],
   "source": [
    "import spacy\n",
    "import gensim\n",
    "from gensim.corpora import Dictionary\n",
    "\n",
    "import pandas as pd\n",
    "from pandas import DataFrame, Series\n",
    "import openpyxl\n",
    "import warnings\n",
    "import re\n",
    "import enchant\n",
    "import os\n",
    "\n",
    "warnings.filterwarnings('ignore', category=DeprecationWarning)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3619ac4986f07cca",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-10-15T08:04:12.161891Z",
     "start_time": "2023-10-15T08:04:12.088024100Z"
    },
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "outputs": [],
   "source": [
    "class EnDataClear:\n",
    "    \"\"\"\n",
    "    一个英文数据清洗的 工具类\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data_frame):\n",
    "        \"\"\"\n",
    "        初始化 工具类\n",
    "        :param data_frame: 传入Dataframe对象\n",
    "        \"\"\"\n",
    "        # 删除重复行\n",
    "        data_frame = data_frame.drop_duplicates(keep='last')\n",
    "        # 去掉缺失值\n",
    "        data_frame = data_frame.dropna(subset=['职位', '任职要求'])\n",
    "        self.df = data_frame\n",
    "\n",
    "    @staticmethod\n",
    "    def convert_fullwidth_to_halfWidth(text):\n",
    "        '''\n",
    "        全角字符统一为半全角字符\n",
    "        '''\n",
    "        halfwidth_chars = \"1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"\n",
    "\n",
    "        # 对应的全角字符Unicode范围\n",
    "        fullwidth_chars = \"１２３４５６７８９０ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ！“＃＄％＆'（）＊＋，－．／：；＜＝＞？＠［＼］＾＿｀｛｜｝～\"\n",
    "\n",
    "        char_mapping = dict()\n",
    "        for i in range(len(halfwidth_chars)):\n",
    "            char_mapping[ord(fullwidth_chars[i])] = halfwidth_chars[i]\n",
    "        return text.translate(char_mapping)\n",
    "\n",
    "    @staticmethod\n",
    "    def chinese_punctuation_to_english(text):\n",
    "        '''\n",
    "        中文标点符号统一为英文\n",
    "        '''\n",
    "        punctuation_mapping = {'，': ',', '。': '.', '？': '?', '！': '!', '；': ';', '：': ':', '“': '\"', '”': '\"', '‘': \"'\",\n",
    "                               '’': \"'\", '&#8203;``oaicite:{\"number\":1,\"invalid_reason\":\"Malformed citation 【': '[',\n",
    "                               '】\"}``&#8203;': ']', '（': '(', '）': ')', '《': '<', '》': '>', '、': ',', '……': '...',\n",
    "                               '·': '.',\n",
    "                               '——': '-'}\n",
    "        # 使用正则表达式替换中文标点符号\n",
    "        for ch, en in punctuation_mapping.items():\n",
    "            text = re.sub(re.escape(ch), en, text)\n",
    "        return text\n",
    "\n",
    "    @staticmethod\n",
    "    def Abbreviation_replacement(text):\n",
    "        # 缩写和它们的扩展 字典\n",
    "        abbreviations = {\"can't\": 'cannot', \"it's\": 'it is', \"I'm\": 'I am', 'gonna': 'going to', 'wanna': 'want to',\n",
    "                         \"shouldn't\": 'should not', \"didn't\": 'did not', \"couldn't\": 'could not', \"doesn't\": 'does not',\n",
    "                         \"won't\": 'will not', \"i'll\": 'I will', \"you'll\": 'you will', \"he'll\": 'he will',\n",
    "                         \"she'll\": 'she will', \"we'll\": 'we will', \"they'll\": 'they will', \"I've\": 'I have',\n",
    "                         \"you've\": 'you have', \"we've\": 'we have', \"they've\": 'they have', \"I'd\": 'I would',\n",
    "                         \"you'd\": 'you would', \"he'd\": 'he would', \"she'd\": 'she would', \"we'd\": 'we would',\n",
    "                         \"they'd\": 'they would', \"haven't\": 'have not', \"hasn't\": 'has not', \"wouldn't\": 'would not',\n",
    "                         \"should've\": 'should have', \"could've\": 'could have', \"might've\": 'might have',\n",
    "                         \"must've\": 'must have', \"Can't\": 'Cannot', \"It's\": 'It is', 'Gonna': 'Going to',\n",
    "                         'Wanna': 'Want to', \"Shouldn't\": 'Should not', \"Didn't\": 'Did not', \"Couldn't\": 'Could not',\n",
    "                         \"Doesn't\": 'Does not', \"Won't\": 'Will not', \"I'll\": 'I will', \"You'll\": 'You will',\n",
    "                         \"He'll\": 'He will', \"She'll\": 'She will', \"We'll\": 'We will', \"They'll\": 'They will',\n",
    "                         \"You've\": 'You have', \"We've\": 'We have', \"They've\": 'They have', \"You'd\": 'You would',\n",
    "                         \"He'd\": 'He would', \"She'd\": 'She would', \"We'd\": 'We would', \"They'd\": 'They would',\n",
    "                         \"Haven't\": 'Have not', \"Hasn't\": 'Has not', \"Wouldn't\": 'Would not',\n",
    "                         \"Should've\": 'Should have',\n",
    "                         \"Could've\": 'Could have', \"Might've\": 'Might have', \"Must've\": 'Must have'}\n",
    "\n",
    "        # 替换缩写\n",
    "        for abbreviation, expansion in abbreviations.items():\n",
    "            # 使用正则表达式确保只替换完整的单词，而不是部分匹配\n",
    "            text = re.sub(r'\\b' + re.escape(abbreviation) + r'\\b', expansion, text)\n",
    "        return text\n",
    "\n",
    "    @staticmethod\n",
    "    def check_spelling(text):\n",
    "        # 创建一个拼写检查器\n",
    "        checker = enchant.Dict(\"en_US\")  # 使用英语字典，可以根据需要选择其他字典\n",
    "        # 将文本分成单词\n",
    "        words = text.split()\n",
    "        # 用于存储纠正后的文本\n",
    "        corrected_text = []\n",
    "        for word in words:\n",
    "            if checker.check(word):\n",
    "                # 如果拼写正确，保留原词\n",
    "                corrected_text.append(word)\n",
    "            else:\n",
    "                # 如果拼写错误，尝试获取建议的正确拼写\n",
    "                suggestions = checker.suggest(word)\n",
    "                if suggestions:\n",
    "                    corrected_text.append(suggestions[0])  # 使用第一个建议的拼写\n",
    "                else:\n",
    "                    corrected_text.append(word)  # 如果没有建议，保留原词\n",
    "        # 将纠正后的单词组合成文本\n",
    "        corrected_text = ' '.join(corrected_text)\n",
    "        return corrected_text\n",
    "\n",
    "    @staticmethod\n",
    "    def replace_special_char(cleaned_text):\n",
    "        \"\"\"\n",
    "        去除特殊字符\n",
    "        :param cleaned_text: \n",
    "        :return: \n",
    "        \"\"\"\n",
    "\n",
    "        # def remove_char(text):\n",
    "        #     \"\"\"\n",
    "        #     去掉换行\n",
    "        #     :param text: \n",
    "        #     :return: \n",
    "        #     \"\"\"\n",
    "        #     while True:\n",
    "        #         char = re.findall(\"\\n+\", text, re.S)\n",
    "        #         if char:\n",
    "        #             for i in char:\n",
    "        #                 text = r\"{}\".format(text).replace(i, \" \")\n",
    "        #         else:\n",
    "        #             break\n",
    "        #     return \" \".join([i.strip() for i in text.split()])\n",
    "\n",
    "        # 使用正则表达式删除以 \"@\" 开头的邮箱地址\n",
    "        cleaned_text = re.sub(r'@\\w+\\.\\w+', ' ', cleaned_text)\n",
    "        # 去掉一些例如： #123 \n",
    "        cleaned_text = re.sub(r\"#\\d+|^\\d+Job\", \" \", cleaned_text)\n",
    "        cleaned_text = re.sub(r\"#|\\*\", \" \", cleaned_text)\n",
    "        # 去除URLs\n",
    "        cleaned_text = re.sub(r'https?://\\S+|www\\.\\S+', ' ', cleaned_text)\n",
    "        # 去除HTML标记\n",
    "        pattern = re.compile('<.*?>')\n",
    "        cleaned_text = re.sub(pattern, ' ', cleaned_text)\n",
    "        # 去掉一些特殊字符\n",
    "        cleaned_text = re.sub(\"\\]|\\\\|\\||\\'|\\{|\\}|<|=|~|\\)|>|\\[|\\(|/\", ' ', cleaned_text)\n",
    "        # 一些其他操作\n",
    "        cleaned_text = re.sub('\\s+-\\s+', \"-\", cleaned_text)\n",
    "        # 去除 非ASCII字符\n",
    "        cleaned_text = ''.join([char for char in cleaned_text if ord(char) < 128])\n",
    "        # 去掉换行 和多余的空格\n",
    "        cleaned_text = \" \".join([word.strip() for word in cleaned_text.split()])\n",
    "        return cleaned_text\n",
    "\n",
    "    def run(self, text):\n",
    "        \"\"\"\n",
    "        文本清洗\n",
    "        :param text: \n",
    "        \"\"\"\n",
    "        # 中英文 标点符号统一  中文符号转换为英文\n",
    "        text = self.chinese_punctuation_to_english(text)\n",
    "        # 全角字符统一为半全角字符\n",
    "        text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
    "        # 替换缩写\n",
    "        text = self.Abbreviation_replacement(text)\n",
    "        # 去除特殊字符\n",
    "        text = self.replace_special_char(text)\n",
    "        # 拼写检查\n",
    "        # text = self.check_spelling(text)\n",
    "        return text\n",
    "\n",
    "    def data_clear(self, row):\n",
    "        \"\"\"\n",
    "        清洗文本的工具函数\n",
    "        :param row: self.df 的每一行 \n",
    "        :return: 返回处理后的 self.df每一行 \n",
    "        \"\"\"\n",
    "        row['任职要求'] = self.run(row['任职要求'])\n",
    "        row['职位'] = self.run(row['职位'])\n",
    "        return row\n",
    "\n",
    "    def main_(self) -> DataFrame:\n",
    "        \"\"\"\n",
    "        执行函数\n",
    "        :return: 文本清洗后的 df \n",
    "        \"\"\"\n",
    "        self.df.apply(func=self.data_clear, axis=1)\n",
    "        return self.df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec46ef3626226f1b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-10-15T08:04:15.794422700Z",
     "start_time": "2023-10-15T08:04:15.774323Z"
    },
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "outputs": [],
   "source": [
    "class EnDataPreprocessingAndBuildCorpus:\n",
    "    title_texts = list()\n",
    "    title_corpus = list()\n",
    "    desc_text = list()\n",
    "    desc_corpus = list()\n",
    "    nlp = spacy.load(\"en_core_web_sm\")\n",
    "\n",
    "    def __init__(self, data_frame: DataFrame):\n",
    "        self.df = data_frame\n",
    "        self.df.apply(func=self.run, axis=1)\n",
    "        self.title_texts, self.title_corpus = self.buildCorpus(self.df[\"职位\"])\n",
    "        self.desc_text, self.desc_corpus = self.buildCorpus(self.df[\"任职要求\"])\n",
    "        \n",
    "    def preprocessing(self, text):\n",
    "        # doc = self.nlp(text)\n",
    "        # texts = list()\n",
    "        # # 数据预处理 词干化、去停顿词和词性还原\n",
    "        # for word in doc:\n",
    "        #     if word.text != '\\n' and not word.is_stop and word.lemma_.strip() not in my_stop_words:\n",
    "        #         if word.lemma_.strip():\n",
    "        #             texts.append(word.lemma_.strip())\n",
    "\n",
    "        # # 命名实体识别\n",
    "        # doc = self.nlp(\" \".join(texts))\n",
    "        doc = self.nlp(text)\n",
    "        texts = list()\n",
    "        for word in doc.ents:\n",
    "            if word.text not in my_stop_words and len(word.text) > 2 and not word.text.isdigit():\n",
    "                texts.append(word.text)\n",
    "        return texts\n",
    "\n",
    "    @staticmethod\n",
    "    def buildCorpus(texts: Series):\n",
    "        # 创建二元语法模型\n",
    "        big_gram = gensim.models.phrases.Phrases(texts)\n",
    "        # 应用二元语法模型\n",
    "        texts = [big_gram[line] for line in texts]\n",
    "        # 词典将所有文本中的单词和短语映射到唯一的数字标识符\n",
    "        dictionary = Dictionary(texts)\n",
    "        # 创建文档-词袋\n",
    "        corpus = [dictionary.doc2bow(text) for text in texts]\n",
    "        return texts, corpus\n",
    "\n",
    "    def run(self, row):\n",
    "        row['任职要求'] = self.preprocessing(row['任职要求'])\n",
    "        row['职位'] = self.preprocessing(row['职位'])\n",
    "        return row\n",
    "\n",
    "    @staticmethod\n",
    "    def save_dfs_to_excel(item:dict):    \n",
    "        path = \"../Data/语料库创建result\"\n",
    "        if not os.path.exists(path):\n",
    "            os.mkdir(path)\n",
    "        corpu = EnDataPreprocessingAndBuildCorpus(item['df'])\n",
    "        desc = Series(corpu.desc_text, name=\"任职要求\")\n",
    "        title = Series(corpu.title_texts, name=\"职位\")\n",
    "        df_gram = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        desc = Series(corpu.desc_corpus, name=\"任职要求\")\n",
    "        title = Series(corpu.title_corpus, name=\"职位\")\n",
    "        df_corpus = pd.concat([title, desc], axis=1)\n",
    "        \n",
    "         # 创建一个 ExcelWriter 对象\n",
    "        with pd.ExcelWriter(f\"{path}/{item.get('Origin_name')}.xlsx\", engine='xlsxwriter') as writer:\n",
    "            # 将每个 DataFrame 写入不同的工作表\n",
    "            corpu.df.to_excel(writer, sheet_name='命名实体', index=False)\n",
    "            df_gram.to_excel(writer, sheet_name='二元语法', index=False)\n",
    "            df_corpus.to_excel(writer, sheet_name='语料库', index=False)\n",
    "        print(f\"\\t结果已保存到文件 {path}/{item['Origin_name']}.xlsx\")\n",
    "            \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5bbef97b4e028efe",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-10-15T08:04:15.771119600Z",
     "start_time": "2023-10-15T08:04:15.752468600Z"
    },
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    }
   },
   "outputs": [],
   "source": [
     "# Extra stop words (case-sensitive) filtered out of the spaCy NER results\n",
     "# by EnDataPreprocessingAndBuildCorpus.preprocessing.\n",
     "my_stop_words = ['Recommended', 'Skills', 'Role', 'Category', 'DescriptionRequisition', 'Job', 'position', 'overview',\n",
     "                 'responsibility', \"one\", \"Today\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d92afb74d934665",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-10-15T08:04:15.753468Z",
     "start_time": "2023-10-15T08:04:12.097039200Z"
    },
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "workbook = openpyxl.load_workbook('../Data/数据汇总.xlsx')\n",
    "\n",
    "stopList = open(\"../Data/stop_words.txt\", 'r', encoding='utf-8').read().split('\\n')\n",
    "\n",
    "for sheet_name in workbook.sheetnames:\n",
    "    item = dict()\n",
    "    print(f\"当前网站: {sheet_name}\")\n",
    "    df = pd.read_excel('../Data/数据汇总.xlsx', sheet_name=sheet_name, index_col=0)\n",
    "    if re.search('[\\u4e00-\\u9fa5]', sheet_name):  # 中文网站\n",
    "        pass\n",
    "    else:  # 英文网站\n",
    "        df = EnDataClear(data_frame=df).main_()\n",
    "        item[\"Origin_name\"] = sheet_name\n",
    "        item[\"df\"] = df\n",
    "        EnDataPreprocessingAndBuildCorpus.save_dfs_to_excel(item)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
