{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2023-10-18T11:42:47.788752500Z",
     "start_time": "2023-10-18T11:42:47.747752Z"
    }
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import re\n",
    "import os\n",
    "import gensim\n",
    "from gensim.corpora import Dictionary\n",
    "\n",
    "import openpyxl\n",
    "from warnings import filterwarnings\n",
    "from pandas import DataFrame, Series\n",
    "from paddlenlp import Taskflow\n",
    "import spacy\n",
    "import logging\n",
    "\n",
    "filterwarnings('ignore', category=DeprecationWarning)\n",
    "logging.basicConfig(level=logging.ERROR)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "class EnDataClear:\n",
    "    \"\"\"\n",
    "    一个英文数据清洗的 工具类\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data_frame):\n",
    "        \"\"\"\n",
    "        初始化 工具类\n",
    "        :param data_frame: 传入Dataframe对象\n",
    "        \"\"\"\n",
    "        # 删除重复行\n",
    "        data_frame = data_frame.drop_duplicates(keep='last')\n",
    "        # 去掉缺失值\n",
    "        data_frame = data_frame.dropna(subset=['职位', '任职要求'])\n",
    "        self.df = data_frame\n",
    "\n",
    "    @staticmethod\n",
    "    def convert_fullwidth_to_halfWidth(text):\n",
    "        \"\"\"\n",
    "        全角字符统一为半全角字符\n",
    "        \"\"\"\n",
    "        halfwidth_chars = \"1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"\n",
    "\n",
    "        # 对应的全角字符Unicode范围\n",
    "        fullwidth_chars = \"１２３４５６７８９０ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ！“＃＄％＆'（）＊＋，－．／：；＜＝＞？＠［＼］＾＿｀｛｜｝～\"\n",
    "\n",
    "        char_mapping = dict()\n",
    "        for i in range(len(halfwidth_chars)):\n",
    "            char_mapping[ord(fullwidth_chars[i])] = halfwidth_chars[i]\n",
    "        return text.translate(char_mapping)\n",
    "\n",
    "    @staticmethod\n",
    "    def chinese_punctuation_to_english(text):\n",
    "        \"\"\"\n",
    "        中文标点符号统一为英文\n",
    "        \"\"\"\n",
    "        punctuation_mapping = {'，': ',', '。': '.', '？': '?', '！': '!', '；': ';', '：': ':', '“': '\"', '”': '\"', '‘': \"'\",\n",
    "                               '’': \"'\", '&#8203;``oaicite:{\"number\":1,\"invalid_reason\":\"Malformed citation 【': '[',\n",
    "                               '】\"}``&#8203;': ']', '（': '(', '）': ')', '《': '<', '》': '>', '、': ',', '……': '...',\n",
    "                               '·': '.',\n",
    "                               '——': '-'}\n",
    "        # 使用正则表达式替换中文标点符号\n",
    "        for ch, en in punctuation_mapping.items():\n",
    "            text = re.sub(re.escape(ch), en, text)\n",
    "        return text\n",
    "\n",
    "    @staticmethod\n",
    "    def Abbreviation_replacement(text):\n",
    "        # 缩写和它们的扩展 字典\n",
    "        abbreviations = {\"can't\": 'cannot', \"it's\": 'it is', \"I'm\": 'I am', 'gonna': 'going to', 'wanna': 'want to',\n",
    "                         \"shouldn't\": 'should not', \"didn't\": 'did not', \"couldn't\": 'could not', \"doesn't\": 'does not',\n",
    "                         \"won't\": 'will not', \"i'll\": 'I will', \"you'll\": 'you will', \"he'll\": 'he will',\n",
    "                         \"she'll\": 'she will', \"we'll\": 'we will', \"they'll\": 'they will', \"I've\": 'I have',\n",
    "                         \"you've\": 'you have', \"we've\": 'we have', \"they've\": 'they have', \"I'd\": 'I would',\n",
    "                         \"you'd\": 'you would', \"he'd\": 'he would', \"she'd\": 'she would', \"we'd\": 'we would',\n",
    "                         \"they'd\": 'they would', \"haven't\": 'have not', \"hasn't\": 'has not', \"wouldn't\": 'would not',\n",
    "                         \"should've\": 'should have', \"could've\": 'could have', \"might've\": 'might have',\n",
    "                         \"must've\": 'must have', \"Can't\": 'Cannot', \"It's\": 'It is', 'Gonna': 'Going to',\n",
    "                         'Wanna': 'Want to', \"Shouldn't\": 'Should not', \"Didn't\": 'Did not', \"Couldn't\": 'Could not',\n",
    "                         \"Doesn't\": 'Does not', \"Won't\": 'Will not', \"I'll\": 'I will', \"You'll\": 'You will',\n",
    "                         \"He'll\": 'He will', \"She'll\": 'She will', \"We'll\": 'We will', \"They'll\": 'They will',\n",
    "                         \"You've\": 'You have', \"We've\": 'We have', \"They've\": 'They have', \"You'd\": 'You would',\n",
    "                         \"He'd\": 'He would', \"She'd\": 'She would', \"We'd\": 'We would', \"They'd\": 'They would',\n",
    "                         \"Haven't\": 'Have not', \"Hasn't\": 'Has not', \"Wouldn't\": 'Would not',\n",
    "                         \"Should've\": 'Should have',\n",
    "                         \"Could've\": 'Could have', \"Might've\": 'Might have', \"Must've\": 'Must have'}\n",
    "\n",
    "        # 替换缩写\n",
    "        for abbreviation, expansion in abbreviations.items():\n",
    "            # 使用正则表达式确保只替换完整的单词，而不是部分匹配\n",
    "            text = re.sub(r'\\b' + re.escape(abbreviation) + r'\\b', expansion, text)\n",
    "        return text\n",
    "\n",
    "    @staticmethod\n",
    "    def replace_special_char(cleaned_text):\n",
    "        \"\"\"\n",
    "        去除特殊字符\n",
    "        :param cleaned_text: \n",
    "        :return: \n",
    "        \"\"\"\n",
    "        # 使用正则表达式删除以 \"@\" 开头的邮箱地址\n",
    "        cleaned_text = re.sub(r'@\\w+\\.\\w+', ' ', cleaned_text)\n",
    "        # 去掉一些例如： #123 \n",
    "        cleaned_text = re.sub(r\"#\\d+|^\\d+Job\", \" \", cleaned_text)\n",
    "        cleaned_text = re.sub(r\"#|\\*\", \" \", cleaned_text)\n",
    "        # 去除URLs\n",
    "        cleaned_text = re.sub(r'https?://\\S+|www\\.\\S+', ' ', cleaned_text)\n",
    "        # 去除HTML标记\n",
    "        pattern = re.compile('<.*?>')\n",
    "        cleaned_text = re.sub(pattern, ' ', cleaned_text)\n",
    "        # 去掉一些特殊字符\n",
    "        cleaned_text = re.sub(\"\\]|\\\\|\\||\\\"|\\{|\\}|<|=|~|\\)|>|\\[|\\(|/\", ' ', cleaned_text)\n",
    "        # 一些其他操作\n",
    "        cleaned_text = re.sub('\\s+-\\s+', \"-\", cleaned_text)\n",
    "        cleaned_text = re.sub('\\\\\\\\n', \" \", cleaned_text)\n",
    "        cleaned_text = re.sub('\\n', \" \", cleaned_text)\n",
    "        cleaned_text = re.sub('\\\\\\\\', \" \", cleaned_text)\n",
    "        # 去除 非ASCII字符\n",
    "        cleaned_text = ''.join([char for char in cleaned_text if ord(char) < 128])\n",
    "        # 去掉换行 和多余的空格\n",
    "        cleaned_text = \" \".join([word.strip() for word in cleaned_text.split()])\n",
    "        return cleaned_text\n",
    "\n",
    "    def run(self, text):\n",
    "        \"\"\"\n",
    "        文本清洗\n",
    "        :param text: \n",
    "        \"\"\"\n",
    "        # 中英文 标点符号统一  中文符号转换为英文\n",
    "        text = self.chinese_punctuation_to_english(text)\n",
    "        # 全角字符统一为半全角字符\n",
    "        text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
    "        # 替换缩写\n",
    "        text = self.Abbreviation_replacement(text)\n",
    "        # 去除特殊字符\n",
    "        text = self.replace_special_char(text)\n",
    "        return text\n",
    "\n",
    "    def data_clear(self, row):\n",
    "        \"\"\"\n",
    "        清洗文本的工具函数\n",
    "        :param row: self.df 的每一行 \n",
    "        :return: 返回处理后的 self.df每一行 \n",
    "        \"\"\"\n",
    "        row['任职要求'] = self.run(row['任职要求'])\n",
    "        row['职位'] = self.run(row['职位'])\n",
    "        return row\n",
    "\n",
    "    def main_(self) -> DataFrame:\n",
    "        \"\"\"\n",
    "        执行函数\n",
    "        :return: 文本清洗后的 df \n",
    "        \"\"\"\n",
    "        self.df.apply(func=self.data_clear, axis=1)\n",
    "        return self.df\n",
    "\n",
    "\n",
     "class ZhDataClear(EnDataClear):\n",
     "    \"\"\"\n",
     "    Utility class for cleaning Chinese job-posting text.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, data_frame):\n",
     "        \"\"\"\n",
     "        Initialize via the parent cleaner (dedup + dropna).\n",
     "        :param data_frame: input DataFrame\n",
     "        \"\"\"\n",
     "        super().__init__(data_frame)\n",
     "\n",
     "    @staticmethod\n",
     "    def public_special_char(cleaned_text):\n",
     "        \"\"\"Language-independent cleanup applied before the language branch.\"\"\"\n",
     "        # Remove e-mail style tokens (@domain.tld).\n",
     "        cleaned_text = re.sub(r'@\\w+\\.\\w+', ' ', cleaned_text)\n",
     "        # Remove URLs.\n",
     "        cleaned_text = re.sub(r'https?://\\S+|www\\.\\S+', ' ', cleaned_text)\n",
     "        # Strip HTML tags.\n",
     "        pattern = re.compile('<.*?>')\n",
     "        cleaned_text = re.sub(pattern, ' ', cleaned_text)\n",
     "        # Remove list numbering: （1）, '1.' / '1、', 'a.' / 'A.', 一、..十、, （一）..（十）.\n",
     "        cleaned_text = re.sub(r'（\\d+）', '', cleaned_text)\n",
     "        cleaned_text = re.sub(r'\\d\\.|\\d、|\\d\\s|[a-z]\\.|[A-Z]\\.|\\d,', '', cleaned_text)\n",
     "        cleaned_text = re.sub(r'一、|二、|三、|四、|五、|六、|七、|八、|九、|十、', '', cleaned_text)\n",
     "        cleaned_text = re.sub(r'（一）|（二）|（三）|（四）|（五）|（六）|（七）|（八）|（九）|（十）', '', cleaned_text)\n",
     "        # Strip private-use / zero-width / layout characters and bullet glyphs.\n",
     "        cleaned_text = re.sub(\n",
     "            r\"\\uf09e|\\uf09f|\\uf0b7|\\x9f|\\u200b|\\u2002|\\uf06c|\\xa0|\\u3000|\\uf0fc|\\uf0d8|\\ufeff|\\u2028|●|▪|/\",\n",
     "            \"\", cleaned_text)\n",
     "\n",
     "        return cleaned_text\n",
     "\n",
     "    @staticmethod\n",
     "    def zh_replace_special_char(cleaned_text):\n",
     "        \"\"\"\n",
     "        Remove special characters from Chinese text.\n",
     "        :param cleaned_text: text to clean\n",
     "        :return: cleaned text\n",
     "        \"\"\"\n",
     "        # Drop whitespace+digit runs (leftover list numbers); skip ones that\n",
     "        # contain a newline.\n",
     "        for i in re.findall(\"\\s+\\d\", cleaned_text):\n",
     "            if not \"\\n\" in i:\n",
     "                cleaned_text = cleaned_text.replace(i, \"\")\n",
     "        # Remove circled-number bullets.\n",
     "        cleaned_text = re.sub(r'①|②|③|④|⑤|⑥|⑦|⑧|⑨|⑩', '', cleaned_text)\n",
     "        # Remove emoji and miscellaneous symbols/punctuation.\n",
     "        cleaned_text = re.sub(\n",
     "            r\"🍁|🏁|💝|😁|#|\\🎖|★|😤|►|◆|🎸|💪|🤔|💎|↓|‒|💜|‧|『|🚁|–|🔸|%|」|\\$|’|「|@|🎁|☆|⭐|】|\\\\t|−|❀|·|»|🏠|”|🔹|→|≥|🌻|🧩|■|【⃣▪|😎|«|´|🏅|\\+|️\\👏|😊|🏀|🐣|』|🌺|❗|✨|👣|🎻|\\^|🈶|＂|≦|、|\\*|\\r|✅|◎|§|∣|‘|💡|◼|\\?|\\\\|🎉|�|🌈|🌸|…|♦|`|🥇|》|🌟|\\[|~|]|<|\\)|➕|}|=>|\\{|\\(|—|>|•|【|】|---|《|\\$|\\\\|👏|⃣|'|,|_\",\n",
     "            '', cleaned_text)\n",
     "\n",
     "        # Normalize '.' (including '. -' runs) to the Chinese full stop,\n",
     "        # then collapse whitespace runs.\n",
     "        cleaned_text = re.sub(r'\\.\\s+-', '。', cleaned_text)\n",
     "        cleaned_text = re.sub(r'\\.', '。', cleaned_text)\n",
     "        cleaned_text = re.sub(\"\\s+\", \" \", cleaned_text)\n",
     "        return cleaned_text\n",
     "\n",
     "    def replace_special_char(self, cleaned_text):\n",
     "        \"\"\"Override of the parent's special-char removal, used for the\n",
     "        English-looking rows found on Chinese job sites.\"\"\"\n",
     "        for i in re.findall(\"\\s+\\d\", cleaned_text):\n",
     "            if not \"\\n\" in i:\n",
     "                cleaned_text = cleaned_text.replace(i, \"\")\n",
     "        cleaned_text = re.sub( r'‧|\"|\\\\ufeff|◆|:|;|—|–|«|\\\\xa0|-|\\)|#|•|>|\\\\n|]|\\[|»|\\\\uf0b7|\\\\t|~|\\(|♦|】|\\*|➕|…|\\'|\\\\r|●|\\+|【|\\\\|$', \"\", cleaned_text)\n",
     "        return cleaned_text\n",
     "\n",
     "    def run(self, text):\n",
     "        \"\"\"\n",
     "        Clean one text field, branching on its dominant language.\n",
     "        :param text: text to clean\n",
     "        \"\"\"\n",
     "        text = self.public_special_char(text)\n",
     "        # Branch on the ratio of Latin letters in the text.\n",
     "        # NOTE(review): raises ZeroDivisionError if text is empty at this point.\n",
     "        if not len(re.findall('[A-Z]|[a-z]', text)) / len(text) > 0.8:  # mostly Chinese\n",
     "            # Normalize full-width characters to half-width.\n",
     "            text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
     "            # Remove special characters.\n",
     "            text = self.zh_replace_special_char(text)\n",
     "        else:  # mostly English\n",
     "            # Unify punctuation: convert Chinese marks to English ones.\n",
     "            text = self.chinese_punctuation_to_english(text)\n",
     "            # Normalize full-width characters to half-width.\n",
     "            text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
     "            # Expand contractions.\n",
     "            text = self.Abbreviation_replacement(text)\n",
     "            # Remove special characters.\n",
     "            text = self.replace_special_char(text)\n",
     "        return text"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-10-18T11:42:47.799753Z",
     "start_time": "2023-10-18T11:42:47.766753Z"
    }
   },
   "id": "809454cc919e9fe0"
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "class EnDataPreprocessingAndBuildCorpus:\n",
    "    title_texts = list()\n",
    "    title_corpus = list()\n",
    "    desc_text = list()\n",
    "    desc_corpus = list()\n",
    "    nlp = spacy.load(\"en_core_web_sm\")\n",
    "\n",
    "    def __init__(self, data_frame: DataFrame):\n",
    "        self.df = data_frame\n",
    "        self.df.apply(func=self.run, axis=1)\n",
    "        self.title_texts, self.title_corpus = self.buildCorpus(self.df[\"职位\"])\n",
    "        self.desc_text, self.desc_corpus = self.buildCorpus(self.df[\"任职要求\"])\n",
    "        \n",
    "    def preprocessing(self, text):\n",
    "        # 命名实体识别\n",
    "        doc = self.nlp(text)\n",
    "        texts = list()\n",
    "        for word in doc.ents:\n",
    "            if word.text not in my_stop_words and len(word.text) > 2 and not word.text.isdigit():\n",
    "                texts.append(word.text)\n",
    "        return texts\n",
    "\n",
    "    @staticmethod\n",
    "    def buildCorpus(texts: Series):\n",
    "        # 创建二元语法模型\n",
    "        big_gram = gensim.models.phrases.Phrases(texts)\n",
    "        # 应用二元语法模型\n",
    "        texts = [big_gram[line] for line in texts]\n",
    "        # 词典将所有文本中的单词和短语映射到唯一的数字标识符\n",
    "        dictionary = Dictionary(texts)\n",
    "        # 创建文档-词袋\n",
    "        corpus = [dictionary.doc2bow(text) for text in texts]\n",
    "        return texts, corpus\n",
    "\n",
    "    def run(self, row):\n",
    "        row['任职要求'] = self.preprocessing(row['任职要求'])\n",
    "        row['职位'] = self.preprocessing(row['职位'])\n",
    "        return row\n",
    "\n",
    "    @staticmethod\n",
    "    def save_dfs_to_excel(data:dict):    \n",
    "        path = \"../Data/语料库创建result\"\n",
    "        if not os.path.exists(path):\n",
    "            os.mkdir(path)\n",
    "        corpu = EnDataPreprocessingAndBuildCorpus(data['df'])\n",
    "        desc = Series(corpu.desc_text, name=\"任职要求\")\n",
    "        title = Series(corpu.title_texts, name=\"职位\")\n",
    "        df_gram = pd.concat([title, desc], axis=1)\n",
    "\n",
    "        desc = Series(corpu.desc_corpus, name=\"任职要求\")\n",
    "        title = Series(corpu.title_corpus, name=\"职位\")\n",
    "        df_corpus = pd.concat([title, desc], axis=1)\n",
    "        \n",
    "         # 创建一个 ExcelWriter 对象\n",
    "        with pd.ExcelWriter(f\"{path}/{data.get('Origin_name')}.xlsx\", engine='xlsxwriter') as writer:\n",
    "            # 将每个 DataFrame 写入不同的工作表\n",
    "            corpu.df.to_excel(writer, sheet_name='命名实体', index=False)\n",
    "            df_gram.to_excel(writer, sheet_name='二元语法', index=False)\n",
    "            df_corpus.to_excel(writer, sheet_name='语料库', index=False)\n",
    "        print(f\"\\t结果已保存到文件 {path}/{data['Origin_name']}.xlsx\")\n",
    "            \n",
     "class ZhDataPreprocessingAndBuildCorpus(EnDataPreprocessingAndBuildCorpus):\n",
     "    \"\"\"\n",
     "    Chinese variant: uses PaddleNLP's Taskflow NER instead of spaCy.\n",
     "    \"\"\"\n",
     "    nlp = Taskflow(\"ner\")\n",
     "\n",
     "    def __init__(self, data_frame: DataFrame):\n",
     "        super().__init__(data_frame)\n",
     "\n",
     "    def preprocessing(self, text):\n",
     "        \"\"\"\n",
     "        Run Chinese NER and keep tokens that are non-empty, longer than two\n",
     "        characters, not digits and not in either stop-word list.\n",
     "        NOTE(review): depends on the notebook-level globals `my_stop_words`\n",
     "        and `stop_word`.\n",
     "        \"\"\"\n",
     "        # Taskflow(\"ner\") yields (token, label) pairs; labels are unused here.\n",
     "        doc = self.nlp(text)\n",
     "        texts = list()\n",
     "        for word,desc in doc:\n",
     "            word = word.strip()\n",
     "            if word not in my_stop_words and len(word) > 2 and not word.isdigit() and word not in stop_word and word:\n",
     "                texts.append(word)\n",
     "        return texts"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-10-18T11:42:52.189559200Z",
     "start_time": "2023-10-18T11:42:47.793753Z"
    }
   },
   "id": "5653e4d81efb9284"
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前网站: CareerBuilder\n",
      "\t结果已保存到文件 ../Data/语料库创建result/CareerBuilder.xlsx\n",
      "当前网站: Simplyhired\n",
      "\t结果已保存到文件 ../Data/语料库创建result/Simplyhired.xlsx\n",
      "当前网站: linkedin\n",
      "\t结果已保存到文件 ../Data/语料库创建result/linkedin.xlsx\n",
      "当前网站: CIA\n",
      "\t结果已保存到文件 ../Data/语料库创建result/CIA.xlsx\n",
      "当前网站: DNI\n",
      "\t结果已保存到文件 ../Data/语料库创建result/DNI.xlsx\n",
      "当前网站: 智联招聘\n",
      "\t结果已保存到文件 ../Data/语料库创建result/智联招聘.xlsx\n",
      "当前网站: boss直聘\n",
      "\t结果已保存到文件 ../Data/语料库创建result/boss直聘.xlsx\n"
     ]
    }
   ],
   "source": [
    "my_stop_words = [\"岗位\", \"职责\", \"描述\", \"任职要求\", \"岗位职责\", \"岗位要求\", \"职位福利\", \"福利待遇\", \"工作氛围\", \"薪酬福利\", \"任职资格\"] + ['Recommended', 'Skills', 'Role', 'Category', 'DescriptionRequisition','Job', 'position', 'overview', 'responsibility', \"one\", \"Today\"]\n",
    "\n",
    "stop_word = open(\"../Data/stop_words.txt\", encoding='utf-8').read().split(\"\\n\")\n",
    "\n",
    "workbook = openpyxl.load_workbook('../Data/数据汇总.xlsx')\n",
    "\n",
    "for sheet_name in workbook.sheetnames:\n",
    "    item = dict()\n",
    "    print(f\"当前网站: {sheet_name}\")\n",
    "    df = pd.read_excel('../Data/数据汇总.xlsx', sheet_name=sheet_name, index_col=0)\n",
    "    if re.search('[\\u4e00-\\u9fa5]', sheet_name):  # 中文网站\n",
    "        df = ZhDataClear(data_frame=df).main_()\n",
    "        item[\"Origin_name\"] = sheet_name\n",
    "        item[\"df\"] = df\n",
    "        ZhDataPreprocessingAndBuildCorpus.save_dfs_to_excel(item)\n",
    "    else:  # 英文网站\n",
    "        df = EnDataClear(data_frame=df).main_()\n",
    "        item[\"Origin_name\"] = sheet_name\n",
    "        item[\"df\"] = df\n",
    "        EnDataPreprocessingAndBuildCorpus.save_dfs_to_excel(item)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-10-18T11:46:53.781595900Z",
     "start_time": "2023-10-18T11:42:52.192558900Z"
    }
   },
   "id": "298b385254e40949"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
