{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "import spacy\n",
    "import enchant\n",
    "import openpyxl\n",
    "import pandas as pd\n",
    "from pandas import DataFrame\n",
    "\n",
    "checker = enchant.Dict(\"en_US\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-07T19:33:43.506869900Z",
     "start_time": "2023-11-07T19:33:43.482155Z"
    }
   },
   "id": "de04f85b0799dae7"
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2023-11-07T19:33:45.076449200Z",
     "start_time": "2023-11-07T19:33:43.487342300Z"
    }
   },
   "outputs": [],
   "source": [
    "class EnDataClear:\n",
    "    \"\"\"\n",
    "    Utility class for cleaning English-language job-posting data.\n",
    "    \"\"\"\n",
    "    # Shared spaCy pipeline (class attribute so the model is loaded only once).\n",
    "    nlp = spacy.load(\"en_core_web_md\")\n",
    "\n",
    "    def __init__(self, data_frame):\n",
    "        \"\"\"\n",
    "        Initialize the cleaner.\n",
    "        :param data_frame: input pandas DataFrame; expected to contain the\n",
    "            columns '职位' (job title) and '任职要求' (job requirements)\n",
    "        \"\"\"\n",
    "        # Drop duplicate rows, keeping the last occurrence.\n",
    "        data_frame = data_frame.drop_duplicates(keep='last')\n",
    "        # Drop rows missing either key column.\n",
    "        data_frame = data_frame.dropna(subset=['职位', '任职要求'])\n",
    "        self.df = data_frame\n",
    "\n",
    "    @staticmethod\n",
    "    def convert_fullwidth_to_halfWidth(text):\n",
    "        '''\n",
    "        Normalize full-width characters to their half-width (ASCII) equivalents.\n",
    "        '''\n",
    "        halfwidth_chars = \"1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"\n",
    "\n",
    "        # Full-width counterparts, position-aligned with halfwidth_chars.\n",
    "        fullwidth_chars = \"１２３４５６７８９０ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ！“＃＄％＆'（）＊＋，－．／：；＜＝＞？＠［＼］＾＿｀｛｜｝～\"\n",
    "\n",
    "        # Build a str.translate() table mapping each full-width code point to\n",
    "        # the half-width character at the same position.\n",
    "        translation_table = {ord(full): half for full, half in zip(fullwidth_chars, halfwidth_chars)}\n",
    "        return text.translate(translation_table)\n",
    "\n",
    "    @staticmethod\n",
    "    def chinese_punctuation_to_english(text):\n",
    "        '''\n",
    "        Normalize Chinese punctuation to its English (ASCII) equivalent.\n",
    "        '''\n",
    "        # Mapping of Chinese punctuation marks to ASCII punctuation.\n",
    "        # FIX: the original table contained a corrupted, tool-generated\n",
    "        # citation artifact in place of the keys for '【' and '】'; they are\n",
    "        # restored here to plain bracket mappings.\n",
    "        punctuation_mapping = {'，': ',', '。': '.', '？': '?', '！': '!', '；': ';', '：': ':', '“': '\"', '”': '\"', '‘': \"'\",\n",
    "                               '’': \"'\", '【': '[', '】': ']', '（': '(', '）': ')', '《': '<', '》': '>', '、': ',', '……': '...',\n",
    "                               '·': '.',\n",
    "                               '——': '-'}\n",
    "        # Replace each Chinese mark with its English counterpart.\n",
    "        for ch, en in punctuation_mapping.items():\n",
    "            text = re.sub(re.escape(ch), en, text)\n",
    "        return text\n",
    "    \n",
    "    @staticmethod\n",
    "    def Abbreviation_replacement(text):\n",
    "        \"\"\"\n",
    "        Expand common English contractions and normalize mentions of the\n",
    "        USA / United States to the single token 'America'.\n",
    "        \"\"\"\n",
    "        # Dictionary of contractions and their expansions.\n",
    "        abbreviations = {\"can't\": 'cannot', \"it's\": 'it is', \"I'm\": 'I am', 'gonna': 'going to', 'wanna': 'want to',\n",
    "                         \"shouldn't\": 'should not', \"didn't\": 'did not', \"couldn't\": 'could not', \"doesn't\": 'does not',\n",
    "                         \"won't\": 'will not', \"i'll\": 'I will', \"you'll\": 'you will', \"he'll\": 'he will',\n",
    "                         \"she'll\": 'she will', \"we'll\": 'we will', \"they'll\": 'they will', \"I've\": 'I have',\n",
    "                         \"you've\": 'you have', \"we've\": 'we have', \"they've\": 'they have', \"I'd\": 'I would',\n",
    "                         \"you'd\": 'you would', \"he'd\": 'he would', \"she'd\": 'she would', \"we'd\": 'we would',\n",
    "                         \"they'd\": 'they would', \"haven't\": 'have not', \"hasn't\": 'has not', \"wouldn't\": 'would not',\n",
    "                         \"should've\": 'should have', \"could've\": 'could have', \"might've\": 'might have',\n",
    "                         \"must've\": 'must have', \"Can't\": 'Cannot', \"It's\": 'It is', 'Gonna': 'Going to',\n",
    "                         'Wanna': 'Want to', \"Shouldn't\": 'Should not', \"Didn't\": 'Did not', \"Couldn't\": 'Could not',\n",
    "                         \"Doesn't\": 'Does not', \"Won't\": 'Will not', \"I'll\": 'I will', \"You'll\": 'You will',\n",
    "                         \"He'll\": 'He will', \"She'll\": 'She will', \"We'll\": 'We will', \"They'll\": 'They will',\n",
    "                         \"You've\": 'You have', \"We've\": 'We have', \"They've\": 'They have', \"You'd\": 'You would',\n",
    "                         \"He'd\": 'He would', \"She'd\": 'She would', \"We'd\": 'We would', \"They'd\": 'They would',\n",
    "                         \"Haven't\": 'Have not', \"Hasn't\": 'Has not', \"Wouldn't\": 'Would not',\n",
    "                         \"Should've\": 'Should have',\n",
    "                         \"Could've\": 'Could have', \"Might've\": 'Might have', \"Must've\": 'Must have'}\n",
    "\n",
    "        # Replace each contraction with its expansion.\n",
    "        for abbreviation, expansion in abbreviations.items():\n",
    "            # \\b anchors ensure only whole words are replaced, never substrings.\n",
    "            text = re.sub(r'\\b' + re.escape(abbreviation) + r'\\b', expansion, text)\n",
    "        # Normalize 'USA' / 'the US' style mentions to ' America '.\n",
    "        text = re.sub(\"(the[\\s]+|The[\\s]+)?U\\.?S\\.?A\\.?\", \" America \", text, flags=re.IGNORECASE)\n",
    "        text = re.sub(\"(the[\\s]+|The[\\s]+)?United State(s)?\", \" America \", text, flags=re.IGNORECASE)\n",
    "        return text\n",
    "    \n",
    "    @staticmethod\n",
    "    def check_spelling(text):\n",
    "        \"\"\"\n",
    "        Spell-check camel-cased words (mainly to catch run-together words)\n",
    "        using the module-level enchant `checker`.\n",
    "        \"\"\"\n",
    "        # Split the text into words.\n",
    "        words = text.split()\n",
    "        # Accumulates the corrected words.\n",
    "        corrected_text = []\n",
    "        for word in words:\n",
    "            # Only check purely alphanumeric words of length >= 5 containing an\n",
    "            # interior capital letter (the signature of run-together words).\n",
    "            if not re.findall(\"\\W\", word) and re.findall(\"\\w+[A-Z]\\w+\",word) and len(word) >= 5:\n",
    "                if checker.check(word):\n",
    "                    # Spelled correctly: keep the original word.\n",
    "                    corrected_text.append(word)\n",
    "                else:\n",
    "                    # Misspelled: ask the dictionary for suggestions.\n",
    "                    suggestions = checker.suggest(word)\n",
    "                    if suggestions:\n",
    "                        corrected_text.append(suggestions[0])  # use the first suggestion\n",
    "                    else:\n",
    "                        corrected_text.append(word)  # no suggestion: keep the original\n",
    "            else:\n",
    "                corrected_text.append(word)\n",
    "        # Re-join the corrected words into a single string.\n",
    "        corrected_text = ' '.join(corrected_text)\n",
    "        return corrected_text\n",
    "\n",
    "\n",
    "    def replace_special_char(self, cleaned_text):\n",
    "        \"\"\"\n",
    "        Remove / normalize special characters in English text.\n",
    "        :param cleaned_text: the text to clean\n",
    "        :return: the cleaned text\n",
    "        \"\"\"\n",
    "        # Strip the tokens *, (s) and 's.\n",
    "        cleaned_text = re.sub(\"[*]|\\(s\\)|\\'s\", \" \", cleaned_text)\n",
    "    \n",
    "        # Slash as 'per': 'days/week', 'NNN/year' -> '... a ...'.\n",
    "        def rule1(pattern):\n",
    "            matched_string = pattern.group(0)\n",
    "            return matched_string.replace(\"/\", \" a \")\n",
    "    \n",
    "        cleaned_text = re.sub(\"day(s)?/week|\\d+(\\s)?/year(s)?\", rule1, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\s+/\\s+\", \"/\", cleaned_text)\n",
    "    \n",
    "        # Remaining 'x/y' alternatives become 'x or y'.\n",
    "        def rule2(pattern):\n",
    "            matched_string = pattern.group(0)\n",
    "            return matched_string.replace(\"/\", \" or \")\n",
    "    \n",
    "        cleaned_text = re.sub(\"[A-Za-z]+/[A-Za-z]+\", rule2, cleaned_text)\n",
    "        cleaned_text = re.sub(\"[A-Za-z]+/\\d+\", rule2, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\d+/[A-Za-z]+\", rule2, cleaned_text)\n",
    "    \n",
    "        # Dollar amounts and salary ranges, e.g. '$50,000 - $60,000'.\n",
    "        def rule3(pattern):\n",
    "            match_string = pattern.group(0)\n",
    "    \n",
    "            match_string = match_string.replace(\"between\", \"\").replace(\"and\", \" to \")\n",
    "            return match_string.replace(\",\", \"\").replace(\" \", \"\").replace(\"$\", \"dollar\").replace(\"-\", \" to \")\n",
    "    \n",
    "        cleaned_text = re.sub(\"\\$\\d+(,)?\\d+(\\s)?-(\\s)?\\$\\d+(,)?\\d+\", rule3, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\$\\d+(,)?\\d+(\\s)?to(\\s)?\\$\\d+(,)?\\d+\", rule3, cleaned_text)\n",
    "        cleaned_text = re.sub(\"between\\s+\\$\\d+(,)?\\d+(\\s+)?and(\\s+)?\\$\\d+(,)?\\d+\", rule3, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\$\", \"dollar\", cleaned_text)\n",
    "    \n",
    "        # Other special cases.\n",
    "        cleaned_text = re.sub(\"401\\(k\\)\", \"401k\", cleaned_text)\n",
    "        cleaned_text = re.sub(\"#.*?\\s|@.*?\\s|\\[\\s+[Ll]ink\\s+removed\\s+]|\\(\\d+\\)|\\s+-\\s+\", \" \", cleaned_text)\n",
    "        # Remove phone numbers.\n",
    "        cleaned_text = re.sub(r'\\d+-\\d+-\\d+', '', cleaned_text)\n",
    "        cleaned_text = re.sub(r'\\d+-\\d+', '', cleaned_text)\n",
    "\n",
    "        # Remove URLs.\n",
    "        cleaned_text = re.sub(r'https?://\\S+|www\\.\\S+', '', cleaned_text)\n",
    "\n",
    "        # Strip HTML tags.\n",
    "        pattern = re.compile('<.*?>')\n",
    "        cleaned_text = re.sub(pattern, '', cleaned_text)\n",
    "        \n",
    "        # Drop thousands separators inside numbers (10,000 -> 10000).\n",
    "        cleaned_text = re.sub('(?<=[0-9])\\,(?=[0-9])', \"\", cleaned_text)\n",
    "        \n",
    "        # Split a number glued to a capitalized word, e.g. '5Years' -> '5 Years'.\n",
    "        def SplitNumericCharacters(pattern):\n",
    "            matched_string = pattern.group(0)\n",
    "            for index, i in enumerate(matched_string):\n",
    "                if not i.isdigit():\n",
    "                    break\n",
    "            return matched_string[:index]+\" \"+matched_string[index:]\n",
    "\n",
    "        cleaned_text = re.sub('[0-9]+[A-Z]', SplitNumericCharacters, cleaned_text)\n",
    "        \n",
    "        # Truncate decimals to their integer part, e.g. '3.5' -> '3'.\n",
    "        def solve_float(pattern):\n",
    "            matched_string = pattern.group(0)\n",
    "            # FIX: float() instead of eval() — identical result for the\n",
    "            # digit-only matches of this pattern, without evaluating text.\n",
    "            return str(int(float(matched_string)))\n",
    "        \n",
    "        cleaned_text = re.sub('[0-9]+\\.[0-9]+', solve_float, cleaned_text)\n",
    "        \n",
    "        # Normalize common abbreviations.\n",
    "        cleaned_text = re.sub(\"e\\.g\\.\", \" eg \", cleaned_text, flags=re.IGNORECASE)\n",
    "        cleaned_text = re.sub(\"b\\.g\\.\", \" bg \", cleaned_text, flags=re.IGNORECASE)\n",
    "        cleaned_text = re.sub(\"D\\.C\\.\", \"DC \", cleaned_text, flags=re.IGNORECASE)\n",
    "        cleaned_text = re.sub(\"i\\.e\\.\", \" ie \", cleaned_text, flags=re.IGNORECASE)\n",
    "        cleaned_text = re.sub(\"[Ii]nc\\.\", \" Inc \", cleaned_text, flags=re.IGNORECASE)\n",
    "        cleaned_text = re.sub('\\%', \" percent \", cleaned_text)\n",
    "        cleaned_text = re.sub('\\&', \" and \", cleaned_text)\n",
    "        cleaned_text = re.sub('\\|', \" and \", cleaned_text)\n",
    "        cleaned_text = re.sub(r\" (the[\\s]+|The[\\s]+)?U\\.S\\.(A)? \", \" America \", cleaned_text)\n",
    "        cleaned_text = re.sub(r\"U\\.S\\.(A)?\", \" America \", cleaned_text)\n",
    "        cleaned_text = cleaned_text.replace(\"\\\\n\",\" \")\n",
    "        cleaned_text = cleaned_text.replace(\"\\\\\",\" \")\n",
    "        # Pad selected punctuation with spaces so it tokenizes separately.\n",
    "        def pad_str(s):\n",
    "            return ' '+s+' '\n",
    "        \n",
    "        def pad_pattern(pattern):\n",
    "            matched_string = pattern.group(0)\n",
    "            return pad_str(matched_string)\n",
    "        cleaned_text = re.sub('[\\!\\?\\^\\+\\*\\/\\~\\|\\`\\=\\:\\;\\.\\\\\\]', pad_pattern, cleaned_text)\n",
    "        \n",
    "        # Drop quoted/bracketed fragments whose content carries no meaning\n",
    "        # according to the spaCy vocabulary.\n",
    "        def quoted_string_parser(pattern):\n",
    "            string = pattern.group(0)\n",
    "            parsed = self.nlp(string[1:-1])\n",
    "            is_meaningful = False\n",
    "            for token in parsed:\n",
    "                # if one of the token is meaningful, we'll consider the full string is meaningful\n",
    "                if len(token.text) > 2 and not token.text.isdigit() and token.has_vector:\n",
    "                    is_meaningful = True\n",
    "            if is_meaningful:\n",
    "                return string\n",
    "            else:\n",
    "                return ''\n",
    "    \n",
    "        cleaned_text = re.sub('\\\".*?\\\"', quoted_string_parser, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\'.*?\\'\", quoted_string_parser, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\(.*?\\)\", quoted_string_parser, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\[.*?\\]\", quoted_string_parser, cleaned_text)\n",
    "        cleaned_text = re.sub(\"\\{.*?\\}\", quoted_string_parser, cleaned_text)\n",
    "        cleaned_text = re.sub(' s ', \" \", cleaned_text)\n",
    "\n",
    "        # Keep ASCII characters only.\n",
    "        cleaned_text = ''.join([char for char in cleaned_text if ord(char) < 128])\n",
    "        # Collapse newlines and redundant whitespace.\n",
    "        cleaned_text = \" \".join([word.strip() for word in cleaned_text.split()])\n",
    "        return cleaned_text\n",
    "\n",
    "    def fenJu(self, text):\n",
    "        \"\"\"\n",
    "        Split text into sentences with spaCy, normalize in-sentence newlines,\n",
    "        and make sure every sentence ends with ' .'.\n",
    "        :param text: raw text\n",
    "        :return: the re-joined, sentence-normalized text\n",
    "        \"\"\"\n",
    "        doc = self.nlp(text)\n",
    "        # Iterate over the detected sentences.\n",
    "        texts = list()\n",
    "        for sent in doc.sents:\n",
    "            sent = sent.text.strip()\n",
    "            # FIX: re.S was previously passed as the positional `count`\n",
    "            # argument of re.sub (silently limiting it to 16 replacements);\n",
    "            # it is now passed as `flags`, which is what was intended.\n",
    "            sent = re.sub(\":\\n\\n+?\", \" : \", sent, flags=re.S)\n",
    "            for i in re.findall(\".\\n+.\", sent, re.S):\n",
    "                if re.findall(\"(\\W)\\n+(\\W)\", i):\n",
    "                    str1 = \"\".join(re.findall(\"(\\W)\\\\n+(\\W)\", i)[0]).strip()\n",
    "                    if str1 and str1 in [',', ';', \" \", \":\"]:\n",
    "                        sent = sent.replace(i, i.replace(\"\\n\", \" \"))\n",
    "                    else:\n",
    "                        sent = sent.replace(i, f\"{i[0]} , {i[-1]}\")\n",
    "                if re.findall(\"\\w(\\n+?)\\w\", i):\n",
    "                    if len(re.findall(\"\\w(\\n+?)\\w\", i)[0]) >= 2:\n",
    "                        sent = sent.replace(i, f\"{i[0]} , {i[-1]}\")\n",
    "                    else:\n",
    "                        sent = sent.replace(i, f\"{i[0]} . {i[-1]}\")\n",
    "            # Skip fragments too short to be real sentences.\n",
    "            if not len(sent) > 3:\n",
    "                continue\n",
    "            if sent[-1] != \".\":\n",
    "                sent = sent + \" .\"\n",
    "            texts.append(sent)\n",
    "        return \" \".join(texts)\n",
    "        \n",
    "    def run(self, text, type_):\n",
    "        \"\"\"\n",
    "        Clean one piece of English text.\n",
    "        :param text: the text to clean\n",
    "        :param type_: 1 to sentence-split the text first, 0 otherwise\n",
    "        \"\"\"\n",
    "        if type_ == 1:\n",
    "            text = self.fenJu(text)\n",
    "        # Normalize Chinese punctuation to English.\n",
    "        text = self.chinese_punctuation_to_english(text)\n",
    "        # Normalize full-width characters to half-width.\n",
    "        text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
    "        # Expand contractions.\n",
    "        text = self.Abbreviation_replacement(text)\n",
    "        # Remove special characters.\n",
    "        text = self.replace_special_char(text)\n",
    "        # Spell check, mainly for run-together word errors.\n",
    "        text = self.check_spelling(text)\n",
    "        return text\n",
    "\n",
    "    def data_clear(self, row):\n",
    "        \"\"\"\n",
    "        Clean a single row of self.df.\n",
    "        :param row: one row of self.df\n",
    "        :return: the processed row\n",
    "        \"\"\"\n",
    "        qualifications = row.get(\"Qualifications\")\n",
    "        # FIX: NaN is truthy, so the original check let missing values through\n",
    "        # and crashed the string concatenation; pd.notna() guards against that.\n",
    "        if qualifications and pd.notna(qualifications):\n",
    "            row['任职要求'] = self.run(row['任职要求']+\" . \"+row[\"Qualifications\"], 1)\n",
    "        else:\n",
    "            row['任职要求'] = self.run(row['任职要求'], 1)\n",
    "        row['职位'] = self.run(row['职位'], 0)\n",
    "        return row\n",
    "\n",
    "    def main_(self) -> DataFrame:\n",
    "        \"\"\"\n",
    "        Run the cleaning pipeline over the whole DataFrame.\n",
    "        :return: the cleaned DataFrame\n",
    "        \"\"\"\n",
    "        # FIX: assign the result back — apply() returns a new frame, and\n",
    "        # relying on mutation of the row passed to data_clear is undefined\n",
    "        # behaviour in pandas.\n",
    "        self.df = self.df.apply(func=self.data_clear, axis=1)\n",
    "        return self.df\n",
    "\n",
    "\n",
    "class ZhDataClear(EnDataClear):\n",
    "    \"\"\"\n",
    "    Utility class for cleaning Chinese-language job-posting data.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data_frame):\n",
    "        \"\"\"\n",
    "        Initialize the cleaner (delegates entirely to EnDataClear).\n",
    "        :param data_frame: input pandas DataFrame\n",
    "        \"\"\"\n",
    "        super().__init__(data_frame)\n",
    "\n",
    "    @staticmethod\n",
    "    def public_special_char(cleaned_text):\n",
    "        \"\"\"\n",
    "        Language-independent cleanup: strip e-mail domains, URLs, HTML tags,\n",
    "        list numbering and assorted invisible/bullet characters.\n",
    "        \"\"\"\n",
    "        # Remove e-mail domains (everything from '@' onwards).\n",
    "        cleaned_text = re.sub(r'@\\w+\\.\\w+', ' ', cleaned_text)\n",
    "        # Remove URLs.\n",
    "        cleaned_text = re.sub(r'https?://\\S+|www\\.\\S+', ' ', cleaned_text)\n",
    "        # Strip HTML tags.\n",
    "        pattern = re.compile('<.*?>')\n",
    "        cleaned_text = re.sub(pattern, ' ', cleaned_text)\n",
    "        # Remove list numbering.\n",
    "        cleaned_text = re.sub(r'（\\d+）', ' ', cleaned_text)\n",
    "        cleaned_text = re.sub(r'\\d\\.|\\d、|\\d\\s|[a-z]\\.|[A-Z]\\.|\\d,|\\\\n|\\\\r|\\\\t', ' ', cleaned_text)\n",
    "        # Remove Chinese section numbering (一、…十、 and （一）…（十）).\n",
    "        cleaned_text = re.sub(r'一、|二、|三、|四、|五、|六、|七、|八、|九、|十、', ' ', cleaned_text)\n",
    "        cleaned_text = re.sub(r'（一）|（二）|（三）|（四）|（五）|（六）|（七）|（八）|（九）|（十）', ' ', cleaned_text)\n",
    "        for i in re.findall(\"\\s+\\d\", cleaned_text):\n",
    "            if not \"\\n\" in i:\n",
    "                cleaned_text = cleaned_text.replace(i, \" \")\n",
    "        # Remove private-use / zero-width characters and bullets.\n",
    "        cleaned_text = re.sub(\n",
    "            r\"\\\\uf09e|\\\\uf09f|\\\\uf0b7|\\\\x9f|\\\\u200b|\\\\u2002|\\\\uf06c|\\\\xa0|\\\\u3000|\\\\uf0fc|\\\\uf0d8|\\\\ufeff|\\\\u2028|●|▪|/\",\n",
    "            \" \", cleaned_text)\n",
    "        # Remove circled digits.\n",
    "        cleaned_text = re.sub(r'①|②|③|④|⑤|⑥|⑦|⑧|⑨|⑩', ' ', cleaned_text)\n",
    "\n",
    "        return cleaned_text\n",
    "\n",
    "    @staticmethod\n",
    "    def zh_replace_special_char(cleaned_text):\n",
    "        \"\"\"\n",
    "        Remove special characters from Chinese text and normalize '.' to '。'.\n",
    "        :param cleaned_text: the text to clean\n",
    "        :return: the cleaned text\n",
    "        \"\"\"\n",
    "        # Strip emoji, bullets and other decorative characters.\n",
    "        cleaned_text = re.sub(\n",
    "            r\"🍁|🏁|💝|😁|#|\\🎖|★|😤|►|◆|🎸|💪|🤔|💎|↓|💜|‧|『|🚁|🔸|%|」|\\$|’|「|@|🎁|☆|⭐|】|\\\\t|❀|·|»|🏠|”|🔹|→|≥|🌻|🧩|■|【⃣▪|😎|«|´|🏅|\\+|️\\👏|😊|🏀|🐣|』|🌺|❗|✨|👣|🎻|\\^|🈶|＂|≦|\\*|\\\\r|✅|◎|§|∣|‘|💡|◼||🎉|�|🌈|🌸|…|♦|`|🥇|》|🌟|\\[|]|<|\\)|➕|}|=>|\\{|\\(|>|•|【|】|---|《|\\$|\\\\|👏|⃣|'|_\",\n",
    "            '', cleaned_text)\n",
    "\n",
    "        # Normalize '. -' and '.' to the Chinese full stop.\n",
    "        cleaned_text = re.sub(r'\\.\\s+-', '。', cleaned_text)\n",
    "        cleaned_text = re.sub(r'\\.', '。', cleaned_text)\n",
    "        # Collapse whitespace runs.\n",
    "        cleaned_text = re.sub(\"\\s+\", \" \", cleaned_text)\n",
    "        return cleaned_text\n",
    "\n",
    "    # def replace_special_char(self, cleaned_text):\n",
    "    #     # # 强制替换一些错误格式\n",
    "    #     # errors = re.findall(\"\\d\\.\",cleaned_text)\n",
    "    #     # for error in errors:\n",
    "    #     #     cleaned_text = cleaned_text.replace(error,\" \".join(list(error)))\n",
    "    # \n",
    "    #     cleaned_text = re.sub(\n",
    "    #         '‧|\"|%|\\ufeff|◆|:|;|—|–|«|-|\\)|#|•|>|\\\\n|]|\\[|»|\\\\uf0b7|\\\\t|~|\\(|♦|】|\\*|➕|…|\\'|\\\\r|●|\\+|【|\\\\|$|\\|',\n",
    "    #         \" \", cleaned_text)\n",
    "    #     cleaned_text = re.sub(\"\\s+\", \" \", cleaned_text)\n",
    "    #     return cleaned_text\n",
    "\n",
    "    def run(self, text, type_):\n",
    "        \"\"\"\n",
    "        Clean one piece of text, choosing the Chinese or English pipeline\n",
    "        based on the proportion of Latin letters.\n",
    "        :param type_: 1 to sentence-split English text first, 0 otherwise\n",
    "        :param text: the text to clean\n",
    "        \"\"\"\n",
    "        text = self.public_special_char(text)\n",
    "        # FIX: empty text would raise ZeroDivisionError in the ratio below;\n",
    "        # treat it as Chinese so it takes the lighter cleaning path.\n",
    "        if not text or not len(re.findall('[A-Z]|[a-z]', text)) / len(text) > 0.8:  # mostly Chinese\n",
    "            # Normalize full-width characters to half-width.\n",
    "            text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
    "            # Remove special characters.\n",
    "            text = self.zh_replace_special_char(text)\n",
    "        else:  # mostly English\n",
    "            if type_ == 1:\n",
    "                text = self.fenJu(text)\n",
    "            # Normalize Chinese punctuation to English.\n",
    "            text = self.chinese_punctuation_to_english(text)\n",
    "            # Normalize full-width characters to half-width.\n",
    "            text = self.convert_fullwidth_to_halfWidth(text.strip())\n",
    "            # Expand contractions.\n",
    "            text = self.Abbreviation_replacement(text)\n",
    "            # Remove special characters.\n",
    "            text = self.replace_special_char(text)\n",
    "            # Spell check.\n",
    "            text = self.check_spelling(text)\n",
    "        return text\n",
    "\n",
    "    def data_clear(self, row):\n",
    "        \"\"\"\n",
    "        Clean a single row of self.df.\n",
    "        :param row: one row of self.df\n",
    "        :return: the processed row\n",
    "        \"\"\"\n",
    "        row['任职要求'] = self.run(row['任职要求'],1)\n",
    "        row['职位'] = self.run(row['职位'],0)\n",
    "        return row\n",
    "\n",
    "    def main_(self) -> DataFrame:\n",
    "        \"\"\"\n",
    "        Run the cleaning pipeline over the whole DataFrame.\n",
    "        :return: the cleaned DataFrame\n",
    "        \"\"\"\n",
    "        # FIX: assign the result back — apply() returns a new frame, and\n",
    "        # relying on mutation of the row passed to data_clear is undefined\n",
    "        # behaviour in pandas.\n",
    "        self.df = self.df.apply(func=self.data_clear, axis=1)\n",
    "        return self.df\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前网站: CareerBuilder\n",
      "当前网站: Simplyhired\n",
      "当前网站: linkedin\n",
      "当前网站: CIA\n",
      "当前网站: DNI\n",
      "当前网站: 智联招聘\n",
      "当前网站: boss直聘\n"
     ]
    }
   ],
   "source": [
    "# Clean every sheet of the aggregated workbook, writing one CSV per site.\n",
    "workbook = openpyxl.load_workbook('../Data/数据汇总.xlsx')\n",
    "path_ = \"../Data/数据清洗/\"\n",
    "if not os.path.exists(path_):\n",
    "    os.mkdir(path_)\n",
    "for sheet_name in workbook.sheetnames:\n",
    "    save_path = path_ + f\"{sheet_name}.csv\"\n",
    "    print(f\"当前网站: {sheet_name}\")\n",
    "    df = pd.read_excel('../Data/数据汇总.xlsx', sheet_name=sheet_name, index_col=0)\n",
    "    # Sheet names containing CJK characters belong to Chinese sites.\n",
    "    if re.search('[\\u4e00-\\u9fa5]', sheet_name):  # Chinese-language site\n",
    "        df = ZhDataClear(data_frame=df).main_()\n",
    "    else:  # English-language site\n",
    "        df = EnDataClear(data_frame=df).main_()\n",
    "    df.to_csv(save_path)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-11-07T19:46:51.251733300Z",
     "start_time": "2023-11-07T19:33:45.066070500Z"
    }
   },
   "id": "28c9311ea8ed14aa"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
     "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
