{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# default_exp predefined_problems.ner_data\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Pre-defined Problems\n",
    "\n",
    "Preprocessing functions for pre-defined NER and Chinese word-segmentation problems (Weibo/Golden Horse, BosonNLP, MSRA)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export\n",
    "\n",
    "from glob import glob\n",
    "import re\n",
    "import random\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "from m3tl.utils import cluster_alphnum\n",
    "\n",
    "from m3tl.preproc_decorator import preprocessing_fn\n",
    "\n",
    "# Named-entity type abbreviations\n",
    "NER_TYPE = ['LOC',  # location\n",
    "            'GPE',  # geo-political entity (country, city, district...)\n",
    "            'PER',  # person\n",
    "            'ORG',  # organization\n",
    "            'PRD',  # Product\n",
    "            ]\n",
    "\n",
    "\n",
    "def gold_horse_ent_type_process_fn(d):\n",
    "    \"\"\"golden horse ent type process fn\n",
    "    Source: https://github.com/hltcoe/golden-ho rse\n",
    "\n",
    "    Entity type:\n",
    "\n",
    "        B, I, O: Begining \\ In middle \\ Outside of entity\n",
    "        GPE: Country, City, District...\n",
    "        LOC: Location, zoo, school...\n",
    "        PER: Person\n",
    "        ORG: Organiazation\n",
    "        NAM: Entity\n",
    "        NOM: More general, 女生, 男的...\n",
    "\n",
    "        Example:\n",
    "            B-PER.NAM\n",
    "\n",
    "    Only keep NAM here\n",
    "    So after process:\n",
    "        B-PER\n",
    "\n",
    "    Arguments:\n",
    "        ent_type {str} -- ent type from gold_horse data\n",
    "\n",
    "    Returns:\n",
    "        str -- processed enttype\n",
    "    \"\"\"\n",
    "    ent_type = d.split('\\t')[1].replace('\\n', '')\n",
    "    # keep nam only\n",
    "    ent_type = ent_type if 'NAM' in ent_type else 'O'\n",
    "    ent_type = ent_type.replace('.NAM', '')\n",
    "    return ent_type\n",
    "\n",
    "\n",
    "def chinese_literature_ent_type_process_fn(d):\n",
    "    \"\"\"Not match my need\n",
    "\n",
    "    Arguments:\n",
    "        d {[type]} -- [description]\n",
    "\n",
    "    Returns:\n",
    "        [type] -- [description]\n",
    "    \"\"\"\n",
    "\n",
    "    ent_type = d.split(' ')[1].replace('\\n', '')\n",
    "    return ent_type\n",
    "\n",
    "\n",
    "def read_ner_data(file_pattern='data/ner/weiboNER*', proc_fn=None):\n",
    "    \"\"\"Read data from golden horse data\n",
    "\n",
    "\n",
    "    Arguments:\n",
    "        file_pattern {str} -- file patterns\n",
    "\n",
    "    Returns:\n",
    "        dict -- dict, key: 'train', 'eval', value: dict {'inputs', 'target'}\n",
    "    \"\"\"\n",
    "\n",
    "    result_dict = {\n",
    "        'train': {\n",
    "            'inputs': [],\n",
    "            'target': []\n",
    "        },\n",
    "        'eval': {\n",
    "            'inputs': [],\n",
    "            'target': []\n",
    "        }\n",
    "    }\n",
    "    file_list = glob(file_pattern)\n",
    "    for file_path in file_list:\n",
    "        with open(file_path, 'r', encoding='utf8') as f:\n",
    "            raw_data = f.readlines()\n",
    "\n",
    "        inputs_list = [[]]\n",
    "        target_list = [[]]\n",
    "        for d in raw_data:\n",
    "            if d != '\\n':\n",
    "                # put first char to input\n",
    "                inputs_list[-1].append(d[0])\n",
    "                ent_type = proc_fn(d)\n",
    "                target_list[-1].append(ent_type)\n",
    "            else:\n",
    "                inputs_list.append([])\n",
    "                target_list.append([])\n",
    "\n",
    "        # remove trailing empty str/list\n",
    "        if not inputs_list[-1]:\n",
    "            del inputs_list[-1]\n",
    "        if not target_list[-1]:\n",
    "            del target_list[-1]\n",
    "\n",
    "        inputs_with_ent = []\n",
    "        target_with_ent = []\n",
    "        for inputs, target in zip(inputs_list, target_list):\n",
    "            # if len(set(target)) > 1:\n",
    "            inputs_with_ent.append(inputs)\n",
    "            target_with_ent.append(target)\n",
    "\n",
    "        if 'train' in file_path or 'dev' in file_path:\n",
    "            result_dict['train']['inputs'] = inputs_with_ent\n",
    "            result_dict['train']['target'] = target_with_ent\n",
    "        else:\n",
    "            result_dict['eval']['inputs'] = inputs_with_ent\n",
    "            result_dict['eval']['target'] = target_with_ent\n",
    "    return result_dict\n",
    "\n",
    "\n",
    "def get_weibo_ner_fn(file_path):\n",
    "    \"\"\"Build a preprocessing function for the Weibo (Golden Horse) NER task.\n",
    "\n",
    "    Arguments:\n",
    "        file_path {str} -- glob pattern of the Weibo NER data files\n",
    "\n",
    "    Returns:\n",
    "        callable -- decorated preprocessing fn returning (inputs, targets)\n",
    "    \"\"\"\n",
    "    @preprocessing_fn\n",
    "    def weibo_ner(params, mode):\n",
    "        split = 'train' if mode == 'train' else 'eval'\n",
    "        data = read_ner_data(\n",
    "            file_pattern=file_path,\n",
    "            proc_fn=gold_horse_ent_type_process_fn)[split]\n",
    "        return data['inputs'], data['target']\n",
    "    return weibo_ner\n",
    "\n",
    "\n",
    "def gold_horse_segment_process_fn(d):\n",
    "    ent_type = d.split('\\t')[0][-1]\n",
    "    if ent_type not in ['0', '1', '2']:\n",
    "        ent_type = '0'\n",
    "    return ent_type\n",
    "\n",
    "\n",
    "def get_weibo_cws_fn(file_path):\n",
    "    \"\"\"Build a preprocessing function for the Weibo word-segmentation task.\n",
    "\n",
    "    Arguments:\n",
    "        file_path {str} -- glob pattern of the Weibo CWS data files\n",
    "\n",
    "    Returns:\n",
    "        callable -- decorated preprocessing fn returning (inputs, targets)\n",
    "    \"\"\"\n",
    "    @preprocessing_fn\n",
    "    def weibo_cws(params, mode):\n",
    "        data = read_ner_data(file_pattern=file_path,\n",
    "                             proc_fn=gold_horse_segment_process_fn)\n",
    "        split_data = data['train'] if mode == 'train' else data['eval']\n",
    "        return split_data['inputs'], split_data['target']\n",
    "    return weibo_cws\n",
    "\n",
    "\n",
    "def read_bosonnlp_data(file_pattern, eval_size=0.2):\n",
    "    \"\"\"Read BosonNLP NER data annotated inline as '{{type:entity}}'.\n",
    "\n",
    "    Documents are split into sentences on terminal punctuation, then\n",
    "    each sentence is converted to parallel per-character input/label\n",
    "    lists with B-/I- prefixed entity labels.  Sentences are partitioned\n",
    "    into train/eval splits with a fixed random state.\n",
    "\n",
    "    Arguments:\n",
    "        file_pattern {str} -- glob pattern of the BosonNLP data files\n",
    "        eval_size {float} -- fraction of sentences for the eval split\n",
    "\n",
    "    Returns:\n",
    "        dict -- dict, key: 'train', 'eval', value: dict {'inputs', 'target'}\n",
    "\n",
    "    Raises:\n",
    "        FileNotFoundError -- if the pattern matches no file\n",
    "    \"\"\"\n",
    "    file_list = glob(file_pattern)\n",
    "    # sentence terminators (ASCII and full-width ! / ?, Chinese full stop)\n",
    "    sentence_split = r'[!?。？！]'\n",
    "\n",
    "    # BosonNLP annotation type -> short entity label\n",
    "    project_table = {\n",
    "        'person_name': 'PER',\n",
    "        'company_name': 'ORG',\n",
    "        'location': 'LOC',\n",
    "        'product_name': 'PRD',\n",
    "        'time': 'TME',\n",
    "        'org_name': 'ORG2'\n",
    "    }\n",
    "    input_list = []\n",
    "    target_list = []\n",
    "\n",
    "    if not file_list:\n",
    "        raise FileNotFoundError('Please make sure you have downloaded BosonNLP\\\n",
    "        data and put it in the path you specified. \\\n",
    "        Download: https://bosonnlp.com/resources/BosonNLP_NER_6C.zip')\n",
    "\n",
    "    for file_path in file_list:\n",
    "        with open(file_path, 'r', encoding='utf8') as f:\n",
    "            data_list = f.readlines()\n",
    "\n",
    "        for doc in data_list:\n",
    "            # NOTE(review): docs containing '}}}}' are skipped entirely,\n",
    "            # presumably malformed/nested annotations -- confirm intent\n",
    "            if '}}}}' in doc:\n",
    "                continue\n",
    "            splited_doc = re.split(sentence_split, doc)\n",
    "\n",
    "            for sentence in splited_doc:\n",
    "\n",
    "                # one inputs/target pair per sentence\n",
    "\n",
    "                input_list.append([])\n",
    "                target_list.append([])\n",
    "\n",
    "                # split by '{{'; each chunk may start with an annotation\n",
    "                # 'type:entity}}' followed by plain text\n",
    "                doc_chunk_list = sentence.split('{{')\n",
    "                for chunk in doc_chunk_list:\n",
    "                    if '}}' not in chunk or ':' not in chunk:\n",
    "                        # plain text chunk: every char labeled 'O'\n",
    "                        target_list[-1] += ['O']*len(chunk)\n",
    "                        input_list[-1] += list(chunk)\n",
    "                    else:\n",
    "                        ent_chunk, text_chunk = chunk.split('}}')\n",
    "                        punc_ind = ent_chunk.index(':')\n",
    "                        ent_type = ent_chunk[:punc_ind]\n",
    "                        ent = ent_chunk[punc_ind+1:]\n",
    "                        if ent_type in project_table:\n",
    "                            # cluster_alphnum (m3tl.utils) presumably groups\n",
    "                            # alphanumeric runs into single units -- TODO confirm\n",
    "                            ent = cluster_alphnum(ent)\n",
    "                            for char_ind, ent_char in enumerate(ent):\n",
    "                                # B- for the first char, I- for the rest\n",
    "                                if char_ind == 0:\n",
    "                                    loc_char = 'B'\n",
    "                                else:\n",
    "                                    loc_char = 'I'\n",
    "                                target_list[-1].append(loc_char +\n",
    "                                                       '-'+project_table[ent_type])\n",
    "                                input_list[-1].append(ent_char)\n",
    "                        else:\n",
    "                            # unknown annotation type: treat entity as plain text\n",
    "                            target_list[-1] += ['O']*len(ent)\n",
    "                            input_list[-1] += list(ent)\n",
    "\n",
    "                        # text after '}}' is outside any entity\n",
    "                        target_list[-1] += ['O']*len(text_chunk)\n",
    "                        input_list[-1] += list(text_chunk)\n",
    "\n",
    "    # drop empty sentences; inputs and targets must stay aligned\n",
    "    return_input, return_target = [], []\n",
    "    for inp, tar in zip(input_list, target_list):\n",
    "        if inp and tar:\n",
    "            return_input.append(inp)\n",
    "            return_target.append(tar)\n",
    "        assert len(inp) == len(tar)\n",
    "\n",
    "    train_input, eval_input, train_target, eval_target = train_test_split(\n",
    "        return_input, return_target, test_size=eval_size, random_state=1024)\n",
    "    result_dict = {\n",
    "        'train': {},\n",
    "        'eval': {}\n",
    "    }\n",
    "    result_dict['train']['inputs'] = train_input\n",
    "    result_dict['train']['target'] = train_target\n",
    "    result_dict['eval']['inputs'] = eval_input\n",
    "    result_dict['eval']['target'] = eval_target\n",
    "    return result_dict\n",
    "\n",
    "\n",
    "def read_msra(file_pattern, eval_size):\n",
    "    \"\"\"Read MSRA NER data ('<text>/<tag>' tokens, space separated).\n",
    "\n",
    "    Words tagged nr/nt/ns become B-/I- prefixed PER/ORG/LOC labels at\n",
    "    the character level; every other word is labeled 'O'.  Sentences\n",
    "    are partitioned into train/eval splits with a fixed random state.\n",
    "\n",
    "    Arguments:\n",
    "        file_pattern {str} -- glob pattern of the MSRA data files\n",
    "        eval_size {float} -- fraction of sentences for the eval split\n",
    "\n",
    "    Returns:\n",
    "        dict -- dict, key: 'train', 'eval', value: dict {'inputs', 'target'}\n",
    "    \"\"\"\n",
    "    file_list = glob(file_pattern)\n",
    "\n",
    "    # MSRA POS-style tag -> entity type\n",
    "    project_table = {\n",
    "        'nr': 'PER',\n",
    "        'nt': 'ORG',\n",
    "        'ns': 'LOC'\n",
    "    }\n",
    "\n",
    "    input_list = []\n",
    "    target_list = []\n",
    "\n",
    "    for file_path in file_list:\n",
    "        with open(file_path, 'r', encoding='utf8') as f:\n",
    "            data_list = f.readlines()\n",
    "\n",
    "        for sentence in data_list:\n",
    "            sentence = sentence.replace('\\n', '')\n",
    "            input_list.append([])\n",
    "            target_list.append([])\n",
    "            for word in sentence.split(' '):\n",
    "                if not word:\n",
    "                    continue\n",
    "                # rsplit on the LAST '/': tokens whose text itself\n",
    "                # contains '/' (e.g. URLs) no longer raise ValueError\n",
    "                ent, ent_type = word.rsplit('/', 1)\n",
    "                # cluster_alphnum (m3tl.utils) presumably groups\n",
    "                # alphanumeric runs into single units -- TODO confirm\n",
    "                ent = cluster_alphnum(ent)\n",
    "                if ent_type not in project_table:\n",
    "                    input_list[-1] += list(ent)\n",
    "                    target_list[-1] += ['O'] * len(ent)\n",
    "                else:\n",
    "                    for char_ind, ent_char in enumerate(ent):\n",
    "                        # B- for the first char, I- for the rest\n",
    "                        loc_char = 'B' if char_ind == 0 else 'I'\n",
    "                        target_list[-1].append(\n",
    "                            loc_char + '-' + project_table[ent_type])\n",
    "                        input_list[-1].append(ent_char)\n",
    "\n",
    "    # drop empty sentences; inputs and targets must stay aligned\n",
    "    return_input, return_target = [], []\n",
    "    for inp, tar in zip(input_list, target_list):\n",
    "        assert len(inp) == len(tar)\n",
    "        if inp and tar:\n",
    "            return_input.append(inp)\n",
    "            return_target.append(tar)\n",
    "\n",
    "    train_input, eval_input, train_target, eval_target = train_test_split(\n",
    "        return_input, return_target, test_size=eval_size, random_state=1024)\n",
    "    result_dict = {\n",
    "        'train': {\n",
    "            'inputs': train_input,\n",
    "            'target': train_target\n",
    "        },\n",
    "        'eval': {\n",
    "            'inputs': eval_input,\n",
    "            'target': eval_target\n",
    "        }\n",
    "    }\n",
    "    return result_dict\n",
    "\n",
    "\n",
    "def get_msra_ner_fn(file_path):\n",
    "    \"\"\"Build a preprocessing function for the MSRA NER task.\n",
    "\n",
    "    Arguments:\n",
    "        file_path {str} -- glob pattern of the MSRA data files\n",
    "\n",
    "    Returns:\n",
    "        callable -- decorated preprocessing fn returning (inputs, targets)\n",
    "    \"\"\"\n",
    "    @preprocessing_fn\n",
    "    def msra_ner(params, mode):\n",
    "        msra_data = read_msra(file_pattern=file_path, eval_size=0.2)\n",
    "        split = 'train' if mode == 'train' else 'eval'\n",
    "        inputs_list = list(msra_data[split]['inputs'])\n",
    "        target_list = list(msra_data[split]['target'])\n",
    "        return inputs_list, target_list\n",
    "    return msra_ner\n",
    "\n",
    "\n",
    "def get_boson_ner_fn(file_path):\n",
    "    \"\"\"Build a preprocessing function for the BosonNLP NER task.\n",
    "\n",
    "    Arguments:\n",
    "        file_path {str} -- glob pattern of the BosonNLP data files\n",
    "\n",
    "    Returns:\n",
    "        callable -- decorated preprocessing fn returning (inputs, targets)\n",
    "    \"\"\"\n",
    "    @preprocessing_fn\n",
    "    def boson_ner(params, mode):\n",
    "        boson_data = read_bosonnlp_data(file_pattern=file_path, eval_size=0.2)\n",
    "        split = 'train' if mode == 'train' else 'eval'\n",
    "        inputs_list = list(boson_data[split]['inputs'])\n",
    "        target_list = list(boson_data[split]['target'])\n",
    "        return inputs_list, target_list\n",
    "    return boson_ner"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
