#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 19:51:39 2018
@project: 天池比赛-A股主板上市公司公告信息抽取
@group: MZH_314
@author: LHQ

@desc:
    基于prepare_trainingdata_zengjianchi_text.py的输出结果进行二次处理,
    处理的目的为为下一步抽取的实体构造特征，以便分类来选择我们要的实体

"""
import ast
import os
import re
from itertools import chain

import pandas as pd
from ltp import Ner

from reportIE.ner import make_recognizer
from reportIE.ner import CompanyAlias
from reportIE.preprocess import make_converter


class ContextExtractor:
    """Context extractor.

    Locates entities inside a sentence and yields each entity together
    with the text to its left and right.

    Attributes
    ----------
    getters : list
        Each element is a 2-tuple ``(getter, tag)``.  A getter receives a
        sentence and iterates over ``(start, end)`` index pairs for one
        class of entity; ``tag`` is the label string attached to every
        span that getter produces.
    tmp_getters : list
        Same structure as ``getters`` but meant to be registered per
        document and discarded via :meth:`clear_tmp_index_getter`.
    """

    def __init__(self):
        self.getters = []
        self.tmp_getters = []

    def add_index_getter(self, getter, tag):
        """Register a permanent getter.

        Args
        ----
        getter : func
            Callable taking a sentence and returning (or yielding) the
            ``(start, end)`` index pairs of one entity class.
        tag : str
            Label attached to every span the getter emits, e.g. ``date``.
            Use it consistently in downstream processing.
        """
        self.getters.append((getter, tag))

    def add_tmp_index_getter(self, getter, tag):
        """Register a temporary getter.

        Same arguments as :meth:`add_index_getter`.
        """
        self.tmp_getters.append((getter, tag))

    def clear_tmp_index_getter(self):
        """Drop all temporary getters."""
        self.tmp_getters.clear()

    def extract(self, sentence):
        """Extract entity contexts from a sentence.

        Args
        ----
        sentence : str
            The sentence to scan.

        Yields
        ------
        item : dict
            Keys: ``left``/``right`` context strings, ``entity`` text,
            ``entity_tag`` label and ``index`` span tuple.
        """
        # Permanent getters run first, then the temporary ones.
        for getter, tag in chain(self.getters, self.tmp_getters):
            for begin, stop in getter(sentence):
                yield {
                    'left': sentence[:begin],
                    'right': sentence[stop:],
                    'entity': sentence[begin:stop],
                    'entity_tag': tag,
                    "index": (begin, stop),
                }

    @classmethod
    def of_default(cls):
        """Build an extractor pre-loaded with the standard recognizers."""
        instance = cls()
        # One recognizer per built-in entity class; the recognizer kind
        # doubles as the tag string.
        for kind in ("date", "amount", "percent", "stockquantity"):
            recognizer = make_recognizer(kind)
            instance.add_index_getter(recognizer.recognize_entity_span, kind)
        return instance


def extract_context(df):
    """Extract entity contexts for every report in *df*.

    Args
    ----
    df : pd.DataFrame
        Must contain columns ``report_id``, ``origin_text``, ``is_target``
        and, for training data, ``std_data``.

    Returns
    -------
    pd.DataFrame
        One row per recognized entity with its left/right context,
        entity tag, span index, full/short company name and report id.
    """
    ner = Ner()
    extractor = ContextExtractor.of_default()
    data = []
    recognize_org = ner.recog_org
    try:
        for report_id, gp in df.groupby("report_id"):
            origin_texts = gp["origin_text"].tolist()
            alias = CompanyAlias(origin_texts, recognize_org)

            # Collect candidate organization names: alias table + NER output.
            orgs = set()
            namesmap = alias.get_allalias()
            for k, v in namesmap.items():
                # Generic self-references are too ambiguous to keep as names.
                if k not in ("公司", "本公司"):
                    orgs.add(k)
                orgs.add(v)
            for text in origin_texts:
                orgs.update(recognize_org(text))

            # Fresh regex-based recognizer per report.  (The original built
            # one recognizer, discarded it, and built a second — only one
            # is needed.)
            org_recognizer = make_recognizer("re")
            for org in orgs:
                if org is None:
                    continue
                try:
                    org_recognizer.add_pattern(org)
                except Exception:
                    # Best-effort: org strings may contain regex
                    # metacharacters that make an invalid pattern.
                    continue
            extractor.add_tmp_index_getter(org_recognizer.recognize_entity_span, "org")

            contexts = []
            for i, row in gp.iterrows():
                is_target = row["is_target"]
                sentence = row['origin_text']
                for item in extractor.extract(sentence):
                    item["is_target"] = is_target
                    try:
                        item['std_data'] = row['std_data']
                    except KeyError:
                        # Column absent for prediction (non-training) data.
                        pass
                    contexts.append(item)
            extractor.clear_tmp_index_getter()

            # Distinguish company short names from full names.
            for item in contexts:
                org = item['entity']
                new_item = {k: v for k, v in item.items() if k != "entity"}
                new_item["origin_entity"] = org
                fullname = alias.get_fullname_for_short(org) if org else None
                new_item["full_entity"] = fullname if fullname is not None else org
                new_item["short_entity"] = org if fullname is not None else None
                new_item['report_id'] = report_id
                data.append(new_item)
        dfall = pd.DataFrame(data)
    finally:
        # Release the NER engine even if processing fails part-way.
        ner.release()
    return dfall


def process_field(df):
    """Normalize field entities in place.

    Converts each row's ``full_entity`` to its standard form according to
    the row's ``entity_tag``; rows with unknown tags (e.g. ``org``) pass
    through unchanged.

    Args
    ----
    df : pd.DataFrame
        Must contain columns ``entity_tag`` and ``full_entity``.

    Returns
    -------
    pd.DataFrame
        Same rows with normalized ``full_entity`` values.
    """
    # Dispatch table: entity tag -> converter.  Note "stockquantity" maps
    # to the quantity converter and "amount" to the price converter.
    converters = {
        "date": make_converter("date"),
        "percent": make_converter("percent"),
        "stockquantity": make_converter("quantity"),
        "amount": make_converter("price"),
    }

    data_new = []
    for i, row in df.iterrows():
        converter = converters.get(row["entity_tag"])
        if converter is not None:
            row["full_entity"] = converter(row["full_entity"])
        data_new.append(row)
    df_new = pd.concat(data_new, axis=1).T
    return df_new


def label_field(df):
    """Label target rows by matching entities against the gold-standard data.

    For every row with ``is_target == 1``, parses the row's ``std_data``
    (a textual list of gold records) and sets ``label`` to the field name
    whose value equals the row's ``full_entity``; otherwise the label is
    "其他".

    Args
    ----
    df : pd.DataFrame
        Must contain columns ``is_target``, ``full_entity`` and ``std_data``.

    Returns
    -------
    pd.DataFrame
        The target rows with an added ``label`` column.
    """
    std_field = ["公告id", "股东全称", "股东简称", "变动截止日期", "变动价格", "变动数量", "变动后持股数", "变动后持股比例"]

    # Gold numeric fields are normalized to strings so they compare against
    # the (string) extracted entity; failures are left untouched best-effort.
    numeric_casts = (
        ("变动价格", str),
        ("变动数量", lambda x: str(int(x))),
        ("变动后持股数", lambda x: str(int(x))),
        ("变动后持股比例", str),
    )
    nan_pattern = re.compile("nan")

    df_target = df.query("is_target==1")
    data = []
    for i, row in df_target.iterrows():
        entity = row["full_entity"]
        # std_data is the repr of a list written by the upstream script;
        # bare `nan` tokens are rewritten so the literal parses.  Replaced
        # the original eval() with ast.literal_eval to avoid executing
        # arbitrary expressions from the data file.
        std_data = ast.literal_eval(nan_pattern.sub('""', row['std_data']))
        row['label'] = "其他"
        for std in std_data:
            std_map = dict(zip(std_field, std))
            for field, cast in numeric_casts:
                try:
                    std_map[field] = cast(std_map[field])
                except (KeyError, TypeError, ValueError):
                    pass

            # Last matching field wins, as in the original implementation.
            for k, v in std_map.items():
                if entity == v:
                    row['label'] = k
        data.append(row)
    return pd.concat(data, axis=1).T


if __name__ == "__main__":
    trainingdata_path = os.path.abspath("../data/training_data/zengjianchi_text.csv")

    save_dir = os.path.abspath("../data/training_data")
    # exist_ok avoids the check-then-create race of exists() + mkdir().
    os.makedirs(save_dir, exist_ok=True)

    df = pd.read_csv(trainingdata_path)

    # Pipeline: context extraction -> field normalization -> labeling.
    df_new = extract_context(df)
    df_field = process_field(df_new)
    df_labeled = label_field(df_field)

    save_path = os.path.join(save_dir, "zengjianchi_text_field2.csv")

    cols = ["report_id",
            'entity_tag',
            'left',
            'right',
            'full_entity',
            'short_entity',
            'origin_entity',
            'std_data',
            'label',
            ]
    df_labeled[cols].to_csv(save_path, index=False)

            
        

    
    
    
    
    
