#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import re

import pandas as pd
from sklearn.externals import joblib

from reportIE.preprocess import ZjcReport
from reportIE.utils.load_data import load_reports
from reportIE.ner.alias import CompanyAlias
from reportIE.slot import ZjcTextSlot

from prepare_trainingdata_zengjianchi_text import process_texts
from train_zengjianchi_text_sentence import SentenceFeature
from prepare_trainingdata_zengjianchi_text_field import extract_context,process_field
from train_zengjianchi_text_field import FieldFeature


class EntitySpan:
    """Locate entity positions in text.

    Registers (regexp, tag) patterns and, for a given sentence, returns
    every match as an entity sequence ordered by position of occurrence.
    """

    def __init__(self):
        # (compiled_pattern, tag) pairs registered via add_pattern().
        self.patterns = []

    def add_pattern(self, regexp, tag):
        """Compile *regexp* and register it under *tag*."""
        self.patterns.append((re.compile(regexp), tag))

    def add_pattern_dict(self, pattern_dict):
        """Register every regexp -> tag entry of *pattern_dict*."""
        for regexp, tag in pattern_dict.items():
            self.add_pattern(regexp, tag)

    def get_span(self, sentence):
        """Return (start, end, matched_text, tag) tuples sorted by start offset."""
        hits = [
            (m.start(), m.end(), sentence[m.start():m.end()], tag)
            for pattern, tag in self.patterns
            for m in pattern.finditer(sentence)
        ]
        hits.sort(key=lambda span: span[0])
        return hits


if __name__ == "__main__":
    # Usage: script.py <html_dir> <save_path>; otherwise fall back to the
    # hard-coded data locations below.
    if len(sys.argv) == 3:
        html_dir = sys.argv[1]
        save_path = sys.argv[2]
    else:
        ### train data
        #html_dir = os.path.abspath("../data/round1_train_20180518/增减持/html")
        #save_path = os.path.abspath("../data/tmp/zengjianchi_text.csv")

        # test data
        html_dir = os.path.abspath("../data/FDDC_announcements_round1_test_b_20180708/增减持/html")
        save_path = os.path.abspath("../submit/zengjianchi_text.txt")

    #================================================
    save_dir = os.path.dirname(save_path)
    # makedirs creates intermediate directories too (os.mkdir fails when
    # more than one level is missing); exist_ok avoids a race between an
    # exists() check and the creation. Guard: dirname may be "" for a
    # bare filename.
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    # Load the sentence-level text classifier.
    sentence_clf_path = "models/zengjianchi/text_sentence_clf.m"
    sentence_clf = joblib.load(sentence_clf_path)
    # Load the field-level classifier.
    field_clf_path = "models/zengjianchi/text_field_clf.m"
    field_clf = joblib.load(field_clf_path)

    # ---- Split announcement text into paragraph records ----------------
    # Only table-less reports are handled by the text pipeline.
    datas = []
    all_alias = {}  # NOTE(review): collected but never read downstream
    for report_id, html in load_reports(html_dir):
        report = ZjcReport(html, report_id)
        if not report.has_table():
            contents = report.flat_contents2

            alias = CompanyAlias(contents)
            all_alias[report_id] = alias.get_allalias()

            new_contents = process_texts(contents)

            for txt in new_contents:
                datas.append({"report_id": report_id,
                              "origin_text": txt})

    df = pd.DataFrame(datas)

    # ---- Paragraph classification: keep sentences that contain the
    # ---- fields to be extracted ----------------------------------------
    docs = df["origin_text"].tolist()

    # Build the text feature matrix.
    sentence_feat = SentenceFeature.from_modelfile()
    feature = sentence_feat.build_feature(docs)
    # Sentence classification.
    df['is_target'] = sentence_clf.predict(feature)

    # ---- Field classification: pick the final target entities from the
    # ---- candidates ----------------------------------------------------
    df_target = df.query("is_target==1")
    df_context = extract_context(df_target)  # context around candidate fields
    df_field = process_field(df_context)     # normalise the candidate fields
    # Prepare the inputs.
    entity_tag = df_field["entity_tag"].tolist()
    left = df_field['left'].fillna("").tolist()
    right = df_field['right'].fillna("").tolist()
    # Build the entity feature matrix.
    field_feat = FieldFeature.from_modelfile()
    features = field_feat.build_feature(left, right, entity_tag)
    # Entity classification.
    df_field['label'] = field_clf.predict(features)

    # ---- Organise the predicted entities into a table ------------------
    df_result = df_field.query("label != '其他'")

    # report_id -> {origin_entity: {label, full_entity, short_entity}}
    entities = {}
    for report_id, gp in df_result.groupby("report_id"):
        d = {}
        for i, row in gp.iterrows():
            ent = row["origin_entity"]
            d[ent] = row[['label', "full_entity", "short_entity"]].to_dict()
        entities[report_id] = d

    # Fill the field slots to obtain structured records.
    std_data = []
    slot = ZjcTextSlot()
    for report_id, gp in df_target.groupby("report_id"):
        ents = entities.get(report_id, {})
        full_short = {item['full_entity']: item['short_entity']
                      for k, item in ents.items() if item['label'] == "股东全称"}

        entspan = EntitySpan()
        # Entity strings are literal text, not regular expressions:
        # escape them so regex metacharacters in company names (e.g. a
        # half-width "(") cannot raise re.error or match the wrong span.
        for ent, info in ents.items():
            entspan.add_pattern(re.escape(ent), info)
        for sentence in gp["origin_text"].values:
            sq = entspan.get_span(sentence)
            slot.reset()
            new_sq = [(q[3]["full_entity"], q[3]["label"]) for q in sq]
            slot.feed_sequence(new_sq)

            for item in slot.get_data():
                item["公告id"] = report_id
                item["股东简称"] = full_short.get(item['股东全称'])
                std_data.append(item)
            slot.reset()

    columns = ["公告id", "股东全称", "股东简称", "变动截止日期", "变动价格", "变动数量", "变动后持股数", "变动后持股比例"]
    df_std = pd.DataFrame(std_data).reindex(columns=columns)

    # Drop records missing either of the two mandatory fields.
    df_last = df_std.dropna(subset=["股东全称"]).dropna(subset=["变动截止日期"])
    df_last.to_csv(save_path, index=False, sep='\t', line_terminator='\r\n')
