# -*- coding: utf-8 -*-

import json
import pickle

import jieba
import numpy as np
import pandas as pd

# TODO: load the known entity names from entity_list.csv into the jieba segmentation dictionary
from tqdm import tqdm
from sklearn.metrics.pairwise import cosine_similarity
from pandas.io.json import json_normalize

from extract_sen_vec import gen_sen_vec

def data_to_df():
    """Flatten train/dev JSON into flat per-record rows and write them to CSV.

    Each source record keeps only its FIRST ``lab_result`` entry; the output
    columns are: text_id, text, mention, offset, kb_id.

    Side effects: writes data/train.csv and data/valid.csv.
    Returns None.
    """
    def _flatten(path):
        # One row per record; context manager guarantees the handle is closed
        # (the original left the file objects from open() unclosed).
        with open(path, 'r', encoding='utf-8') as f:
            records = json.load(f)
        rows = []
        for one in records:
            # NOTE(review): only lab_result[0] is kept — confirm that records
            # with multiple mentions are intentionally truncated.
            first = one['lab_result'][0]
            rows.append({
                'text_id': one['text_id'],
                'text': one['text'],
                'mention': first['mention'],
                'offset': first['offset'],
                'kb_id': first['kb_id'],
            })
        return rows

    # pd.json_normalize replaces the deprecated pandas.io.json.json_normalize,
    # which was removed from modern pandas.
    train = pd.json_normalize(_flatten('data/train.json'))
    valid = pd.json_normalize(_flatten('data/dev.json'))
    train.to_csv('data/train.csv', index=None)
    valid.to_csv('data/valid.csv', index=None)

def view():
    """Ad-hoc inspection helper for the generated CSVs.

    Columns: text_id  text  mention  offset  kb_id
    Sorts both splits by mention and groups them to count how many sentences
    exist per mention to disambiguate. All prints are left commented out;
    uncomment them to see the counts. Returns None.
    """
    train_data = pd.read_csv('data/train.csv')
    valid_data = pd.read_csv('data/valid.csv')
    # Dropped the unused read of data/company_2_code_sub.txt — the original
    # loaded the entity table here but never used it.
    train_data = train_data.sort_values(by='mention')
    valid_data = valid_data.sort_values(by='mention')
    # print(train_data.count())  # 15718 non-null rows
    # print(valid_data.count())  # 1630 non-null rows
    # Sentence count per mention to disambiguate.
    train_entities = train_data.groupby('mention').count()
    valid_entities = valid_data.groupby('mention').count()
    # print(train_entities.count())  # 327 distinct mentions
    # print(valid_entities.count())  # 302 distinct mentions
# view()
def gen_X(train_data, mention):
    """Collect sentence vectors and kb ids for all rows matching *mention*.

    train_data: DataFrame with at least 'mention', 'kb_id' and 'text' columns.
    Returns (X, mention_ids): X[i] is the sentence vector of the i-th
    matching row's text, mention_ids[i] its knowledge-base id, in row order.
    """
    matching = train_data.loc[train_data['mention'] == mention, ['kb_id', 'text']]
    vectors = []
    kb_ids = []
    for row in matching.itertuples(index=False):
        vectors.append(gen_sen_vec(row.text))
        kb_ids.append(row.kb_id)
    return vectors, kb_ids
def gen_one_mention_res(valid_result):
    """Link every known entity mention found in the validation sentences.

    For each validation sentence and each entity name occurring in it, a
    ~20-character context window around the mention is vectorized and
    compared (cosine similarity) against the vectors of training sentences
    for the same mention; the kb_id of the most similar one is reported.

    valid_result: dict with a 'submit_result' list — one result dict per
    validation sentence is appended in place. Returns None.
    """
    train_data = pd.read_csv('data/train.csv')
    valid_data = pd.read_csv('data/valid.csv')

    def get_entityid(sentence):
        # X / mention_ids are closure variables assigned in the loop below
        # before this helper is ever called.
        vec = gen_sen_vec(sentence)
        sims = cosine_similarity([vec], X)[0]
        # argmax replaces argsort(...)[-1]: same top-1, no full sort.
        top_idx = int(np.argmax(sims))
        return mention_ids[top_idx], sims[top_idx]

    entity_data = pd.read_csv('data/company_2_code_sub.txt', sep='\t', encoding='utf-8')
    entity_list = entity_data['stock_name'].values.tolist()
    data = valid_data[['text_id', 'text']]
    for idx in tqdm(data.index):
        text_id = data.loc[idx, 'text_id']
        sentence = data.loc[idx, 'text']
        one_res = {'text_id': int(text_id), 'text': sentence, 'mention_result': []}
        for entity in entity_list:
            if entity not in sentence:
                continue
            # Candidate vectors for this mention, computed once per entity.
            X, mention_ids = gen_X(train_data, entity)
            X = np.array(X)
            k_len = len(entity)
            # Scan for every occurrence of the entity in the sentence.
            for i in range(len(sentence) - k_len + 1):
                if sentence[i:i + k_len] != entity:
                    continue
                # BUG FIX: the original if/elif chain had a gap (e.g. a match
                # at offset exactly 10 with enough right context hit no
                # branch), so neighbor_sentence silently reused the window
                # from the PREVIOUS match ('' for the very first). Clamping
                # the window bounds always yields a fresh, correct window.
                start = max(i - 10, 0)
                end = min(i + k_len + 10, len(sentence))
                neighbor_sentence = sentence[start:end]
                kb_id, confidence = get_entityid(neighbor_sentence)
                one_res['mention_result'].append({
                    'mention': entity,
                    'offset': int(i),
                    'kb_id': int(kb_id),
                    'confidence': float(confidence),
                })
        valid_result['submit_result'].append(one_res)
    return

def main_sen_per_entity():
    """Build the submission structure, run entity linking, persist results.

    Side effects: writes valid_result.pkl (pickle) and valid_result.json
    (UTF-8 JSON, non-ASCII preserved). Returns None.
    """
    # Dropped the unused read of data/company_2_code_sub.txt — the original
    # loaded `entities` here but never used them.
    valid_result = {'team_name': 'gogogo', 'submit_result': []}
    gen_one_mention_res(valid_result)
    # Context managers guarantee the output files are flushed and closed
    # (the original passed raw open() handles that were never closed).
    with open('valid_result.pkl', 'wb') as f:
        pickle.dump(valid_result, f)
    with open('valid_result.json', 'w', encoding='utf-8') as f:
        json.dump(valid_result, f, indent=4, ensure_ascii=False)


if __name__ == '__main__':
    # Guard the entry point so importing this module no longer triggers the
    # full (slow, file-writing) pipeline as a side effect.
    main_sen_per_entity()

