#!/usr/bin/env python
# coding: utf-8

import pandas as pd
from zhNLP_build_dict import xl_dict
from zhNLP_build_dict import run_word_freq
from zhNLP_jieba import tbl_p_list


def read_xls(xls_fn):
    """Read every sheet of an Excel workbook into DataFrames.

    Parameters
    ----------
    xls_fn : str or path-like
        Path to the .xls/.xlsx file.

    Returns
    -------
    dict
        Mapping of sheet name -> DataFrame, with each sheet's first
        column used as the index (``index_col=0``).
    """
    # Parse all sheets from the single open ExcelFile instead of
    # re-opening the workbook from disk once per sheet.
    with pd.ExcelFile(xls_fn) as xls:
        return {
            sheet: xls.parse(sheet_name=sheet, index_col=0)
            for sheet in xls.sheet_names
        }

def update_zh_stopwords(d):
    """Merge all stop-word sources into one de-duplicated word list.

    Three sources from the workbook dict *d* are normalised to a single
    ``words`` column and concatenated:

    * the ``stopwords`` sheet (its unnamed column ``0``),
    * the ``periods`` sheet (its ``periods`` column),
    * rows of the ``word_freq`` sheet whose 类别 is ``'stop'``.

    Returns a single-column ``words`` DataFrame with duplicates removed
    and a fresh 0..n-1 index.
    """
    sources = [
        d['stopwords'].rename(columns={0: "words"}),
        d['periods'].rename(columns={"periods": "words"}),
        d['word_freq'].query("类别=='stop'")[['words']],
    ]
    merged = pd.concat(sources)
    return merged.drop_duplicates().reset_index(drop=True)


def update_修正(df, word_type="content"):
    """Return the corrected word list for one word category.

    Rows of ``df['word_freq']`` whose 类别 equals *word_type* are split
    into those with no correction (修正 is NaN — the original word is
    kept) and those with one (the corrected form replaces the word).

    Parameters
    ----------
    df : dict
        Workbook dict containing a ``word_freq`` DataFrame with
        columns 类别, words and 修正.
    word_type : str, default "content"
        Category value to select from the 类别 column.

    Returns
    -------
    DataFrame
        Single ``words`` column with a fresh 0..n-1 index; uncorrected
        words come first, corrected forms after.
    """
    freq = df['word_freq'].query("类别=='{c}'".format(c=word_type))
    has_fix = ~freq.修正.isna()
    keep_as_is = freq.loc[~has_fix, ['words']]
    corrected = freq.loc[has_fix, ['修正']].rename(columns={"修正": "words"})
    return pd.concat([keep_as_is, corrected]).reset_index(drop=True)


def update_jiaba_dict(content_words, entity_words, d):
    """Build the expanded user dictionary for the jieba segmenter.

    Concatenates *content_words* and *entity_words* (renaming their
    ``words`` column to ``word``), assigns each custom word a weight of
    ``5 ** len(word)`` so longer words win during segmentation, then
    appends the existing base dictionary ``d['dictionary']``.

    Returns the combined DataFrame with a fresh 0..n-1 index.
    """
    custom = pd.concat([content_words, entity_words]).rename(columns={"words": "word"})
    # Exponential weight: each extra character multiplies the weight by 5.
    custom["weight"] = custom["word"].map(lambda w: 5 ** len(w))
    return pd.concat([custom, d["dictionary"]]).reset_index(drop=True)


def BOW_split(BOW, list_entity):
    """Partition a bag-of-words into entity and non-entity tokens.

    Parameters
    ----------
    BOW : iterable of hashable tokens (typically str)
        Tokens to partition; original order is preserved in both lists.
    list_entity : iterable of hashable tokens
        The entity vocabulary.

    Returns
    -------
    dict
        ``{"lentity": [...], "lcontent": [...]}`` — tokens found in
        *list_entity* and the remaining tokens, respectively.
    """
    # Build the lookup set once: O(1) membership per token instead of a
    # linear scan of list_entity, and a single pass over BOW instead of two.
    entity_set = set(list_entity)
    lentity, lcontent = [], []
    for token in BOW:
        (lentity if token in entity_set else lcontent).append(token)
    return {"lentity": lentity, "lcontent": lcontent}


def build_dataset(df, entity_words):
    """Add the dataset columns by splitting each row's bag-of-words.

    For every row of *df*, the ``BOW_JB`` token list is partitioned by
    :func:`BOW_split` into entity tokens (``lentity``) and content
    tokens (``lcontent``), which are joined onto a copy of *df* as two
    new columns.

    Parameters
    ----------
    df : DataFrame
        Must contain a ``BOW_JB`` column of token lists.
    entity_words : DataFrame
        Must contain a ``words`` column listing entity vocabulary.

    Returns
    -------
    DataFrame
        Copy of *df* with ``lentity`` and ``lcontent`` columns added.
    """
    result = df.copy()
    vocabulary = entity_words['words'].to_list()
    splits = {
        idx: BOW_split(result.loc[idx, 'BOW_JB'], vocabulary)
        for idx in result.index
    }
    # Transpose so rows align with df's index and the split keys become columns.
    return result.join(pd.DataFrame(splits).T)

