# -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 09:57:39 2020

@author: yuanling
"""


import json
import math
import os
import string
from collections import defaultdict
from functools import reduce

import numpy as np
import pandas as pd
import jieba
import jieba.analyse
import jieba.posseg as pseg
from sklearn import model_selection
#from sklearn.feature_extraction.text import TfidfTransformer  
#from sklearn.feature_extraction.text import CountVectorizer 

# Resolve the user dictionary that sits next to this module and load it
# into jieba so that domain-specific terms are segmented as single tokens.
pt = os.path.join(os.path.dirname(os.path.abspath(__file__)), "user_dict.txt")
print(pt)
jieba.load_userdict(pt)

def get_corpus(dir):
    """Load a labelled dataset from an Excel file and split it.

    Parameters
    ----------
    dir : str
        Path to an Excel file whose first two columns are text and label.

    Returns
    -------
    tuple
        (trainDF, valid, labels): the full DataFrame, the validation
        split (index reset), and the list of distinct label values.
    """
    # Load the dataset and force the column names to 'text' / 'label'.
    trainDF = pd.read_excel(dir)
    trainDF.columns = ['text', 'label']
    # Split into training and validation subsets.
    train, valid = model_selection.train_test_split(trainDF)
    # Fix: DataFrame.ix was removed in pandas 1.0; iloc is the
    # positional equivalent and would otherwise raise AttributeError.
    labels = trainDF.iloc[:, 1].drop_duplicates().values.tolist()
    train = train.reset_index(drop=True)
    valid = valid.reset_index(drop=True)
    # NOTE(review): the FULL DataFrame (not the train split) is returned
    # for training, so validation rows are also seen at training time and
    # accuracy on `valid` will be optimistic. Kept as-is for backward
    # compatibility with existing callers — confirm whether intentional.
    return trainDF, valid, labels


def word_cut(sentences, allowPOS=('ns','nz','n','vd','v','vn','nt','d','ad','a')):
    """POS-tag and segment `sentences`, returning per-word frequencies.

    Only words whose POS flag is in `allowPOS` and whose stripped length
    is at least 2 are kept. Returns a DataFrame with columns
    ['word', 'num'] sorted by descending frequency.
    """
    counts = {}
    for token in pseg.cut(sentences):
        # Keep only allowed parts of speech and multi-character words.
        if token.flag in allowPOS and len(token.word.strip()) >= 2:
            counts[token.word] = counts.get(token.word, 0.0) + 1.0

    result = pd.DataFrame(pd.Series(counts), columns=['num'])
    result = result.reset_index().rename(columns={'index': 'word'})
    return result.sort_values(by='num', ascending=False)

def get_wfreq(list_words):
    """Count total occurrences of each token across all word lists.

    Parameters
    ----------
    list_words : iterable of iterables of hashable tokens.

    Returns
    -------
    defaultdict(int)
        Mapping token -> total occurrence count over every inner list.
    """
    # Bug fix: defaultdict was referenced without ever being imported,
    # so calling this function raised NameError (import added at the
    # top of the file).
    doc_frequency = defaultdict(int)
    for word_list in list_words:
        for token in word_list:
            doc_frequency[token] += 1
    return doc_frequency

def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses exist_ok=True instead of a separate os.path.exists() check,
    which closes the race window between the check and the creation.
    """
    os.makedirs(path, exist_ok=True)

def train_model(model_name, train, labels):
    """Build a per-label word-statistics dictionary and save it to Excel.

    For each label, segments every training text with word_cut, counts
    per-word document frequency ('num'), total frequency ('sum') and the
    document-frequency ratio ('idf'), then writes the combined table to
    <model_name>/dic.xlsx.

    Parameters
    ----------
    model_name : str
        Output directory (created if missing).
    train : pandas.DataFrame
        Must have 'text' and 'label' columns.
    labels : list
        Distinct label values to build statistics for.
    """
    df_dic = pd.DataFrame(columns=['word'])
    dic = pd.DataFrame()
    for cls in labels:
        df_seg = pd.DataFrame(columns=['word', 'num'])
        subset = train.loc[train['label'] == cls]
        # Renamed from `str`, which shadowed the builtin.
        texts = subset.loc[:, 'text'].values.tolist()
        for text in texts:
            seg = word_cut(text)
            df_seg = pd.concat([df_seg, seg], ignore_index=True)
        row = len(texts)
        df_dic = pd.concat([df_dic, df_seg], ignore_index=True)
        group_seg = df_seg.groupby(df_seg['word'])
        # count() over the per-document rows = number of documents in
        # this class that contain the word.
        group_idf = pd.DataFrame(group_seg.count())

        group_idf['sum'] = pd.DataFrame(group_seg.sum())
        # Document-frequency ratio: docs containing the word / docs in class.
        group_idf['idf'] = list(map(lambda x: x / row, group_idf['num']))
        group_idf['label'] = cls
        group_idf = group_idf.sort_values(by='sum', ascending=False)
        dic = pd.concat([dic, group_idf])

    mkdir(model_name)
    dic = dic.reset_index()
    dic.to_excel(os.path.join(model_name, 'dic.xlsx'))

def normal_probability(class_r):
    """Squash each [label, score] pair's raw score into (0, 1), in place.

    Scores are standardised against the list's own mean and standard
    deviation and passed through a logistic (sigmoid) function. When all
    scores are equal (std == 0) the division is skipped, so every entry
    maps to 0.5. Returns None; `class_r` is mutated.
    """
    scores = np.array(class_r)[:, 1].astype(float)
    mean_v = np.mean(scores)
    std_v = np.std(scores)
    for entry in class_r:
        centered = entry[1] - mean_v
        z = centered if std_v == 0 else centered / std_v
        entry[1] = 1 / (math.exp(-z) + 1)
        

def predict_model(model_name, text, prior=0.5, c=1e-6):
    """Score `text` against every label of a trained dictionary.

    Parameters
    ----------
    model_name : str
        Directory containing dic.xlsx produced by train_model.
    text : str
        Raw input sentence to classify.
    prior : float
        Prior probability applied to every class.
    c : float
        Smoothing factor applied once per keyword missing from a class.

    Returns
    -------
    list
        [label, probability] pairs sorted by descending raw score, then
        squashed into (0, 1) in place by normal_probability.
    """
    class_r = []
    dic = pd.read_excel(os.path.join(model_name, 'dic.xlsx'))
    seg_list = list(jieba.cut(text, cut_all=False))
    # Keep only the segmented words that appear anywhere in the dictionary.
    dic_d = pd.DataFrame(dic['word'].drop_duplicates().values.tolist())
    keywds = dic_d[dic_d[0].isin(seg_list)]
    keywds = keywds.reset_index(drop=True)
    keywds.columns = ['word']

    for label in dic['label'].drop_duplicates().values.tolist():
        keywd_match = pd.merge(keywds, dic[dic['label'] == label], on=['word'])
        # Fix: removed stray debug print(keywd_match) left in the
        # production path.
        if len(keywd_match) < 1:
            # No keyword overlaps with this class: pure smoothing.
            p = prior * c ** len(keywds)
        else:
            # Naive-Bayes-style product of per-word frequency ratios,
            # smoothed with c for every keyword absent from this class.
            p = (prior * reduce(lambda x, y: x * y, keywd_match['idf'])) * c ** (len(keywds) - len(keywd_match))
        class_r.append([label, p])
    class_r.sort(key=lambda x: x[1], reverse=True)
    normal_probability(class_r)
    return class_r
    
def accuracy_score(model_name, valid):
    """Predict every validation row, persist the results, return accuracy.

    Fix: DataFrame.ix was removed in pandas 1.0 and raised
    AttributeError here; .loc is the label-based replacement (the valid
    frame has a reset integer index, so labels equal positions).

    Parameters
    ----------
    model_name : str
        Directory holding the trained dictionary (and output file).
    valid : pandas.DataFrame
        Validation rows with 'text' and 'label' columns; mutated in
        place with 'predict' and 'acc' columns.

    Returns
    -------
    pandas.Series
        Correct-count / total-count (same one-element shape as before).
    """
    for i in range(len(valid)):
        class_i = predict_model(model_name, valid.loc[i, 'text'])
        # Best-scoring label comes first after predict_model's sort.
        valid.loc[i, 'predict'] = class_i[0][0]
    # Mark rows where the prediction matched the true label.
    valid['acc'] = valid[['label', 'predict']].apply(lambda x: x['label'] == x['predict'], axis=1)
    acc_s = valid.groupby('acc', as_index=False).count()

    valid.to_excel(os.path.join(model_name, 'valid_r.xlsx'))
    return acc_s.loc[acc_s['acc'] == True, 'text'] / acc_s['text'].sum()
        


def trainingAlgorithm(modeljson: dict):
    """Train a model from a config dict and return accuracy as JSON.

    Fix: the old annotation `modeljson: json` named the json MODULE,
    not a type; `dict` is what callers actually pass.

    Parameters
    ----------
    modeljson : dict
        Must contain 'training_file_path' and 'model_base_dir'.

    Returns
    -------
    str
        The accuracy Series serialised with to_json(orient="split").
    """
    print(modeljson)
    training_file = modeljson['training_file_path']
    model_base_dir = modeljson['model_base_dir']
    trainDF, valid, labels = get_corpus(training_file)
    train_model(model_base_dir, trainDF, labels)
    acc = accuracy_score(model_base_dir, valid)
    return acc.to_json(orient="split", force_ascii=False)

def callAlgorithm(modeljson: dict, params: str):
    """Classify the text in `params` using a previously trained model.

    Fix: the old annotation `modeljson: json` named the json MODULE,
    not a type; `dict` is what callers actually pass.

    Parameters
    ----------
    modeljson : dict
        Must contain 'model_base_dir', the trained-model directory.
    params : str
        The raw text to classify.

    Returns
    -------
    list
        [label, probability] pairs from predict_model.
    """
    print(modeljson, params)
    model_base_dir = modeljson['model_base_dir']
    result = predict_model(model_base_dir, params)
    return result

#if __name__ == "__main__":
    #trainDF,valid,labels = get_corpus('order1.xlsx')
    #valid.to_excel('valid.xlsx')
    #train_model('222',trainDF,labels)
    #acc = accuracy_score('222',valid)
    #print(predict_model('222', '42层东边的消防通道的门关的时候噪音很大，麻烦师傅去看下辛苦了'))
    #print(acc)
