# coding:utf-8

import pandas as pd
from collections import  defaultdict
import thulac
import pickle
from gensim.models import KeyedVectors
from sklearn.svm import SVR,SVC
import re
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import metrics
import random
import numpy as np
from elmoformanylangs import Embedder
from sklearn.feature_extraction.text import TfidfVectorizer
from chinese2digit import chinese2digits,changeChineseNumToArab

def create_qd_sets(inf, outf):
    """Build a labelled dataset file from an annotated Excel sheet.

    Reads 'Sheet3' of *inf*; column 3 holds a Chinese-numeral law-article
    annotation and column 2 holds the case text.  Writes one
    "<label>\\t<text>" line per usable row to *outf* (UTF-8).

    Label rules:
      * NaN annotation                  -> 0
      * annotation containing '诉讼'    -> procedural-law article number + 451
      * otherwise                       -> criminal-law article number
    """
    # 'sheetname' (not 'sheet_name') kept: the file targets an old pandas,
    # as the sklearn.grid_search import elsewhere also shows.
    df = pd.read_excel(inf, sheetname='Sheet3')
    with open(outf, mode='w', encoding='utf-8') as out:
        for i in range(df.shape[0]):
            anno = df.iloc[i, 3]
            if pd.isna(anno):  # NaN annotation -> label 0
                label = 0
            elif '诉讼' in anno:
                # Procedural-law articles are offset past the criminal-law
                # label space by +451.
                label = int(changeChineseNumToArab(anno.strip())[3:-1]) + 451
            else:
                # NOTE(review): this branch yields a str while the others
                # yield int; str(label) below hides the mismatch — confirm
                # downstream readers parse the label column as int.
                label = changeChineseNumToArab(anno.strip())[1:-1]
            text = df.iloc[i, 2]
            if text == text:  # skip rows whose text cell is NaN
                out.write(str(label) + '\t' + text + '\n')
            print(i)  # progress indicator


def statsic_seq_length(inf, outf):
    """Tabulate the distribution of text lengths in a "<label>\\t<text>" file.

    Counts how many lines share each text length (in characters, trailing
    newline included, since the line is not stripped) and writes a
    two-column (length, count) table to the Excel file *outf*.
    """
    counts = defaultdict(int)
    with open(inf, encoding='utf-8') as f:
        for line in f:  # stream the file; no need to materialise readlines()
            counts[len(line.split('\t')[1])] += 1
    # Sorted by length so the resulting table is directly readable.
    df = pd.DataFrame(sorted(counts.items()))
    # The old encoding='utf-8' kwarg was a no-op for xlsx and is rejected
    # by pandas >= 2.0, so it is dropped.
    df.to_excel(outf, index=False)


def get_tfidf(inf, outf, dim=6000):
    """Fit a TF-IDF vectoriser on a "<label>\\t<text>" file and pickle it.

    Each line's text field is segmented with THULAC; a TfidfVectorizer over
    1-4-grams with at most *dim* features is fitted on the segmented corpus
    and dumped to *outf*.

    *dim* used to be read from a module-level global set in __main__; it is
    now an explicit parameter with the same default (6000), so the function
    also works when imported from another module.
    """
    thu = thulac.thulac(seg_only=True)
    corpus = []
    with open(inf, encoding='utf-8') as f:
        for i, line in enumerate(f):
            print(i)  # progress indicator
            corpus.append(thu.cut(line.split('\t')[1].strip(), text=True))

    tfidf = TfidfVectorizer(min_df=1, max_features=dim,
                            ngram_range=(1, 4), use_idf=1, smooth_idf=1)
    tfidf.fit(corpus)
    with open(outf, 'wb') as fh:  # close the handle (was leaked)
        pickle.dump(tfidf, fh)

def split_train_test(data_path, outf):
    """Shuffle a "<label>\\t<text>" file and split it ~90/10 into train/test.

    Texts are THULAC-segmented; labels become one-element int lists.
    Dumps [train_x, train_y, test_x, test_y] to *outf* with pickle.
    """
    thu = thulac.thulac(seg_only=True)
    train_x, train_y, test_x, test_y = [], [], [], []
    with open(data_path, encoding='utf-8') as f:
        lines = f.readlines()
    random.shuffle(lines)
    cutoff = int(len(lines) * 0.9)  # hoisted out of the loop
    for i, line in enumerate(lines):
        parts = line.split('\t')  # split once instead of twice per line
        x = thu.cut(parts[1].strip(), text=True)
        y = [int(parts[0])]
        if i <= cutoff:
            train_x.append(x)
            train_y.append(y)
        else:
            test_x.append(x)
            test_y.append(y)
        print(i)  # progress indicator
    with open(outf, 'wb') as fh:  # close the handle (was leaked)
        pickle.dump([train_x, train_y, test_x, test_y], fh)


def SVR_model(c, gamma, train_x, train_y, test_x, test_y):
    """Train an RBF-kernel SVC and score it on the test split.

    (The name says SVR for historical reasons — the regressor was swapped
    for a classifier; the name is kept so existing callers still work.)

    Fits SVC(C=c, gamma=gamma) on (train_x, train_y), predicts test_x and
    returns (micro-F1, micro-precision, micro-recall) against test_y.
    """
    clf = SVC(C=c, gamma=gamma)
    clf.fit(train_x, train_y)

    # Predictions cast to int so they compare cleanly with integer labels.
    y_preds = list(map(int, clf.predict(test_x)))
    f1 = metrics.f1_score(test_y, y_preds, average='micro')
    precision = metrics.precision_score(test_y, y_preds, average='micro')
    recall = metrics.recall_score(test_y, y_preds, average='micro')
    return f1, precision, recall

# def GBDT_model(train_x,train_y,test_x,test_y):


def train_svc(train_x_embedding, test_x_embedding):
    """Grid-search C/gamma for SVR_model, then retrain with the best pair.

    NOTE(review): reads the label lists train_y / test_y from module
    globals set in __main__ — the call order there is load-bearing.
    The grids are currently pinned to the best values found previously.
    """
    c_grid = [1000]
    gamma_grid = [0.01]
    best_c = 0
    best_gamma = 0
    best_f1 = 0
    for c in c_grid:
        for gamma in gamma_grid:
            # The first returned value is micro-F1 (not an error), so it is
            # named accordingly; selection keeps the HIGHEST f1.
            f1, p, r = SVR_model(c, gamma, np.squeeze(train_x_embedding),
                                 train_y, np.squeeze(test_x_embedding), test_y)
            print('C=', c, 'gamma=', gamma, 'f1=', f1, 'p=', p, 'r=', r)
            if f1 >= best_f1:
                best_f1 = f1
                best_gamma = gamma
                best_c = c
    print('best_c:', best_c, 'best_gamma:', best_gamma, 'f1:', best_f1)
    # Final fit with the winning hyper-parameters (result printed by caller
    # convention; return value of SVR_model is discarded here, as before).
    SVR_model(best_c, best_gamma, np.squeeze(train_x_embedding), train_y,
              np.squeeze(test_x_embedding), test_y)


def get_sentence_embedding_by_emlo(elmo_path, train_test_path, outf):
    """Embed pre-segmented train/test texts with ELMo and pickle the result.

    (The 'emlo' typo in the name is kept for caller compatibility.)
    Each space-separated text is truncated to its first 800 tokens; the
    per-token ELMo vectors are averaged over the sequence axis (axis=1).
    Dumps [train_x_embedding, test_x_embedding] to *outf*.
    """
    e = Embedder(elmo_path)
    with open(train_test_path, 'rb') as fh:  # close the handle (was leaked)
        train_x, train_y, test_x, test_y = pickle.load(fh)

    def _embed(text):
        # One sentence per call: batch of one, average over its tokens.
        tokens = [text.split(' ')[:800]]
        return np.average(e.sents2elmo(tokens), axis=1)

    train_x_embedding = []
    for i, text in enumerate(train_x):
        print('train:', i)  # progress indicator
        train_x_embedding.append(_embed(text))
    test_x_embedding = []
    for i, text in enumerate(test_x):
        print('test:', i)  # progress indicator
        test_x_embedding.append(_embed(text))
    with open(outf, 'wb') as fh:
        pickle.dump([train_x_embedding, test_x_embedding], fh)


def get_sentence_embedding_by_glove(glove_path, train_test_path, outf):
    """Embed pre-segmented texts by averaging their GloVe word vectors.

    Words missing from the model vocabulary are skipped.
    NOTE(review): a text with no in-vocabulary word makes np.average fail
    on an empty list — presumably never happens for this corpus; confirm.
    NOTE(review): only the TRAIN embeddings are pickled (the line dumping
    both is commented out in the original) — looks deliberate, left as-is.
    """
    model = KeyedVectors.load_word2vec_format(glove_path)
    with open(train_test_path, 'rb') as fh:  # close the handle (was leaked)
        train_x, train_y, test_x, test_y = pickle.load(fh)

    def _avg_vectors(text):
        # Average the vectors of the in-vocabulary words of one text.
        # 'in model.vocab' replaces the redundant 'in model.vocab.keys()'.
        vecs = [model[w] for w in text.split(' ') if w in model.vocab]
        return np.average(vecs, axis=0)

    train_x_embedding = []
    for i, x in enumerate(train_x):
        print('train:', i)  # progress indicator
        train_x_embedding.append(_avg_vectors(x))
    test_x_embedding = []
    for i, x in enumerate(test_x):
        print('test:', i)  # progress indicator
        test_x_embedding.append(_avg_vectors(x))
    with open(outf, 'wb') as fh:
        pickle.dump(train_x_embedding, fh)






if __name__ == '__main__':
    # Feature dimensionality. NOTE(review): get_tfidf reads this as a
    # module-level global, so it must be assigned before that call.
    dim = 6000
    # One-off preprocessing steps, run once then commented out:
    #create_qd_sets('./datasets/match_result_re.xlsx','./datasets/datas.txt')
    #statsic_seq_length('./datasets/datas.txt','./datasets/statsic_length.xlsx')
    # Fit and pickle the TF-IDF vectoriser on the full corpus.
    get_tfidf('./datasets/all_datas.txt','./datasets/all_tfidf-'+str(dim)+'.pkl')
    #split_train_test('./datasets/datas.txt','./datasets/train_test_data.pkl')
    # Globals consumed elsewhere; min_error appears unused by the active path.
    min_error = 50000
    max_f1 = 0
    # train_y / test_y are read as globals by train_svc -> SVR_model below.
    train_x, train_y, test_x, test_y = pickle.load(open('./datasets/train_test_data.pkl','rb'))
    #get_sentence_embedding_by_glove('./datasets/w2v_xingshi.txt','./datasets/all_laws.pkl','./datasets/law_glove_embedding.pkl')
    # Vectorise the segmented texts with the previously fitted TF-IDF model.
    tfidf = pickle.load(open('./datasets/all_tfidf-'+str(dim)+'.pkl','rb'))
    train_x = tfidf.transform(train_x).toarray()
    test_x = tfidf.transform(test_x).toarray()
    # Earlier experiments (final model export, ELMo/GloVe embeddings):
    # model = SVC(C=1000,gamma=0.01,probability=True)
    # model.fit(train_x,train_y)
    # pickle.dump([model,tfidf],open('./datasets/model_1828.pkl','wb'))
    # train_x_embedding,test_x_embedding = pickle.load(open('./datasets/glove_embedding.pkl','rb'))
    #train_x_embedding2, test_x_embedding2 = pickle.load(open('./datasets/jianti_embedding.pkl', 'rb'))
    #train_x_embedding3, test_x_embedding3 = pickle.load(open('./datasets/new_embedding.pkl', 'rb'))


    # Active experiment: SVC grid search on TF-IDF features.
    train_svc(train_x,test_x)


    # GBDT
    # gbm0 = GradientBoostingClassifier(random_state=10)
    # gbm0.fit(train_x,train_y)
    # y_pred = gbm0.predict(test_x)
    # print("f1 : %.4g" % metrics.f1_score(test_y,y_pred,average='macro'))
    # param_test1 = {'n_estimators': [i for i in range(20, 81, 10)]}
    # gsearch1 = GridSearchCV(estimator=GradientBoostingClassifier(learning_rate=0.1, min_samples_split=300,
    #                                                              min_samples_leaf=20, max_depth=8, max_features='sqrt',
    #                                                              subsample=0.8, random_state=10),
    #                         param_grid=param_test1, scoring=None, iid=False, cv=5)
    # train_y = np.squeeze(train_y,axis=1)
    # gsearch1.fit(train_x, train_y)
    # print(gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_)

    # param_test2 = {'max_depth': [i for i in range(3, 14, 2)], 'min_samples_split': [i for i in range(100, 801, 200)]}
    # gsearch2 = GridSearchCV(
    #     estimator=GradientBoostingClassifier(learning_rate=0.1, n_estimators=80, min_samples_leaf=20,
    #                                          max_features='sqrt', subsample=0.8, random_state=10),
    #     param_grid=param_test2, scoring=None, iid=False, cv=5)
    # train_y = np.squeeze(train_y, axis=1)
    # gsearch2.fit(train_x, train_y)
    # print(gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_)
    #get_sentence_embedding('./new_checkpoint/','./datasets/train_test_data.pkl','./datasets/new_embedding.pkl')

    # param_test3 = {'min_samples_split': [i for i in range(800, 1900, 200)], 'min_samples_leaf': [i for i in range(60, 101, 10)]}
    # gsearch3 = GridSearchCV(estimator=GradientBoostingClassifier(learning_rate=0.1, n_estimators=80, max_depth=3,
    #                                                              max_features='sqrt', subsample=0.8, random_state=10,min_samples_split=100),
    #                         param_grid=param_test3, scoring=None, iid=False, cv=5)
    # train_y = np.squeeze(train_y,axis=1)
    # gsearch3.fit(train_x, train_y)
    #
    # print(gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_)



