# coding:utf-8

import os.path
import pickle
import time
import datetime
import numpy as np
from operator import itemgetter
import math
import tornado.web
import tornado.httpserver
import re
import requests
import json
import thulac
from jieba.analyse import textrank
from gensim.models import KeyedVectors
import argparse
from bert_serving.client import BertClient
from tornado.options import define, options
from predict import Predict
import data_io,params, SIF_embedding
from chinese2digit import chinese2digits,changeChineseNumToArab





class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalar and array types.

    Generalized from the original, which matched only the exact
    ``np.int32``/``np.float32`` types: the abstract bases ``np.integer`` and
    ``np.floating`` cover every width (int8..int64, float16..float64), so
    e.g. ``np.int64`` — the default integer dtype on most platforms — no
    longer raises TypeError.
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class, which raises TypeError.
        return super(MyEncoder, self).default(obj)

class IndexHandler(tornado.web.RequestHandler):
    """Landing page handler: GET / renders the search form."""

    def get(self):
        # Template is resolved against the application's template_path.
        self.render('index.html')

class PoemPageHandler(tornado.web.RequestHandler):
    """Form endpoint: classify submitted text against criminal / criminal-
    procedure law articles and render the result page."""

    def post(self):
        text = self.get_argument('content').strip()
        law_type = self.get_argument('type').strip()
        top_n = int(self.get_argument('nums').strip())
        embedding_type = self.get_argument('embedding_type')
        matches = self.get_predict_result(text, embedding_type)[:top_n]
        self.render('result.html', result=matches, content=text, type=law_type)

    def get_predict_result(self, content, embedding_type):
        """Classify *content* with the SVM predictor; when the classifier
        abstains (label 0) or yields fewer than two candidates at less than
        full confidence, pad with cosine-similarity matches."""
        yred, test_x, prob = predict.predict(content, embedding_type)
        if yred == 0:
            # No label from the classifier: rank everything by similarity.
            candidates = self.get_similar(embedding_type, test_x)
        else:
            labelled = self.get_law(yred)
            labelled['cos'] = prob
            candidates = [labelled]

        if prob < 1.0 and len(candidates) < 2:
            for extra in self.get_similar(embedding_type, test_x):
                # Skip the article we already have; keep the rest.
                if extra['result'] != candidates[0]['result']:
                    candidates.append(extra)
        return sorted(candidates, key=itemgetter('cos'), reverse=True)

    def get_law(self, yred):
        """Map a 1-based label to its article text and display number.

        Labels 1..451 are criminal-law articles; larger labels are
        criminal-procedure-law articles.
        """
        if 0 < yred <= 451:
            return {'result': xing_laws[yred - 1].split('    ')[1].strip(),
                    'num': '刑法第' + str(yred) + '条'}
        # NOTE(review): the list index uses yred-452 while the displayed
        # number uses yred-450 — off by one relative to each other;
        # preserved as-is, confirm against the label encoding.
        return {'result': susong_laws[yred - 451 - 1].split(' ')[1].strip(),
                'num': '刑事诉讼法第' + str(yred - 450) + '条'}

    def get_similar(self, embedding_type, test_x):
        """Cosine similarity of *test_x* against every stored article
        embedding (glove or tf-idf); keeps only positive similarities."""
        matches = []
        use_glove = (embedding_type == 'glove')
        for idx in range(len(law_tfidf)):
            if use_glove:
                sim = np.dot(test_x, law_glove[idx]) / (
                    (np.sqrt(np.sum(np.square(test_x)))) *
                    (np.sqrt(np.sum(np.square(law_glove[idx])))))
            else:
                # tf-idf vectors are 2-D (1 x vocab); unwrap the scalar.
                sim = np.dot(test_x, law_tfidf[idx]) / (
                    (np.sqrt(np.sum(np.square(test_x), axis=1))) *
                    (np.sqrt(np.sum(np.square(law_tfidf[idx])))))
                sim = sim[0]
            if sim > 0:
                entry = self.get_law(idx + 1)
                entry['cos'] = sim
                matches.append(entry)
        return matches

class PredcitByNet(tornado.web.RequestHandler):
    """JSON/form API endpoint matching legal text to criminal-law articles.

    POST accepts either a JSON body ({"content", "type", "nums",
    "embedding_type"}) or the same fields as form arguments, and writes
    back {"result": [...]} where each entry carries num/result/cos/words.
    (Class name typo kept: it is referenced by the URL routing table.)
    """

    def _field(self, raw, name, default, form_cast=None):
        """Fetch one request field.

        Reads from the parsed JSON body *raw* when one was supplied,
        otherwise from the form arguments (applying *form_cast* to the raw
        string). Any failure — missing argument/key, bad value — yields
        *default*, so a malformed request still gets an answer.
        """
        try:
            if raw == "":
                value = self.get_argument(name)
                return form_cast(value) if form_cast is not None else value
            return raw[name]
        except Exception:
            return default

    def post(self):
        # The body may be JSON; fall back to form arguments when it is not.
        # BUG FIX: the original passed ``encoding='utf-8'`` to json.loads —
        # that keyword was removed in Python 3.9, so every JSON body raised
        # TypeError and silently fell through to the form-argument path.
        try:
            raw = json.loads(self.request.body.decode('utf-8'))
        except Exception:
            raw = ""
        content = self._field(raw, 'content', "",
                              lambda v: v.strip().replace('\r\n', ''))
        # Parsed for parity with the form, currently unused downstream.
        req_type = self._field(raw, 'type', "司法解释", str.strip)
        nums = self._field(raw, 'nums', 1, lambda v: int(v.strip()))
        embedding_type = self._field(raw, 'embedding_type', "标签分类")

        cited = re.search(r'.*?刑法第.*?条', content)
        if cited is not None:
            # The text itself cites an article ("...刑法第X条"): return that
            # article directly with full confidence.
            txt = cited.group()
            start = txt.find('刑法第') + 3
            article = changeChineseNumToArab(txt[start:txt.find('条', start)])
            result = [{'num': '刑法第' + article + '条',
                       'result': xing_laws[int(article) - 1].split('    ')[1].strip(),
                       'cos': 1.0,
                       'words': ""}]
        else:
            # Strategy dispatch; anything unrecognised falls back to BERT.
            strategies = {
                '标签分类': self.get_predict_result,
                '关键词匹配': self.get_predict_by_word_match,
                '案由匹配': self.get_predict_by_case_classify,
                'ELMo匹配': self.get_predict_by_elmo,
                'glove句向量': self.get_sentence_embedding_by_SIF,
            }
            strategy = strategies.get(embedding_type, self.get_predict_by_bert)
            result = strategy(content)[:nums]

        self.write(json.dumps({'result': result}, cls=MyEncoder))
        self.finish()

    # Match articles via the text-classification network.
    def get_predict_result(self, content):
        results, probs = predict.predict_by_net(content)
        return [{'num': '刑法第' + str(article) + '条',
                 'result': xing_laws[article - 1].split('    ')[1].strip(),
                 'cos': prob,
                 'words': ''}
                for article, prob in zip(results, probs)]

    # Match articles by keyword similarity (word2vec over textrank keywords).
    def get_predict_by_word_match(self, content):
        laws = [line.split('    ')[-1].strip() for line in xing_laws]
        match_laws = self.match_result(content.replace('\r\n', ''), 30, laws, w2v)
        res = []
        for match_law in match_laws:
            # match_law = [article_no, matched_count, {word, cos}, ...]
            words_matched = [entry['word'] for entry in match_law[2:]]
            res.append({'num': '刑法第' + str(match_law[0]) + '条',
                        'result': laws[match_law[0] - 1],
                        'cos': '',
                        'words': words_matched})
        return res

    # Determine the cause of action via the external classification service.
    def get_predict_by_case_classify(self, content):
        payload = json.dumps({"params": {"result_type": "flat",
                                         "anyou_type": "xingshi",
                                         "src": content}})
        req = requests.post('http://172.23.7.100:8000/anyouClassify', data=payload)
        body = req.json()  # parse once instead of twice
        print(body)
        return [{'num': label['DM'],
                 'result': label['anyou_label'],
                 'cos': label['prob'],
                 'words': ''}
                for label in body['xingshi']['labels']]

    def match_result(self, explanation, topN, laws, w2v):
        """Rank *laws* by keyword-level word2vec similarity to *explanation*.

        Returns a list, best match first, of
        ``[article_no, n_matched_words, {word, cos}, ...]``.
        """
        # Strip digits, latin letters, Chinese numerals and "第...条" markers
        # so textrank keys on substantive words only. Compiled once and
        # reused for every law (the original recompiled it per law).
        noise = re.compile(r'\d|[a-z]|[A-Z]|第|[(（）)一二三四五六七八九十百千万亿]|条(?!(岁|周岁))')
        threshold = math.sqrt(2) / 2  # cosine >= √2/2, i.e. angle <= 45°
        keywords = textrank(noise.sub('', explanation), topK=topN)
        law_match_results = []
        for j, law in enumerate(laws):
            law_keys = textrank(noise.sub('', law), topK=topN)
            sims = [j + 1]  # leading element is the 1-based article number
            for key in keywords:
                if key not in w2v.vocab:
                    continue
                pairs = [{'word': key + '-' + lk, 'cos': w2v.similarity(key, lk)}
                         for lk in law_keys if lk in w2v.vocab]
                if pairs:
                    best = max(pairs, key=itemgetter('cos'))
                    if best['cos'] >= threshold:
                        sims.append(best)
            law_match_results.append(sims)
        match_law = sorted(law_match_results, key=self.sum_cos, reverse=True)
        for ml in match_law:
            # Record the matched-word count at index 1. Zero-match laws
            # still report 1 — preserves the original insert semantics.
            ml.insert(1, max(len(ml) - 1, 1))
        return match_law

    def sum_cos(self, i):
        """Total cosine similarity of one law's matched keyword pairs."""
        return sum(entry['cos'] for entry in i[1:])

    def get_predict_by_elmo(self, content):
        """Rank articles by cosine similarity of ELMo sentence vectors
        (token vectors fetched from an external service, then averaged)."""
        tokens = [thu.cut(content, text=True).split(' ')]
        req = requests.post('http://172.23.7.100:8090/embedding',
                            data=json.dumps({"query": tokens}))
        embedding = np.average(req.json()['embedding'][0], axis=0)
        query_norm = np.sqrt(np.sum(np.square(embedding)))  # hoisted out of the loop
        d = [{'num': '刑法第' + str(i + 1) + '条',
              'result': xing_laws[i].split('    ')[1].strip(),
              'words': '',
              'cos': np.round(np.dot(embedding, law) /
                              (query_norm * np.sqrt(np.sum(np.square(law)))), 2)}
             for i, law in enumerate(law_elmo)]
        return sorted(d, key=itemgetter('cos'), reverse=True)

    def get_predict_by_bert(self, content):
        """Rank articles by cosine similarity of BERT sentence embeddings."""
        query_embedding = bc.encode([content.strip()])
        query_norm = np.sqrt(np.sum(np.square(query_embedding)))  # hoisted out of the loop
        d = [{"num": '刑法第' + str(i + 1) + '条',
              "result": xing_laws[i].split('    ')[1].strip(),
              "words": "",
              "cos": np.round(np.dot(query_embedding, law) /
                              (query_norm * np.sqrt(np.sum(np.square(law)))), 2)}
             for i, law in enumerate(law_bert_embeddings)]
        return sorted(d, key=itemgetter('cos'), reverse=True)

    def get_sencence_embedding_by_attention(self, content):
        """Self-attention pooled word vectors for *content* (experimental;
        method-name typo kept for compatibility — currently only referenced
        from commented-out code)."""
        tokens = thu.cut(content, text=True).split(' ')
        sentence = [w2v[word] for word in tokens if word in w2v.vocab]
        QK = np.matmul(sentence, np.transpose(sentence))
        QK = np.exp(np.divide(QK, np.sqrt(300)))
        e_sum = np.sum(QK, axis=0)
        # Row-normalise: QK[i][j] /= e_sum[i] — vectorised form of the
        # original element-wise double loop.
        QK = QK / e_sum[:, np.newaxis]
        return np.matmul(QK, sentence)

    def get_sentence_embedding_by_SIF(self, content):
        """Rank articles by cosine similarity of SIF sentence embeddings
        (smooth-inverse-frequency weighted glove vectors)."""
        # Punctuation/whitespace stripper, compiled once for query + laws.
        punct = re.compile(r'[\s+\.\!\/_,$%^*(+\"\')]+|[+——()?【】“”！，。？、~@#￥%……&*（）]+')
        sentences = [thu.cut(punct.sub('', content), text=True)]
        for law in xing_laws:
            text = law.split('    ')[1].strip()
            sentences.append(thu.cut(punct.sub('', text), text=True))
        # x: word-index array, m: binary mask of valid positions
        x, m = data_io.sentences2idx(sentences, words)
        w = data_io.seq2weight(x, m, weight4ind)  # per-position word weights
        # embedding[i, :] is the embedding of sentences[i]; row 0 is the query.
        embedding = SIF_embedding.SIF_embedding(We, x, w, params)
        query = embedding[0]
        query_norm = np.sqrt(np.sum(np.square(query)))  # hoisted out of the loop
        d = []
        for i in range(1, len(embedding)):
            d.append({'num': '刑法第' + str(i) + '条',
                      'result': xing_laws[i - 1].split('    ')[1].strip(),
                      'words': '',
                      'cos': np.round(np.dot(query, embedding[i]) /
                                      (query_norm * np.sqrt(np.sum(np.square(embedding[i])))), 2)})
            print('判断', i)
        return sorted(d, key=itemgetter('cos'), reverse=True)





if __name__ == '__main__':

    # Command-line configuration (the tornado.options variant below was
    # replaced by argparse).
    #define("port", default=8090, help="run on the given port", type=int)

    #tornado.options.parse_command_line()
    parser = argparse.ArgumentParser()
    parser.add_argument('--server_port',type=int,default=8090,help="run on the given port")
    parser.add_argument('--bert_ip',type=str,default='172.23.7.101',help='bert embedding ip(default:172.23.7.101)')
    parser.add_argument('--bert_port',type=int,default=5555,help='bert embedding port(default:5555)')
    parser.add_argument('--bert_port_out',type=int,default=5556)
    args = parser.parse_args()
    # Client for the external bert-as-service embedding server.
    bc = BertClient(ip=args.bert_ip,port=args.bert_port,port_out=args.bert_port_out)

    #model_path = './datasets/model_1828.pkl'
    #glove_path = './datasets/w2v_xingshi.txt'
    # One line per criminal-law article; BERT-encode them all up front.
    # TODO(review): files are opened without explicit encoding or context
    # managers — relies on the platform default and on GC to close them.
    xing_laws = open('./datasets/clean_xing_law_1.txt').readlines()
    law_bert_embeddings = bc.encode([line.strip() for line in xing_laws])
    # Criminal-procedure-law articles, one per line.
    susong_laws =  open('./datasets/clean_xingshisusong_law_1.txt').readlines()
    # Precomputed per-article embeddings (trusted local pickle files).
    law_tfidf = pickle.load(open('./datasets/law_tfidf.pkl','rb'))
    law_glove = pickle.load(open('./datasets/law_glove_embedding.pkl', 'rb'))
    start_time = time.time()
    #base_dir = '/home/nlp/pySpace/match_law'
    base_dir = '/home/abc/pySpace/law_match'
    model_path = base_dir + '/checkpoints/textcnn_11250623/best_validation'
    glove_path = base_dir + '/datasets/gamedatas/w2v_xingshi.txt'
    familyname_path = base_dir + '/datasets/gamedatas/familyname.txt'
    vocab_path = base_dir + '/datasets/gamedatas/vocab.txt'
    labels_path = base_dir + '/datasets/gamedatas/labels.txt'
    # Precomputed per-article ELMo sentence vectors.
    law_elmo = pickle.load(open('./datasets/law_s2v_elmo.pkl','rb'))
    weightfile = base_dir+'/datasets/vocab_frequency.txt'  # each line is a word and its frequency
    wordfile = base_dir+'/datasets/vocab.txt'
    # words: vocabulary word -> index (line number).
    words = {word.strip():i for i,word in enumerate(open(wordfile,encoding='utf-8').readlines())}
    # We: word-embedding matrix consumed by SIF_embedding.
    We = pickle.load(open(base_dir+'/datasets/We.pkl','rb'))
    weightpara = 1e-3  # the parameter in the SIF weighting scheme, usually in the range [3e-5, 3e-3]
    rmpc = 1  # number of principal components to remove in SIF weighting scheme
    word2weight = data_io.getWordWeight(weightfile, weightpara)  # word2weight['str'] is the weight for the word 'str'
    weight4ind = data_io.getWeight(words, word2weight)  # weight4ind[i] is the weight for the i-th word
    # set parameters
    # NOTE(review): this rebinds the imported ``params`` module name to a
    # params() instance — the module is unreachable afterwards. Intentional
    # (the instance is what SIF_embedding expects) but fragile.
    params = params.params()
    params.rmpc = rmpc
    thu = thulac.thulac(seg_only=True)  # THULAC word segmenter (segmentation only)
    print('这块走完了,贼快')
    # Build a regex matching anonymised person names (surname + "某"/"x"
    # plus optional ordinal markers) from the surname list.
    with open(familyname_path, encoding='utf-8') as f:
        fns = "".join([line.strip() for line in f.readlines()])
        fn = "[" + fns + "]" + "[1-9]*某+[甲乙丙丁午己庚辛壬癸]*" \
             + "|[" + fns + "]" + "x+[甲乙丙丁午己庚辛壬癸]*"
    w2v = KeyedVectors.load_word2vec_format(glove_path)
    # Predictor shared by the request handlers via module globals.
    predict = Predict(model_path, w2v, True, True, fn, vocab_path=vocab_path, labels_path=labels_path)
    #predict = Predict(model_path,glove_path,True)
    print('实例化完毕,用时:',datetime.timedelta(seconds=int(time.time()-start_time)))
    # Application settings: "statics" (not Tornado's default "static") is
    # used for the static-file path and URL prefix to ease deployment.
    settings = dict(
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "statics"),  # default is "static"; "statics" chosen for deployment
        static_url_prefix="/statics/"  # default is "static"; "statics" chosen for deployment
    )
    # app = tornado.web.Application(
    #     handlers=[(r'/', IndexHandler), (r'/result/show', PoemPageHandler),(r'/result/net',PredcitByNet)],
    #     template_path=os.path.join(os.path.dirname(__file__), "templates"),static_path=os.path.join(os.path.dirname(__file__), "templates"),debug=True
    # )
    app = tornado.web.Application(
        handlers=[(r'/', IndexHandler), (r'/result/show', PoemPageHandler), (r'/result/net', PredcitByNet)],**settings, debug=True
    )
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(args.server_port)
    # NOTE(review): tornado.ioloop is never imported explicitly; this works
    # only because importing tornado.web sets it as a package attribute —
    # confirm, or add ``import tornado.ioloop`` at the top of the file.
    tornado.ioloop.IOLoop.instance().start()
