# coding:utf-8

from gensim.models import KeyedVectors
import numpy as np
import pickle
import re
from operator import itemgetter
import pandas as pd
import jieba
from jieba.analyse import extract_tags
from jieba.analyse import textrank
from collections import defaultdict
import requests
import math
import thulac
import json
import time
import datetime
#from fastText import load_model

def get_sencence_vector_by_w2v(w2v_path, path, outf):
    """Average the word2vec vectors of each pre-tokenised sentence and pickle the list.

    Each input line is expected to look like ``<id>\\t<word word word ...>``;
    the second tab-separated field is split on single spaces into tokens.

    Args:
        w2v_path: path to a word2vec text-format model (gensim KeyedVectors).
        path: UTF-8 input file, one tab-separated record per line.
        outf: output path for the pickled list of sentence vectors.
    """
    model = KeyedVectors.load_word2vec_format(w2v_path)
    matched_sentence = []
    with open(path, encoding='utf-8') as f:
        # Iterate the file lazily instead of materialising readlines().
        for i, line in enumerate(f):
            words = line.split('\t')[1].strip().split(' ')
            w2vs = [model[w] for w in words if w in model.vocab]
            if w2vs:
                matched_sentence.append(np.average(w2vs, axis=0))
            else:
                # Bug fix: every token was out-of-vocabulary. np.average on an
                # empty list produces a NaN (with a RuntimeWarning) rather than
                # an ndarray; store the '' sentinel that the downstream
                # isinstance(..., np.ndarray) check in get_similar_result
                # already treats as "no embedding".
                matched_sentence.append('')
            print(i)

    # Close the output handle deterministically (the original leaked it).
    with open(outf, 'wb') as out:
        pickle.dump(matched_sentence, out)


def get_similar_result(law_path, data_path):
    """For each data sentence vector, pick the law vector minimising the dot product.

    NOTE(review): for normalised vectors the dot product is a *similarity*
    (bigger = closer), so minimising it looks inverted — confirm the intent
    before relying on this ranking. The original comparison is kept as-is.

    Args:
        law_path: pickle file containing a list of law sentence vectors (ndarrays).
        data_path: pickle file containing a list of data sentence vectors;
            entries that are not ndarrays (failed embeddings) map to ''.

    Returns:
        list of one-entry dicts ``{data_index: law_index}`` (or ``{data_index: ''}``
        when the data entry has no embedding). The original built this list but
        never returned it — that is the bug fixed here.
    """
    with open(law_path, 'rb') as f:
        law_s2v = pickle.load(f)
    with open(data_path, 'rb') as f:
        data_s2v = pickle.load(f)

    result = []
    for i, vec in enumerate(data_s2v):
        m = {}
        if not isinstance(vec, np.ndarray):
            m[i] = ''  # embedding failed upstream; keep the sentinel
        else:
            min_dis = 100000
            for j, law_vec in enumerate(law_s2v):
                # Hoist the dot product: the original computed it twice.
                score = np.dot(vec, law_vec)
                if score < min_dis:
                    min_dis = score
                    m[i] = j
        print(m)
        result.append(m)
    return result
def get_similar_by_re(data_path, law_path, outf):
    """Match each case record to the criminal-law articles cited in its judgment text.

    Scans column 12 of the spreadsheet for citations like ``刑法第X条``, looks the
    article number up in `law_path` (four-space separated ``<heading>    <body>``
    lines) and appends every newly matched heading/body pair to the output row.
    All rows are then written to `outf` as one Excel sheet.

    Args:
        data_path: Excel file; col 0 = record id, col 9 = fact text, col 12 = judgment text.
        law_path: UTF-8 law file, one article per line.
        outf: output .xlsx path.
    """
    citation_patten = re.compile('刑法第.*?条', re.I)
    df = pd.read_excel(data_path)
    with open(law_path, encoding='utf-8') as f:
        laws = f.readlines()

    m = defaultdict(list)  # record id -> law indices already attached
    result = []
    for i in range(df.shape[0]):
        r = [df.iloc[i, 0], df.iloc[i, 9].strip()]
        s = df.iloc[i, 12]
        # `s == s` is False only for NaN, i.e. a missing judgment cell.
        if s == s:
            r.append(s.strip())
            for j in re.finditer(citation_patten, s):
                num = j.group().replace('刑法第', '').replace('条', '')
                law_patten = re.compile('第' + num + '条', re.I)
                for k, law in enumerate(laws):
                    # Bug fix (idiom): the original tested
                    # `str(re.match(...)) != 'None'`; a direct None check is
                    # equivalent and does not depend on repr formatting.
                    if re.match(law_patten, law.split('    ')[0]) is not None:
                        if k not in m[df.iloc[i, 0]]:
                            m[df.iloc[i, 0]].append(k)
                            parts = law.split('    ')
                            r.extend([parts[0].strip(), parts[1].strip()])
        result.append(r)
        print(i)

    # Diagnostic pass: count rows where no article matched (only id/fact/judgment).
    c = 0
    for i, r in enumerate(result):
        if len(r) == 3:
            c += 1
            # Renamed from `patten` — the original shadowed the citation pattern.
            crime_patten = re.compile('以.*?罪', re.I)
            for j in re.finditer(crime_patten, r[2]):
                a = j.group()  # NOTE(review): captured but unused in the original too
            print(c)

    result = pd.DataFrame(result)
    # NOTE(review): the `encoding` kwarg was removed from to_excel in pandas 2.x;
    # kept for parity with the original environment — confirm pandas version.
    result.to_excel(outf, 'Sheet1', index=False, encoding='utf-8')

def get_similar_from_key_words(inf, topN, law_inf, w2v_path, outf):
    """Match each explanation to the law whose textrank keywords overlap it best.

    For every row of `inf`, extracts `topN` textrank keywords, pairs them with
    the keywords of every law in `law_inf` via word2vec cosine similarity, and
    keeps the law collecting the most keyword pairs whose best similarity is
    >= sqrt(2)/2. Results are written to `outf` as Excel.

    Args:
        inf: Excel file; col 0 = id, col 1 = title, col 2 = explanation text.
        topN: number of textrank keywords to extract per text.
        law_inf: UTF-8 law file, four-space separated; the body is the last field.
        w2v_path: word2vec text-format model path.
        outf: output .xlsx path.
    """
    start = time.time()
    w2v = KeyedVectors.load_word2vec_format(w2v_path)
    print('w2v load with:', datetime.timedelta(seconds=int(time.time() - start)))
    start = time.time()

    df = pd.read_excel(inf)
    with open(law_inf, encoding='utf-8') as f:
        laws = [line.split('    ')[-1].strip() for line in f]

    # Strip digits, latin letters, CJK numerals and the 第…条 markers before
    # keyword extraction. Compiled once and hoisted out of both loops.
    # Bug fix: the original pattern for the explanation ended with a stray '|'
    # that also matched the empty string at every position; removing it does
    # not change the substitution result (empty match -> empty replacement).
    noise = re.compile(r'\d|[a-z]|[A-Z]|第|[(（）)一二三四五六七八九十百千万亿]|条')
    threshold = math.sqrt(2) / 2  # cos(45°): only fairly close pairs count

    result = {}
    for i in range(df.shape[0]):
        explanation = noise.sub('', df.iloc[i, 2].strip().replace('\r\n', ''))
        keywords = textrank(explanation, topK=topN)
        law_match_results = []
        for j, law in enumerate(laws):
            law_keys = textrank(noise.sub('', law), topK=topN)
            sims = [j + 1]  # 1-based law number goes first
            for key in keywords:
                pairs = [{'word': key + '-' + lk, 'cos': w2v.similarity(key, lk)}
                         for lk in law_keys
                         if key in w2v.vocab and lk in w2v.vocab]
                if pairs:
                    # Stable sort + last element keeps the original tie-breaking
                    # (later law key wins on equal cosine).
                    dmax = sorted(pairs, key=itemgetter('cos'))[-1]
                    if dmax['cos'] >= threshold:
                        sims.append(dmax)
            law_match_results.append(sims)
            print(i, j)
        # Last element of a stable ascending sort == law with the most matched
        # pairs, ties broken in favour of the later law (as in the original).
        match_law = sorted(law_match_results, key=len)[-1]
        match_law.insert(1, len(match_law[min(1, len(match_law) - 1):]))
        result[i] = match_law

    print('process load with:', datetime.timedelta(seconds=int(time.time() - start)))
    out = []
    for i in result:  # keys are 0..n-1 in insertion order
        r = result[i]
        row = [df.iloc[i, 0], df.iloc[i, 1].strip(), df.iloc[i, 2].strip(),
               r[0], laws[r[0] - 1], str(r[1])]
        # str.join instead of the original quadratic '+=' loop; identical output.
        row.append('、'.join(d['word'] for d in r[2:]))
        out.append(row)
        print('writing', i, '...')
    out = pd.DataFrame(out)
    # NOTE(review): the `encoding` kwarg was removed from to_excel in pandas 2.x.
    out.to_excel(outf, index=False, encoding='utf-8')

def get_elmo_sentence(inf, law_inf):
    """Fetch ELMo embeddings for law texts and explanations from HTTP embedding services.

    Segments every law line and every explanation with thulac, posts them in
    batches of 128 to the embedding services, and pickles the law embeddings
    to ./datasets/law_elmo.pkl.

    NOTE(review): `query_embeddings` is collected (port 9093) but never saved
    or returned — only the law embeddings (port 9094) are persisted. Confirm
    whether the query requests are intentional before removing them.

    Args:
        inf: Excel file with the explanation text in column 2.
        law_inf: UTF-8 law file, four-space separated; the body is the last field.
    """
    thu = thulac.thulac(seg_only=True)

    df = pd.read_excel(inf)
    with open(law_inf, encoding='utf-8') as f:
        laws = [line.split('    ')[-1].strip() for line in f]

    # Strip digits, latin letters, CJK numerals/punctuation and 第…条 markers.
    # (The original pattern's trailing '|' matched the empty string; removing
    # it leaves the substitution result unchanged.)
    noise = re.compile(r'\d|[a-z]|[A-Z]|第|[(（）)一二三四五六七八九十百千万亿、　；，“”：]|条')
    querys = []
    for i in range(df.shape[0]):
        query = noise.sub('', df.iloc[i, 2].strip().replace('\r\n', ''))
        querys.append(thu.cut(query, text=True).split(' '))
        print('cut query:', i)

    law_qs = [thu.cut(law, text=True).split(' ') for law in laws]

    batch_size = 128

    law_embeddings = []
    # Bug fix: the original `range(len(..)//128 + 1)` posted a trailing EMPTY
    # batch whenever the length was an exact multiple of 128; stepping by the
    # batch size only issues non-empty requests.
    for b, off in enumerate(range(0, len(law_qs), batch_size)):
        req = requests.post('http://172.23.7.101:9094/embedding',
                            data=json.dumps({'query': law_qs[off:off + batch_size]}))
        law_embeddings.extend(req.json()['embedding'])
        print('batch:', b)

    query_embeddings = []
    for b, off in enumerate(range(0, len(querys), batch_size)):
        req = requests.post('http://172.23.7.101:9093/embedding',
                            data=json.dumps({'query': querys[off:off + batch_size]}))
        query_embeddings.extend(req.json()['embedding'])
        print('batch:', b)

    # Close the output handle deterministically (the original leaked it).
    with open('./datasets/law_elmo.pkl', 'wb') as out:
        pickle.dump(law_embeddings, out)









if __name__ == '__main__':
    # Script entry point. The commented-out calls below are the history of
    # earlier experiments (word2vec sentence averaging, fastText similarity,
    # regex-based law matching, keyword matching); only the ELMo embedding
    # pipeline is currently active.
    # get_sencence_vector_by_w2v('./datasets/w2v_xingshi.txt','./datasets/cut_data.txt','./datasets/data_s2v.pkl')
    # get_sencence_vector_by_w2v('./datasets/w2v_xingshi.txt','./datasets/cut_xing_law.txt','./datasets/law_s2v.pkl')
    # get_similar_result('./datasets/law_s2v.pkl','./datasets/data_s2v.pkl')
    # f = load_model('../fasttext-PVDM/result/law_d2v.bin')
    # result = []
    # with open('./datasets/corpus_law.txt',encoding='utf-8') as t:
    #     for i,line in enumerate(t.readlines()):
    #         result.append(f.get_sentence_vector(line.strip()))
    #         print(i)
    #     pickle.dump(result,open('./datasets/law_sencence2vec.pkl','wb'))

    # law_result = pickle.load(open('./datasets/law_sencence2vec.pkl', 'rb'))
    # result = pickle.load(open('./datasets/sencence2vec.pkl', 'rb'))
    # law = open('./datasets/corpus_law.txt', encoding='utf-8').readlines()
    # for i, r in enumerate(result):
    #     min_dis = 100000
    #     m = {}
    #     for j, l in enumerate(law_result):
    #         if np.dot(r, l) < min_dis:
    #             min_dis = np.dot(r, l)
    #             m[i] = ' '.join(law[j].split(' ')[1:])
    #     print(m)
    #get_similar_by_re('./datasets/clean_data_exlaination.xlsx','./datasets/clean_xing_law.txt','')
    # topN = 20
    # get_similar_from_key_words('./datasets/explanation_all.xlsx',topN,'./datasets/clean_xing_law_1.txt','./datasets/w2v_xingshi.txt',
    #                            './datasets/keywords_'+str(topN)+'.xlsx')
    get_elmo_sentence('./datasets/explanation_all.xlsx','./datasets/clean_xing_law_1.txt')
