# coding:utf-8

import pandas as pd
import pickle
import operator
from gensim.models import KeyedVectors
from hashes.simhash import simhash
from collections import defaultdict
from simhash_demo import SimHash


def match_by_simhash(match_file, matched_file, threshold, outf, seg_sign):
    """Fuzzy-match each 'fulltext' row of an Excel file against the lines of a
    text file using SimHash Hamming distance, and write matches to a CSV.

    match_file:   Excel file with a 'fulltext' column of texts to match.
    matched_file: UTF-8 text file; each line is "<id><seg_sign><text>...".
    threshold:    maximum Hamming distance for a pair to count as a match.
    outf:         output CSV path; one row per matched text:
                  [text, i, id, candidate, i, id, candidate, ...].
    seg_sign:     field separator inside matched_file lines.
    """
    simhash = SimHash()
    result = defaultdict(list)
    match_df = pd.read_excel(match_file)
    match_txts = list(match_df['fulltext'])

    candidates = []  # [id, text] pairs from matched_file
    with open(matched_file, encoding='utf-8') as f:
        for raw in f:
            parts = raw.split(seg_sign)
            # Skip malformed lines instead of raising IndexError; the original
            # crashed here on lines missing the separator (see its debug code).
            if len(parts) < 2 or len(parts[1]) <= 1:
                continue
            candidates.append([parts[0], parts[1]])

    for i, match_txt in enumerate(match_txts):
        # Bucket matches by distance so the output is ordered best-first.
        buckets = defaultdict(list)
        hash1 = simhash.simHash(match_txt.strip())
        for j, cand in enumerate(candidates):
            hash2 = simhash.simHash(cand[1].strip())
            distance = simhash.getDistince(hash1, hash2)
            if distance <= threshold:
                buckets[distance].append(cand)
            print(i, j)  # progress indicator
        for dist in sorted(buckets):
            for cand_id, cand_text in buckets[dist]:
                result[match_txt.strip()].append([i, cand_id.strip(), cand_text.strip()])

    rows = []
    for key, matches in result.items():
        row = [key]
        for match in matches:
            row.extend(match)
        rows.append(row)
    pd.DataFrame(rows).to_csv(outf, index=False, encoding='utf-8')


def match_by_strictly(match_file, matched_file, outf):
    """Exact/substring matching: for each 'fulltext' row of the Excel file,
    collect every line of matched_file that contains it, and write the
    groups to a CSV (one row per text: [text, match, match, ...]).

    match_file:   Excel file with a 'fulltext' column.
    matched_file: UTF-8 text file with four-space-separated fields; the
                  second field is the candidate text.
    outf:         output CSV path.
    """
    result = defaultdict(list)
    match_df = pd.read_excel(match_file)
    match_txts = list(match_df['fulltext'])

    lines = []
    with open(matched_file, encoding='utf-8') as f:
        for raw in f:
            parts = raw.split('    ')
            # Guard against lines without the separator (original raised IndexError).
            if len(parts) > 1 and len(parts[1]) > 1:
                lines.append(parts[1])

    for i, match_txt in enumerate(match_txts):
        needle = match_txt.strip()
        for j, line in enumerate(lines):
            candidate = line.strip()
            # BUG FIX: the original's second condition compared the bound
            # method `match_txt.strip` (not its result) to a string, so it was
            # always False. The substring test already covers exact equality.
            if needle in candidate:
                result[needle].append(candidate)
            print(i, j)  # progress indicator

    rows = []
    for key, matches in result.items():
        row = [key]
        row.extend(matches)
        rows.append(row)
    pd.DataFrame(rows).to_csv(outf, index=False, encoding='utf-8')

def match_result(src_inf, inf, outf):
    """Join the match CSV produced earlier back onto the source Excel rows.

    src_inf: source Excel file; columns 0, 9 and 12 are copied to the output.
    inf:     match CSV whose column 1 holds the source row index and whose
             columns 2 and 3 hold the matched id/text.
    outf:    output Excel path.
    """
    src_df = pd.read_excel(src_inf)
    df = pd.read_csv(inf, encoding='utf-8')

    # Map source-row index -> first matching row position in df.
    # The original called `i in indexs` and `indexs.index(i)` (twice) per row,
    # i.e. up to three O(n) scans each iteration; this dict makes it O(1).
    pos_by_index = {}
    for pos, idx in enumerate(df.iloc[:, 1]):
        pos_by_index.setdefault(idx, pos)  # setdefault keeps the FIRST hit, like list.index

    result = []
    for i in range(src_df.shape[0]):
        m = [src_df.iloc[i, 0], src_df.iloc[i, 9], src_df.iloc[i, 12]]
        if i in pos_by_index:
            pos = pos_by_index[i]
            m.extend([df.iloc[pos, 2], df.iloc[pos, 3]])
        result.append(m)
        print(i)  # progress indicator
    # NOTE: the `encoding` kwarg of to_excel was deprecated in pandas 1.x and
    # removed in 2.0; it had no effect for xlsx output, so it is dropped.
    pd.DataFrame(result).to_excel(outf, index=False)

def get_keywords_by_tfidf(inf, top, outf):
    """Extract up to `top` tail words per document from a pickled tf-idf
    structure and pickle the keyword lists to `outf`.

    inf:  pickle of an iterable of per-document lists of (word, score) pairs;
          assumes each list is sorted ascending by score, so the tail holds
          the best words — TODO confirm against the tf-idf producer.
    top:  number of keywords to keep per document.
    outf: output pickle path (list of lists of words, best-scoring first).
    """
    # Use context managers: the original never closed either file handle.
    with open(inf, 'rb') as fin:
        tfidf = pickle.load(fin)
    key_words = []
    for i, doc in enumerate(tfidf):
        # Walk the tail backwards: same order as the original's reverse-index
        # loop from len(t)-1 down to max(len(t)-top-1, -1).
        key_words.append([pair[0] for pair in doc[::-1][:top]])
        print(i)  # progress indicator
    with open(outf, 'wb') as fout:
        pickle.dump(key_words, fout)

def get_similar_keywords(inf, w2v_path, outf):
    """Expand each document's keyword list with word2vec neighbours.

    inf:      pickle of per-document keyword lists.
    w2v_path: text-format word2vec vectors loadable by KeyedVectors.
    outf:     output pickle path: per document, the concatenated most-similar
              words of every in-vocabulary keyword.
    """
    # Context managers fix the original's leaked file handles.
    with open(inf, 'rb') as fin:
        keywords = pickle.load(fin)

    w2v = KeyedVectors.load_word2vec_format(w2v_path, binary=False, encoding='utf-8')
    result = []
    for i, doc_keys in enumerate(keywords):
        similar_words = []
        for key in doc_keys:
            # NOTE(review): `.vocab` is the gensim<4 API; gensim 4 renamed it
            # to `key_to_index` — confirm the installed version.
            if key in w2v.vocab:
                similar_words.extend(s[0] for s in w2v.most_similar(key))
        result.append(similar_words)
        print(i)  # progress indicator

    with open(outf, 'wb') as fout:
        pickle.dump(result, fout)

def match_by_keywords(data_inf, inf, outf, rate):
    """Match documents to candidates by keyword-set overlap.

    data_inf: pickle of per-document keyword lists to be matched.
    inf:      pickle of candidate keyword lists.
    outf:     optional output pickle path; falsy (e.g. '') skips writing.
    rate:     minimum overlap fraction of the document's keyword count; a
              candidate j matches document i when the sets share MORE than
              int(len(doc_i) * rate) words.

    Returns a list with one defaultdict per document, mapping the document
    index i to the list of matching candidate indices j.

    BUG FIX: the original built `result` but neither returned nor saved it,
    so the function had no observable effect; it now returns the result and
    honours the previously-unused `outf` parameter.
    """
    # Context managers fix the original's leaked file handles.
    with open(inf, 'rb') as f:
        keywords = pickle.load(f)
    with open(data_inf, 'rb') as f:
        matched_keywords = pickle.load(f)

    result = []
    for i, doc_words in enumerate(matched_keywords):
        m = defaultdict(list)
        word_nums = len(doc_words)
        doc_set = set(doc_words)
        for j, cand in enumerate(keywords):
            if len(set(cand) & doc_set) > int(word_nums * rate):
                m[i].append(j)
                print(i, j)  # progress indicator
        result.append(m)

    if outf:
        with open(outf, 'wb') as f:
            pickle.dump(result, f)
    return result






if __name__ == '__main__':
    # Driver: each commented line below is a previously-run pipeline stage
    # (simhash match -> strict match -> join results -> tf-idf keywords ->
    # similar-word expansion); only the keyword-overlap matching step is
    # currently active. Note the active call passes outf='' (no output path).
    #match_by_simhash('./datasets/clean_data.xlsx','./datasets/clean_xing_law.txt',15,'./datasets/result.csv')
    #match_by_simhash('./datasets/clean_data.xlsx', './datasets/clean_xing_law.txt', 32, './datasets/result_32.csv','    ')
    #match_by_strictly('./datasets/clean_data.xlsx','./datasets/clean_xing_law.txt','./datasets/result_strictly.csv')
    #match_result('./datasets/clean_data.xlsx','./datasets/result.csv','./datasets/match_result_20.xlsx')
    #get_keywords_by_tfidf('./datasets/data_tfidf.pkl',top=15,outf='./datasets/keywords_15.pkl')
    #get_keywords_by_tfidf('./datasets/xing_law_tfidf.pkl', top=15, outf='./datasets/xing_keywords_15.pkl')
    #get_similar_keywords('./datasets/xing_keywords_15.pkl','./datasets/w2v_xingshi.txt','./datasets/xing_similar_words_15.pkl')
    match_by_keywords('./datasets/keywords_15.pkl','./datasets/xing_similar_words_15.pkl','',0.5)
    #print(1)


