# -*- coding: utf-8 -*-
import codecs
import cPickle
import heapq
import os
import sys
import time

import Levenshtein
import wikipedia

from langconv import *
from st_common import *

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''

def findSimilarKeys(key, key_lst, nbest, ratio_fn=None):
    """Return the nbest strings from key_lst most similar to key.

    Candidates are scored by ratio_fn(candidate, key) (higher = more
    similar) and returned best-first; ties are broken by the candidate
    string itself, descending, matching the original tuple sort.

    key      -- the query string
    key_lst  -- iterable of candidate strings
    nbest    -- number of results to return
    ratio_fn -- optional scorer taking (candidate, key) and returning a
                float; defaults to Levenshtein.ratio, the original
                hard-coded scorer (backward compatible)
    """
    if ratio_fn is None:
        ratio_fn = Levenshtein.ratio
    # heapq.nlargest(n, it) is documented as equivalent to
    # sorted(it, reverse=True)[:n], so the result (including tie-breaking
    # on the candidate string) is identical to the original
    # prune-every-50-then-sort loop, but it streams the candidates and
    # never materializes more than roughly nbest scored pairs.
    best = heapq.nlargest(nbest, ((ratio_fn(itm, key), itm) for itm in key_lst))
    return [itm for _score, itm in best]

###################################################################################################
# Script configuration: Wikipedia search language and the local data/KB
# directories (hard-coded Windows paths -- adjust before running elsewhere).
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

# Stage toggle: True runs the baseline evaluation below; when False, the
# later stages load cached pickles instead of recomputing.
RUN_BASELINE = True

if RUN_BASELINE:# load sample_query_2014 and PKBase_key_title
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    # query_set: parsed weibo samples; judging by the indexing used below,
    # each query is (id, content, entity_list).
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # map_name2key maps a KB name to a container of KB keys (see the
    # `entId in keys` checks below); map_key2name is presumably the
    # inverse -- it is not read in this view.  name_list is the candidate
    # pool for the similarity search.
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
# Stage: for each entity mention, precompute the 10 KB names most similar
# to it (Levenshtein ratio over name_list).  Expensive, so it is disabled
# (if False) and the cached pickle from a previous run is loaded instead.
if False:# find the 10 closest KB names for each mention
    map_entity2sml_keys = {}
    for query in query_set:
        for entity in query[2]:
            # entity[1] is the mention string; skip mentions already done.
            if not map_entity2sml_keys.get(entity[1]) is None:
                continue
            sml_keys = findSimilarKeys(entity[1],name_list,10)
            map_entity2sml_keys[ entity[1] ] = sml_keys
    ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','w')
    cPickle.dump(map_entity2sml_keys, ftemp)
    ftemp.close()
else:
    # Load the cached mention -> similar-KB-names map.
    ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
    map_entity2sml_keys=cPickle.load(ftemp)
    ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
# Stage: search Wikipedia (zh) for each mention and keep the top-20 result
# titles, converted to simplified Chinese so they are comparable with the
# KB names.  Network-bound, so disabled (if False) and the cached pickle
# from a previous run is loaded instead.
if False:# expand each mention with its 20 closest Wikipedia titles
    map_query2wikipedia = {}    
    for query in query_set:
        for entity in query[2]:
            if not map_query2wikipedia.get(entity[1]) is None:
                continue
            print 'begin query',entity[1].encode('gbk','ignore')
            
            # Retry up to 20 times on any failure, sleeping 1s between
            # attempts.  If every attempt fails, the mention is simply
            # left out of the map; downstream code treats a missing key
            # as "no Wikipedia hits".
            done = False
            try_count = 0
            while (not done) and try_count<20:
                try:
                    search_set = wikipedia.search(entity[1],20,False)
                    new_search_set = []
                    for itm in search_set:
                        # Traditional -> simplified Chinese normalization.
                        new_search_set.append(Converter('zh-hans').convert(itm))
                    map_query2wikipedia[ entity[1] ] = new_search_set
                    done = True
                except :
                    # NOTE(review): bare except also swallows
                    # KeyboardInterrupt/SystemExit; `except Exception:`
                    # would be safer.
                    time.sleep(1)
                    try_count=try_count+1
                    print "warning: wikipedia.search(",entity[1],") error",try_count
                    pass
    
    ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','w')
    cPickle.dump(map_query2wikipedia, ftemp)
    ftemp.close()
else:
    # Load the cached mention -> Wikipedia-titles map.
    ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
    map_query2wikipedia=cPickle.load(ftemp)
    ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
#Levenshtein.ratio(u'建行',u'中国建设银行')
# Stage: baseline entity-linking evaluation.  For every mention, try to
# recover its gold KB id (entity[3], or the literal string 'NIL') and
# report the fraction found; failed mentions are collected in
# BaseLine_Error_Set for the diagnostic stages below.
BaseLine_Error_Set = []
if RUN_BASELINE: # baseline evaluation
    found_count = 0
    all_count = 0
    for query in query_set:
        all_count = all_count+len(query[2])
        for entity in query[2]:
            name = entity[1]
            entId = entity[3]
            
            # gbk/'ignore' encoding -- presumably for a Chinese Windows
            # console; verify on other terminals.
            print 'begin check',name.encode('gbk','ignore'),entId.encode('gbk','ignore')
            found_keys = None
            silname_sets = map_entity2sml_keys[name]
            if name in  silname_sets:
                # The mention exists verbatim in the KB: count as found
                # only if the gold id is among that name's candidate keys.
                keys = map_name2key[name]
                if entId in keys:
                    found_count=found_count+1.0
                    found_keys = keys
                    print "found",name.encode('gbk','ignore'),entId.encode('gbk','ignore'),len(keys)
            else:
                # Fallback: probe the top-5 cached Wikipedia titles and
                # accept the first whose KB keys contain the gold id.
                # NOTE(review): this fallback is skipped entirely when the
                # exact name exists in the KB but maps to the wrong keys.
                if not map_query2wikipedia.get(name) is None:
                    wiki_sets = map_query2wikipedia[name]
                    for idx in range(0,min([5,len(wiki_sets)])):
                        wiki_name = wiki_sets[idx]
                        if map_name2key.get(wiki_name) is None:
                            continue
                        keys = map_name2key[wiki_name]
                        if entId in keys:
                            found_count=found_count+1.0
                            found_keys = keys
                            print "exfound",name.encode('gbk','ignore'),wiki_name.encode('gbk','ignore'),entId.encode('gbk','ignore'),len(keys)
                            break
                        
            # A gold-NIL mention counts as found when nothing matched
            # above; any other unmatched mention is recorded together with
            # its similar names (and their keys) for error analysis.
            if found_keys==None and entId=='NIL':
                print "found",name.encode('gbk','ignore'),'NIL'
                found_count=found_count+1.0
            elif found_keys==None:
                silnamekey_sets = []
                for nm in silname_sets:
                    silnamekey_sets.append([nm,map_name2key[nm]])
                BaseLine_Error_Set.append((entity,silnamekey_sets,query))
    # Accuracy = found mentions / all mentions.
    print 'reg=', found_count/all_count
    
    ftemp = open(BASE_DATA_PATH+'BaseLine_Error_Set.dup','w')
    cPickle.dump(BaseLine_Error_Set, ftemp)
    ftemp.close()
else:
    # Load the cached error set from a previous baseline run.
    ftemp = open(BASE_DATA_PATH+'BaseLine_Error_Set.dup','r')
    BaseLine_Error_Set=cPickle.load(ftemp)
    ftemp.close()
    print 'Load BaseLine_Error_Set finished!'
##---------------------------------------------------------------------
# Stage: pull the full KB records (key, name, attribute list) for every
# gold id appearing in the baseline error set.  KB dump format, judging
# by the parsing below: '#key,"name"' header lines followed by
# '@attr,value' lines, records separated by blank lines.  Disabled
# (if False); the cached pickle is loaded instead.
if False:  # fetch full KB records for the ids in BaseLine_Error_Set
    collect_keys = []
    for error_item in BaseLine_Error_Set:
        # error_item[0] is the entity tuple; index 3 is its gold KB id.
        collect_keys.append(error_item[0][3])
        
    kbbasefd = codecs.open(BASE_KB_PATH+'PKBase_context.txt', 'r', encoding='utf-8')
    collect_kbitems = []
    kbitem=None#('key','name',[])
    # NOTE(review): readline(1000) returns at most 1000 chars per call, so
    # a longer physical line would be split and its tail parsed as a
    # separate "line" -- confirm no KB line exceeds this.
    line=kbbasefd.readline(1000)
    while line:
        # NOTE(review): the two replace() results below are discarded
        # (str is immutable), so they are no-ops; strip() removes the
        # trailing newline anyway.
        line.replace('\r\n',' ')
        line.replace('\n',' ')
        line=line.strip()
        if not line:
            # A blank line terminates the current record.
            if not kbitem is None:
                collect_kbitems.append(kbitem)
                kbitem = None
            line=kbbasefd.readline(1000)
            continue
        
        if line[0]=='#':
            # Header: '#key,"name"'; open a record only for wanted keys.
            # NOTE(review): `key in collect_keys` is an O(n) list scan per
            # record -- a set would be much faster on a large KB.
            idot1 = line.find(',')
            key = line[1:idot1].replace('"','').strip()
            name = line[idot1+1:].replace('"','').strip()
            if key in collect_keys:
                kbitem = (key,name,[])
            else:
                kbitem = None
            
        if line[0]=='@' and (not kbitem is None):
            # Attribute line: '@attr,value' appended to the open record.
            idot1 = line.find(',')
            attr_itm = (line[1:idot1].strip(),line[idot1+1:].strip())
            kbitem[2].append(attr_itm)
            
        line=kbbasefd.readline(1000)
    # NOTE(review): a final record not followed by a blank line before EOF
    # is never appended to collect_kbitems.
    
    ftemp = open(BASE_DATA_PATH+'collect_kbitems.dup','w')
    cPickle.dump(collect_kbitems, ftemp)
    ftemp.close()
else:
    # Load the cached KB records and rebuild collect_keys from the error set.
    ftemp = open(BASE_DATA_PATH+'collect_kbitems.dup','r')
    collect_kbitems=cPickle.load(ftemp)
    ftemp.close()
    collect_keys = []
    for error_item in BaseLine_Error_Set:
        collect_keys.append(error_item[0][3])
print 'Load collect_kbitems finished!'
##---------------------------------------------------------------------
# Stage: diagnostic dump of the baseline errors (runs only when the
# baseline itself was skipped and everything came from cached pickles).
# For each failed mention, print its gold id, the similar KB names with
# their keys, and its cached Wikipedia titles, grouped per weibo query.
if not RUN_BASELINE:
    # Index the collected KB records by key for quick lookup.
    # NOTE(review): map_key2kbitems is built but never read in this view.
    map_key2kbitems={}
    for kbitm in collect_kbitems:
        map_key2kbitems[kbitm[0]] = kbitm
    
    
    last_query = None
    for error_item in BaseLine_Error_Set:
        entity = error_item[0]
        silnamekey_sets = error_item[1]
        query = error_item[2]
        
        name = entity[1]
        entId = entity[3] 
        
        # Print the query header once per query (errors arrive grouped).
        if not last_query == query:
            print '\n#----------------------------------------------------'
            print query[0].encode('gbk','ignore'),query[1].encode('gbk','ignore')
            last_query = query
        # Render ' | name,key1 key2 ...' for every similar KB name.
        silnamekey_sets_string = ''
        for itm in silnamekey_sets:
            silnamekey_sets_string = silnamekey_sets_string+ ' | '+ itm[0]+','+' '.join(itm[1])
        print '\t',entId.encode('gbk','ignore'),name.encode('gbk','ignore')
        print '\t silnamekey: ',silnamekey_sets_string.encode('gbk','ignore')
        if not map_query2wikipedia.get(name) is None:
            wiki_sets = map_query2wikipedia[name]
            print '\t','wikipedia: ',(' | '.join(wiki_sets)).encode('gbk','ignore')
        
        

        
        