# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
import wikipedia
from langconv import *
import cPickle 
import time
import Levenshtein

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Global configuration: wikipedia lookups are made against the Chinese wiki;
# all data and KB files live under the two hard-coded Windows paths below.
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

# Pipeline resume switch: a stage guarded by `if BASELINE_STEP <= n` runs only
# when BASELINE_STEP is at most n; later stages reload earlier results from
# disk instead of recomputing (see the else-branch around the pickle load below).
BASELINE_STEP = 0

'step0. 加载sample_query_2014和PKBase_key_title'
# step 0: load the sample weibo queries and the KB key<->title maps.
if BASELINE_STEP<=0:
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    # presumably each query is (query_id, content, entity_list) matching the
    # sample XML at the top of the file -- TODO confirm against readSample
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # map_key2name: KB key -> title; map_name2key: title -> KB keys;
    # name_list: all KB titles (assumed from names -- verify in load_KBaseKey)
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
#在map_entity2sml_keys中查找最接近的10个name
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
#在wikipedia里扩展name的最接近的20个
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
BaseLine_Candidate_set = []
# step 1: for every entity mention in every query, gather KB candidates
# from (a) the precomputed similar-name map and (b) the wikipedia expansion.
if BASELINE_STEP<=1:
    all_count = 0
    for query in query_set:
        # query layout: (query_id, content, entity_list) -- see sample XML above
        all_count = all_count + len(query[2])
        for entity in query[2]:
            # entity layout: index 1 = surface name, index 3 = entity id
            name = entity[1]
            entId = entity[3]

            # Candidates from the similar-name map: surface name -> KB keys.
            # (dict assignment already deduplicates repeated names)
            found_namekeys = {}
            for sim_name in map_entity2sml_keys[name]:
                if sim_name in found_namekeys:
                    continue
                found_namekeys[sim_name] = map_name2key[sim_name]

            # Candidates from the wikipedia expansion, keeping only the
            # suggestions that actually exist in the KB.
            # NOTE(review): the original bounded this loop with
            # min([len(wiki_sets)]), which is just len(wiki_sets); a real cap
            # (e.g. 20, per the comment above the pickle load) was probably
            # intended -- confirm before adding one.
            found_namekeys_wiki = {}
            if name in map_query2wikipedia:
                for wiki_name in map_query2wikipedia[name]:
                    if wiki_name in found_namekeys_wiki:
                        continue
                    if wiki_name not in map_name2key:
                        continue
                    found_namekeys_wiki[wiki_name] = map_name2key[wiki_name]

            BaseLine_Candidate_set.append( ( entity, query,(found_namekeys,found_namekeys_wiki) ) )

    # Persist the candidates so later runs (BASELINE_STEP > 1) can reload them.
    # Text mode 'w'/'r' kept to stay consistent with the protocol-0 pickles
    # used throughout this file.
    with open(BASE_DATA_PATH+'BaseLine_Candidate_set_old.dup', 'w') as ftemp:
        cPickle.dump(BaseLine_Candidate_set, ftemp)
else:
    with open(BASE_DATA_PATH+'BaseLine_Candidate_set_old.dup', 'r') as ftemp:
        BaseLine_Candidate_set = cPickle.load(ftemp)
    
'step 2. 获取baseline result并打印输出'
if BASELINE_STEP<=2:
    fd_out = codecs.open(BASE_DATA_PATH+'baseline_result.txt', 'w', encoding='utf-8')
    fd_out_idx = 0
    last_query = None
    all_count = 0
    fnd_count = 0
    for cs in BaseLine_Candidate_set:
        entity = cs[0]
        query = cs[1]
        (namekeys_base,namekeys_wiki) = cs[2]
        entity_id = entity[0]
        query_id = query[0]
        
        name = entity[1]
        entId = entity[3]
        
        all_count=all_count+1.0
        fd_out_idx= fd_out_idx+1
        
        '**********baseline searching**********'
        base_key = 'NIL'
        if not namekeys_base.get(name) is None:
            base_key = namekeys_base[name][0]
        if base_key=='NIL' and not namekeys_wiki.get(name) is None:
            base_key = namekeys_wiki[name][0]
        if not base_key=='NIL':
            fnd_count=fnd_count+1.0
        fd_string = '%d\tentRelation\t%s\t%s\t%s\n' % (fd_out_idx,query_id,entity_id,base_key)
        fd_out.write(fd_string)
        '**********baseline searching**********'
    
        if not last_query == query:
            print '\n#----------------------------------------------------'
            print query[0].encode('gbk','ignore'),query[1].encode('gbk','ignore')
            last_query = query
        print '@',entId.encode('gbk','ignore'),name.encode('gbk','ignore'),'\t,baseline=\t',base_key.encode('gbk','ignore')
        entity_namekeys_string = ''
        for itm in namekeys_base:
            base_keys = namekeys_base[itm]
            entity_namekeys_string = entity_namekeys_string+ ' | '+itm+','+' '.join(base_keys)
        print '@sbasekeys: ',entity_namekeys_string.encode('gbk','ignore')
        entity_namekeys_string = ''
        for itm in namekeys_wiki:
            base_keys = namekeys_wiki[itm]
            entity_namekeys_string = entity_namekeys_string+ ' | '+itm+','+' '.join(base_keys)
        print '@wikikeys: ',entity_namekeys_string.encode('gbk','ignore')

    
    print 'found rate=',fnd_count/all_count
    fd_out.close()
    
'step 3.'
    


        