# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
import wikipedia
from langconv import *
import Levenshtein
import cPickle
import time

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Configuration and (optional) baseline data loading.
# Queries Chinese Wikipedia; paths point at the NLPCC-2014 Chinese Entity
# Linking shared-task data on a local Windows machine.
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

# Master switch: when True, parse the sample queries and the KB key/title
# file from scratch; when False the later stages rely on pickled caches.
RUN_BASELINE = False

if RUN_BASELINE:  # load sample_query_2014 and PKBase_key_title
    kb_path = BASE_KB_PATH
    # Weibo sample file: each record carries content plus mention offsets
    # (see the <weibo> example in the module docstring above).
    sp_file = kb_path+'weiboAutoTag_6.txt'
    query_set = readSample(sp_file)
    # KB file mapping: entry key <-> canonical entity title.
    kb_file = kb_path + 'PKBase_key_title.txt'
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
if False:#在map_entity2sml_keys中查找最接近的10个name
    map_entity2sml_keys = {}
    for query in query_set:
        for entity in query[2]:
            if not map_entity2sml_keys.get(entity[1]) is None:
                continue
            sml_keys = findSimilarKeys(entity[1],name_list,10)
            map_entity2sml_keys[ entity[1] ] = sml_keys
    ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','w')
    cPickle.dump(map_entity2sml_keys, ftemp)
    ftemp.close()
else:
    ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
    map_entity2sml_keys=cPickle.load(ftemp)
    ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
if False:#在wikipedia里扩展name的最接近的20个
    map_query2wikipedia = {}    
    for query in query_set:
        for entity in query[2]:
            if not map_query2wikipedia.get(entity[1]) is None:
                continue
            print 'begin query',entity[1].encode('gbk','ignore')
            
            done = False
            try_count = 0
            while (not done) and try_count<20:
                try:
                    search_set = wikipedia.search(entity[1],20,False)
                    new_search_set = []
                    for itm in search_set:
                        new_search_set.append(Converter('zh-hans').convert(itm))
                    map_query2wikipedia[ entity[1] ] = new_search_set
                    done = True
                except :
                    time.sleep(1)
                    try_count=try_count+1
                    print "warning: wikipedia.search(",entity[1],") error",try_count
                    pass
    
    ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','w')
    cPickle.dump(map_query2wikipedia, ftemp)
    ftemp.close()
else:
    ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
    map_query2wikipedia=cPickle.load(ftemp)
    ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------

        
        