# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
from google_browser import *
import wikipedia
from langconv import *
import cPickle 
import time
import Levenshtein

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Use the Chinese-language wikipedia for all lookups made by this script.
wikipedia.set_lang("zh")
# Root directories for the shared-task data and the knowledge-base sample
# (Windows-specific absolute paths; adjust for the local machine).
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

# Pipeline stage selector: steps whose index is below BASELINE_STEP are
# skipped, their results being loaded from the cached *.dup pickle files.
BASELINE_STEP = 3
# Step 0: load the sample weibo queries and the PKBase key<->title mapping.
# Only runs when BASELINE_STEP <= 0; otherwise the cached pickles below are used.
'step0. 加载sample_query_2014和PKBase_key_title'
if BASELINE_STEP<=0:
    kb_path = BASE_KB_PATH
    # weiboAutoTag_6.txt holds the annotated weibo sample (see the XML-like
    # example at the top of this file); readSample is a project helper.
    sp_file = kb_path+'weiboAutoTag_6.txt'
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # map_key2name: KB key -> title, map_name2key: the inverse,
    # name_list: all KB titles (per load_KBaseKey's return tuple).
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
#在map_entity2sml_keys中查找最接近的10个name
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
#在wikipedia里扩展name的最接近的20个
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
# Step 1 (cached): candidate entities expanded through wikipedia.
# Each element is (entity, query, (namekeys_base, namekeys_wiki)) -- see the
# consumer loop below.
'step 1.获取candidate，通过wikipedia扩展'
# Fix: binary mode for pickle data; the redundant `BaseLine_Candidate_set = []`
# initializer (immediately overwritten) was dropped.
with open(BASE_DATA_PATH+'BaseLine_Candidate_set_old.dup','rb') as ftemp:
    BaseLine_Candidate_set = cPickle.load(ftemp)
    
'step 3. 获取google扩展'
'''
(name,wiki_srh_cfd,base_srh_cfd) 
wiki_srh_cfd = [(score,results,itm)]
'''
# Resume support: load the google results collected by a previous run so
# already-searched names can be skipped; start empty on the first run.
google_search_set = []
try:
    # Fix: binary mode for pickle data; narrow the bare `except:` (which
    # also swallowed KeyboardInterrupt/SystemExit) to the errors that a
    # missing or truncated cache file can actually raise.
    with open(BASE_DATA_PATH+'google_search_set.dup','rb') as ftemp:
        google_search_set = cPickle.load(ftemp)
except (IOError, EOFError, cPickle.UnpicklingError):
    pass
if True:
    InitBrowser(True)
    for cs in BaseLine_Candidate_set:
        entity = cs[0]
        query = cs[1]
        (namekeys_base,namekeys_wiki) = cs[2]
        entity_id = entity[0]
        query_id = query[0]
            
        name = entity[1]
        entId = entity[3]
        '**************************************'
        name_existed = False
        for gss in google_search_set:
            if gss[0]==name:
                name_existed = True
        if name_existed:
            print '#',name.encode('gbk','ignore'),'is existed!'
            continue
        '**********baseline searching**********'
        base_key = 'NIL'
        results=[]
        base_name = name
        wiki_srh_cfd = []
        if base_key=='NIL' and namekeys_wiki:
            print '#searching',name.encode('gbk','ignore'),'from namekeys_wiki'
            for itm in namekeys_wiki:
                print '#\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                try:
                    (score,results) = brgoogle_relation_confidence(name,itm,u'是',10)
                except :
                    score = 0
                    print '#ERROR: search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                wiki_srh_cfd.append((score,results,itm))
                print '###\tsearched:',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),'\tSCORE=\t',score
            wiki_srh_cfd = sorted(wiki_srh_cfd,reverse=True)
        
        base_srh_cfd = []
        if base_key=='NIL':
            print '#searching',name.encode('gbk','ignore'),'from namekeys_base'
            for itm in namekeys_base:
                print '#\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                try:
                    (score,results) = brgoogle_relation_confidence(name,itm,u'是',10)
                except :
                    score = 0
                    print '#ERROR: search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                base_srh_cfd.append((score,results,itm))
                print '###\tsearched:',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),'\tSCORE=\t',score
            base_srh_cfd = sorted(base_srh_cfd,reverse=True)
        google_search_set.append( (name,wiki_srh_cfd,base_srh_cfd) )
        
    ftemp = open(BASE_DATA_PATH+'google_search_set.dup','w')
    cPickle.dump(google_search_set, ftemp)
    ftemp.close()
        
    

  


        