# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
from google_browser import *
import wikipedia
from langconv import *
import cPickle
import time 
import Levenshtein
import random

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

BASELINE_STEP = 0

# Step 0: load the sample query set (weiboAutoTag_6.txt) and the KB key/title
# table.  readSample and load_KBaseKey are project helpers (from st_common /
# google_browser star-imports — exact origin not visible here).
'step0. 加载sample_query_2014和PKBase_key_title'
if BASELINE_STEP<=0:
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    # query_set: the annotated weibo queries to be entity-linked.
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # KB lookup tables; map_name2key is used later to resolve a matched
    # Wikipedia title back to its KB key (see the main loop below).
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
# Load the precomputed map from each entity mention to its ~10 most
# similar KB names (built by an earlier script and pickled to disk).
# NOTE(review): files are opened in text mode 'r'; this must match the
# mode/protocol the writer script used — confirm before changing to 'rb'.
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
# Load the precomputed map expanding each query name to its ~20 closest
# Wikipedia candidates.
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
BaseLine_Candidate_set = []
# Step 1: load the candidate set produced by the Wikipedia-expansion stage.
# Each element is (entity, query, (namekeys_base, namekeys_wiki)) — see the
# unpacking in the main loop below.
'step 1.获取candidate，通过wikipedia扩展'
ftemp = open(BASE_DATA_PATH+'BaseLine_Candidate_set_old.dup','r')
BaseLine_Candidate_set = cPickle.load(ftemp)
ftemp.close()
    
'step 3. '
# Step 3: try to load previously accumulated results so an interrupted run
# can resume (entries already present are skipped in the main loop).
wiki_candidate_set=[]
try:
    ftemp = open(BASE_DATA_PATH+'wiki_candidate_set.dup','r')
    wiki_candidate_set=cPickle.load(ftemp)
    ftemp.close()        
except :
    # Best-effort resume: the file simply may not exist on a first run.
    # NOTE(review): bare except also hides real errors (e.g. a corrupt
    # pickle) — consider narrowing to IOError in a follow-up.
    pass

if True:
    # Main linking pass: for each (entity, query) candidate, decide a KB key
    # (or 'NIL') by checking whether any candidate Wikipedia page's content
    # mentions the entity name.  Results are streamed to baseline_result1.txt
    # and the growing wiki_candidate_set is checkpointed every 10 entries.
    fd_out = codecs.open(BASE_DATA_PATH+'baseline_result1.txt', 'w', encoding='utf-8')
    fd_out_idx = 0 
    fnd_count = 0 
    all_count =0 
    for cs in BaseLine_Candidate_set:
        entity = cs[0]
        query = cs[1]
        # namekeys_base: candidate names from the KB similarity map;
        # namekeys_wiki: candidate names from the Wikipedia expansion.
        (namekeys_base,namekeys_wiki) = cs[2]
        entity_id = entity[0]
        query_id = query[0]
            
        name = entity[1]
        # NOTE(review): entId is read but never used below — confirm whether
        # it is needed or dead.
        entId = entity[3]
        
        all_count=all_count+1.0
        fd_out_idx= fd_out_idx+1
        '**************************************'
        # Resume support: skip entities already processed in a previous run
        # (wiki_candidate_set tuples store the name at index 2).
        found_name = False
        for wcs in wiki_candidate_set:
            if wcs[2]==name:
                found_name = True
        if found_name:
            print '@founded:', name
            continue
        '**********baseline searching**********'
        # Default decision: NIL with the surface name itself.
        base_key = 'NIL'
        base_name = name
        # Exact-name hit in the KB candidate map wins immediately.
        if not namekeys_base.get(name) is None:
            base_key = namekeys_base[name][0]
        
        # Fetch each Wikipedia-expanded candidate page and keep those whose
        # article text actually contains the entity name.
        wiki_srh_cfd = []
        if namekeys_wiki:
            print '@searching',name,'from namekeys_wiki'
            for itm in namekeys_wiki:
                print '@\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                run_count = 0
                # Retry loop: network/disambiguation errors from the
                # wikipedia API are retried up to 10 times, with a small
                # random sleep (0–0.2 s) to throttle requests.
                while True:
                    try:
                        time.sleep(random.randint(0,100)/500.0)
                        page = wikipedia.page(itm)
                        if page.content.find(name)!=-1:
                            wiki_srh_cfd.append( (itm, page.title, page.content) )
                        break
                    except :
                        # NOTE(review): bare except retries on *any* error,
                        # including KeyboardInterrupt — deliberate best-effort
                        # behavior here, but worth narrowing.
                        print '@ERROR:\t search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),'rerun',run_count
                        run_count=run_count+1
                        if run_count>10:
                            break
                print '@\tsearched',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),len(wiki_srh_cfd)
                
        # Same page-content check for the KB-derived candidates (slower
        # throttle: 0–1 s sleep).
        base_srh_cfd = []
        if namekeys_base:
            print '@searching',name.encode('gbk','ignore'),'from namekeys_base'
            for itm in namekeys_base:
                print '@\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
                run_count = 0
                while True:
                    try:
                        time.sleep(random.randint(0,100)/100.0)
                        page = wikipedia.page(itm)
                        if page.content.find(name)!=-1:
                            base_srh_cfd.append( (itm, page.title, page.content) )
                        break
                    except :
                        print '@ERROR:\t search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),'rerun',run_count
                        run_count=run_count+1
                        if run_count>10:
                            break
                print '@\tsearched',name.encode('gbk','ignore'),itm.encode('gbk','ignore'),len(base_srh_cfd)        
        
        # Fall back: if no exact KB hit but Wikipedia confirmed a candidate,
        # map the first confirmed wiki name back to a KB key.
        # NOTE(review): map_name2key[base_name] will raise KeyError if the
        # wiki name is absent from the KB table — confirm this cannot happen.
        if base_key=='NIL' and wiki_srh_cfd:
            base_name = wiki_srh_cfd[0][0]
            base_key = map_name2key[base_name][0]
            
        if not base_key=='NIL':
            fnd_count=fnd_count+1.0
            
        # Emit one tab-separated result line: idx, relation tag, query id,
        # entity id, resolved KB key, plus a human-readable name suffix.
        fd_string = '%d\tentRelation\t%s\t%s\t%s' % (fd_out_idx,query_id,entity_id,base_key)
        fd_string = fd_string+'\t:\t'+name +',\t'+base_name+'\n'
        fd_out.write(fd_string)
        print '@\t',fd_string.encode('gbk','ignore')
        
        # Accumulate the full evidence tuple for later stages / resuming.
        candidate = (query_id,entity_id,name,entity,query,wiki_srh_cfd,base_srh_cfd)
        wiki_candidate_set.append(candidate)
        
        # Checkpoint every 10 processed entities to a size-stamped file so a
        # crash loses at most 10 entries' worth of network work.
        if not (len(wiki_candidate_set) % 10):
            ftemp = open(BASE_DATA_PATH+'wiki_candidate_set[%d].dup' % len(wiki_candidate_set),'w')
            cPickle.dump(wiki_candidate_set, ftemp)
            ftemp.close()
        '**********baseline searching**********'
    # Fraction of mentions resolved to a non-NIL KB key.
    print 'found rate=',fnd_count/all_count
    fd_out.close()
    
    # Final dump of the complete result set (the resume file loaded above).
    ftemp = open(BASE_DATA_PATH+'wiki_candidate_set.dup','w')
    cPickle.dump(wiki_candidate_set, ftemp)
    ftemp.close()
else:
    # Dead branch (guard is `if True:`): kept as the load-only alternative
    # for when the linking pass has already been completed.
    ftemp = open(BASE_DATA_PATH+'wiki_candidate_set.dup','r')
    wiki_candidate_set=cPickle.load(ftemp)
    ftemp.close()        
  
  



        