# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
import wikipedia
from langconv import *
import cPickle
import time 
import Levenshtein
from google_browser import *
from people_daily_nepipeline import *
from extract_entitys_fromKB import *

def google_rel_extract(name,itm,max_loop = 2):
    print '#\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
    irun_cnt = 0
    score = 0
    while True:
        try:
            (score,results) = brgoogle_relation_confidence(name,itm,u'是',10)
            break
        except :
            irun_cnt=irun_cnt+1
            score = 0
            print '#ERROR: search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
            if irun_cnt>max_loop:
                break
    return score 
'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Pipeline configuration: Chinese wikipedia, local Windows data paths, and
# which baseline step this run starts from.
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

BASELINE_STEP = 3
# Starts the browser wrapper used for Google searches.
# NOTE(review): meaning of the boolean flag is defined in google_browser — confirm.
InitBrowser(True)

# step 0: load the sample weibo queries and the KB key<->title mapping.
'step0. 加载sample_query_2014和PKBase_key_title'
if BASELINE_STEP<=5:
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # map_key2name: KB key -> title; map_name2key: title -> list of keys;
    # name_list: all KB titles (scanned later for "<name>_(...)" variants).
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
#在map_entity2sml_keys中查找最接近的10个name
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
#在wikipedia里扩展name的最接近的20个
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
ftemp = open(BASE_DATA_PATH+'wiki_page_set_old.dup','r')
wiki_page_set_extent=cPickle.load(ftemp)
ftemp.close() 
wiki_page_map_extent = {}
for wps in wiki_page_set_extent:
    wiki_page_map_extent[wps[0]] = (wps[1],wps[2])
ftemp = open(BASE_DATA_PATH+'wiki_page_set_exact.dup','r')
wiki_page_set_exact=cPickle.load(ftemp)
ftemp.close() 
wiki_page_map_exact = {}
for wps in wiki_page_set_exact:
    wiki_page_map_exact[wps[0]] = (wps[1],wps[2])    
##---------------------------------------------------------------------
    
baseLine_candidate_set = []
# step 1: candidate set expanded through wikipedia (precomputed pickle).
# `with` replaces open/close pairs so handles are released on load errors.
'step 1.获取candidate，通过wikipedia扩展'
if True:
    with open(BASE_DATA_PATH+'baseLine_candidate_set.dup','r') as ftemp:
        baseLine_candidate_set = cPickle.load(ftemp)

# step 4: POS-tag results for the test wiki pages, exact and fuzzy variants.
'step 4. 对test中wikipage进行postag'
if True:
    with open(BASE_DATA_PATH+'map_ics2wikipage_exact.dup','r') as ftemp:
        map_ics2wikipage_exact = cPickle.load(ftemp)

    with open(BASE_DATA_PATH+'map_ics2wikipage_extent.dup','r') as ftemp:
        map_ics2wikipage_extent = cPickle.load(ftemp)
'-------------------------------------------------------------------------------------------------------------'
# step 6: sentence-parse results.  The `if False:` is a manual toggle:
# True-branch recomputes entitys_kb from the KB and caches it; else-branch
# (the current setting) loads both pickled caches.
'step 6. 获取句子解析结果'
if False:   
    ftemp = open(BASE_DATA_PATH+'results_all0.dup','r')
    results = cPickle.load(ftemp)
    ftemp.close()
    
    # Collect the resolved KB keys from every cached result row.
    keys = []
    for rs in results:
        (ics,query_id,entity_id,find_key,name,find_name,search_state) = rs
        keys.append(find_key)
    
    # Pull the full entity records for those keys out of the KB (mutates
    # entitys_kb in place).
    entitys_kb = []
    extract_entitys(keys,entitys_kb)
    
    # Cache for later runs; loaded by the else-branch below.
    ftemp = open(BASE_DATA_PATH+'entitys_kb[7].dup','w')
    cPickle.dump(entitys_kb, ftemp)
    ftemp.close() 
else:
    ftemp = open(BASE_DATA_PATH+'results_all0.dup','r')
    results = cPickle.load(ftemp)
    ftemp.close()
    
    ftemp = open(BASE_DATA_PATH+'entitys_kb[7].dup','r')
    entitys_kb = cPickle.load(ftemp)
    ftemp.close()

# Re-index the entity records by their quote-stripped KB key, then drop
# the list so the memory can be reclaimed.
entitys_kb_map = {}
for ett in entitys_kb:
    key = ett[0][0].replace('"','').strip()
    entitys_kb_map[key] = ett
entitys_kb=[]
'-------------------------------------------------------------------------------------------------------------'
fwd = codecs.open(BASE_DATA_PATH+'Result[EntAnsys][Chinese Entity Linking].txt', 'w', encoding='utf-8')
fwd.write('id\tsystem-id\tdoc-id\tname-id\tKB-id\n')
for irs in range(0,len(results)):
    (ics,query_id,entity_id,find_key,name,find_name,search_state) = results[irs]
    cs = baseLine_candidate_set[ics]
    entity = cs[0]
    query = cs[1]
    assert(query_id == query[0])
    assert(entity_id== entity[0])
    assert(cs[2][0])
    
    sentence = query[1].strip()
    iqx = cs[3]
    entity_nm = entity[1]
    (name,fkey,key_extent,key_wikiextent,wiki_page_exact,wiki_page_extent) = cs[2]
    '-------------------------------------u'
    ext_name = []
    for nm in name_list:
        fd_idx = nm.find(name)
        if fd_idx!=-1 and nm!=name:
            if nm.find('_(')!=-1 and  nm[fd_idx+len(name)]==u'_' and nm[fd_idx+len(name)+1]==u'(':
                ext_name.append(nm)
    if name!=find_name and ext_name:
        if len(ext_name)==1:
            newkey = map_name2key[ext_name[0]][0]
            newnm = ext_name[0]
            print '$\tfind_ext:',newnm.encode('gbk','ignore'),'-->',name.encode('gbk','ignore'),'| <----not',find_name.encode('gbk','ignore')
            find_name = name
            find_key = newkey
    '---------获取句法解析结果-------------'
    print '#begin',query_id.encode('gbk','ignore'),entity_id,iqx
    
    entitys_kb_itm = ()
    if not entitys_kb_map.get(find_key) is None:
        entitys_kb_itm = entitys_kb_map[find_key]
    
    
    if entitys_kb_itm and name!=find_name:
        entitys_kb_atts= entitys_kb_itm[1]
        bcheck = False
        for att in entitys_kb_atts:
            if sentence.find(att[1])!=-1:
                print '!\tcheck',find_name.encode('gbk','ignore'),'--->',name.encode('gbk','ignore'),iqx,search_state
                bcheck = True
        if not bcheck:
            print '*\tuncheck',find_name.encode('gbk','ignore'),'--->',name.encode('gbk','ignore'),iqx,search_state
    if find_key==u'':
        find_key = u'NIL'
    fd_string = u'%d\tEntAnsys\t%s\t%s\t%s\n' % (irs,query_id,entity_id,find_key)
    fwd.write(fd_string)
fwd.close()
        
    
    
    
        