# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
import wikipedia
from langconv import *
import cPickle
import time 
import Levenshtein
from google_browser import *
from people_daily_nepipeline import *
from extract_entitys_fromKB import *

def google_rel_extract(name,itm,max_loop = 2):
    print '#\tbegin searching',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
    irun_cnt = 0
    score = 0
    while True:
        try:
            (score,results) = brgoogle_relation_confidence(name,itm,u'是',10)
            break
        except :
            irun_cnt=irun_cnt+1
            score = 0
            print '#ERROR: search error,',name.encode('gbk','ignore'),itm.encode('gbk','ignore')
            if irun_cnt>max_loop:
                break
    return score 

def read_nefile(path):
    """Load a GBK-encoded named-entity file into a nested list.

    Each line of the file holds tab-separated tokens of the form
    ``word_TAG``.  The return value is a list with one entry per line,
    each entry being a list of ``[word, tag]`` pairs, where the token is
    split at its first underscore.
    """
    with codecs.open(path, 'r', encoding='gbk') as handle:
        raw_text = handle.read()

    parsed = []
    for line in raw_text.split('\n'):
        pairs = []
        for token in line.split('\t'):
            cut = token.find('_')
            pairs.append([token[:cut], token[cut + 1:]])
        parsed.append(pairs)
    return parsed

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Global configuration: Chinese wikipedia, Windows data paths, and the pipeline
# stage switch (BASELINE_STEP) that decides which steps below are recomputed
# versus loaded from their pickle caches.
wikipedia.set_lang("zh")
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

BASELINE_STEP = 3

# step0: load the sample queries (weiboAutoTag_6.txt) and the KB key/title map.
'step0. 加载sample_query_2014和PKBase_key_title'
if BASELINE_STEP<=5:
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
# Cached map looking up the ~10 closest KB names for an entity.
# NOTE(review): pickles are opened in text mode ('r'); fine for the
# py2/Windows protocol-0 pickles this was written against — confirm.
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
# Cached map: query name -> ~20 closest wikipedia suggestions for the name.
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
# Cached wikipedia pages, two flavours: "extent" (fuzzy match, *_old.dup) and
# "exact" (exact title match).  Each pickled entry wps is a 3-tuple —
# presumably (query name, page title, page text); reshaped into dicts keyed by
# wps[0] -> (wps[1], wps[2]).  TODO confirm field meaning against the builder.
ftemp = open(BASE_DATA_PATH+'wiki_page_set_old.dup','r')
wiki_page_set_extent=cPickle.load(ftemp)
ftemp.close() 
wiki_page_map_extent = {}
for wps in wiki_page_set_extent:
    wiki_page_map_extent[wps[0]] = (wps[1],wps[2])
ftemp = open(BASE_DATA_PATH+'wiki_page_set_exact.dup','r')
wiki_page_set_exact=cPickle.load(ftemp)
ftemp.close() 
wiki_page_map_exact = {}
for wps in wiki_page_set_exact:
    wiki_page_map_exact[wps[0]] = (wps[1],wps[2])    
##---------------------------------------------------------------------
baseLine_candidate_set = []
# step 1: build a candidate tuple for every entity mention in every query:
#   fkey            -- KB keys whose name exactly equals the mention
#   key_extent      -- KB names that contain the mention as a substring
#   key_wikiextent  -- wikipedia suggestions for the mention, with their KB keys
#   wiki_page_exact/extent -- cached wikipedia page tuples for the mention
# Recomputed only when BASELINE_STEP<=1; otherwise loaded from the pickle cache.
'step 1.获取candidate，通过wikipedia扩展'
if BASELINE_STEP<=1:
    found_count = 0
    all_count = 0
    for iqx in range(0,len(query_set)):
        query = query_set[iqx]
        # query[2] is the list of entity mentions for this weibo/query
        all_count = all_count+len(query[2])
        for entity in query[2]:
            name = entity[1]
            entId = entity[3]
            fkey = []
            if not map_name2key.get(name) is None:
                fkey = map_name2key[name]
                
            # KB names that contain (but are not equal to) the mention
            key_extent = {}
            for nm in name_list:
                if nm.find(name)!=-1 and nm!=name:
                    if not key_extent.get(nm) is None:
                        continue
                    key_extent[nm]=map_name2key[nm]
            
            wiki_page_exact = ()
            if not wiki_page_map_exact.get(name) is None:
                wiki_page_exact = wiki_page_map_exact[name]
            wiki_page_extent = ()
            if not wiki_page_map_extent.get(name) is None:
                wiki_page_extent = wiki_page_map_extent[name]
                    
            # wikipedia suggestions mapped back to KB keys.
            # NOTE(review): min([len(wiki_sets)]) is just len(wiki_sets) —
            # looks like a leftover from a capped "top-k" loop.
            key_wikiextent=[]
            if not map_query2wikipedia.get(name) is None:
                wiki_sets = map_query2wikipedia[name]
                for idx in range(0,min([len(wiki_sets)])):
                    wiki_name = wiki_sets[idx]
                    keys = []
                    if not map_name2key.get(wiki_name) is None:
                        keys = map_name2key[wiki_name]
                    key_wikiextent.append((wiki_name,keys))
            
            baseLine_candidate_set.append((entity,query,(name,fkey,key_extent,key_wikiextent,wiki_page_exact,wiki_page_extent),iqx ) )
    ftemp = open(BASE_DATA_PATH+'baseLine_candidate_set.dup','w')
    cPickle.dump(baseLine_candidate_set, ftemp)
    ftemp.close()
else:
    # cached run: load previously computed candidates
    ftemp = open(BASE_DATA_PATH+'baseLine_candidate_set.dup','r')
    baseLine_candidate_set = cPickle.load(ftemp)
    ftemp.close()

# step 3: run the external NE pipeline over every raw query sentence.
# Disabled (``if False``): the output file raw_alltext_ne.txt is expected to
# already exist; it is read back unconditionally below into query_ne_cpr.
'获取句子解析结果'    
'step 3. 对句子进行解析'
if False:
    print u'对原始问句进行命名实体检测'
    raw_sens = []
    for idx in range(0,len(query_set)):
        query_idx = idx
        query = query_set[idx]
        query_id = query[0]
        # GBK-encode and strip spaces so the NE tool sees one clean line per query
        sentence = query[1].encode('gbk','ignore')
        sentence=sentence.replace(' ','')
        raw_sens.append(sentence)
    fid = codecs.open(BASE_DATA_PATH+'raw_alltext.txt', 'w', encoding='gbk')
    fid.write('\n'.join(raw_sens))
    fid.close()
    ne_pipeline(BASE_DATA_PATH,'raw_alltext.txt','raw_alltext_ne.txt')
    print 'finished ne!'
        
# One tagged line per query, parallel to query_set (indexed by iqx in step 6).
query_ne_cpr = read_nefile(BASE_DATA_PATH+'raw_alltext_ne.txt')

# step 4: POS/NE-tag the cached wikipedia pages of candidates that had no
# exact KB key.  Disabled (``if False``): results are loaded from the two
# pickle caches in the else branch.  Pages are converted to simplified
# Chinese (Converter('zh-hans')) before tagging.
'step 4. 对test中wikipage进行postag'
if False: 
    map_ics2wikipage_exact = {}
    map_ics2wikipage_extent = {}
    for ics in range(0,len(baseLine_candidate_set)):
        cs = baseLine_candidate_set[ics]
        entity = cs[0]
        query = cs[1]
        (name,fkey,key_extent,key_wikiextent,wiki_page_exact,wiki_page_extent) = cs[2]
        print '#done',float(ics)/len(baseLine_candidate_set)
        
        find_key = u''
        find_name = name
        if fkey:
            find_key = fkey[0]
        # only tag wikipages for mentions without a direct KB key
        if find_key==u'':
            if len(wiki_page_exact)>0:
                print '@\texact find and tag wikipage:',wiki_page_exact[0].encode('gbk','ignore'),'-->',name.encode('gbk','ignore')
                fod1 = codecs.open(BASE_DATA_PATH+'raw_all_wiki_page_exact.txt', 'w', encoding='gbk')
                cnv_text = Converter('zh-hans').convert(wiki_page_exact[1])
                fod1.write(cnv_text.encode('gbk','ignore'))
                fod1.close()
                ne_pipeline(BASE_DATA_PATH,'raw_all_wiki_page_exact.txt','raw_all_wiki_page_exact_out.txt')
                # NOTE(review): this reuses/overwrites the global query_ne_cpr
                # as a scratch variable — confirm step 6 is never run in the
                # same pass as step 4.
                query_ne_cpr = read_nefile(BASE_DATA_PATH+'raw_all_wiki_page_exact_out.txt')
                map_ics2wikipage_exact[ics] = query_ne_cpr
            if len(wiki_page_extent)>0:
                print '@\textent find and tag wikipage:',wiki_page_extent[0].encode('gbk','ignore'),'-->',name.encode('gbk','ignore')
                fod1 = codecs.open(BASE_DATA_PATH+'raw_all_wiki_page_extent.txt', 'w', encoding='gbk')
                cnv_text = Converter('zh-hans').convert(wiki_page_extent[1])
                fod1.write(cnv_text.encode('gbk','ignore'))
                fod1.close()
                ne_pipeline(BASE_DATA_PATH,'raw_all_wiki_page_extent.txt','raw_all_wiki_page_extent_out.txt')
                query_ne_cpr = read_nefile(BASE_DATA_PATH+'raw_all_wiki_page_extent_out.txt')
                map_ics2wikipage_extent[ics] = query_ne_cpr
    ftemp = open(BASE_DATA_PATH+'map_ics2wikipage_exact.dup','w')
    cPickle.dump(map_ics2wikipage_exact, ftemp)
    ftemp.close()
    
    ftemp = open(BASE_DATA_PATH+'map_ics2wikipage_extent.dup','w')
    cPickle.dump(map_ics2wikipage_extent, ftemp)
    ftemp.close()      
else:
    # cached run: load the tagged wikipage maps (keyed by candidate index ics)
    ftemp = open(BASE_DATA_PATH+'map_ics2wikipage_exact.dup','r')
    map_ics2wikipage_exact = cPickle.load(ftemp)
    ftemp.close()
    
    ftemp = open(BASE_DATA_PATH+'map_ics2wikipage_extent.dup','r')
    map_ics2wikipage_extent = cPickle.load(ftemp)
    ftemp.close()          

'辅助函数'
def search_query_ne(qne,name):
    """Collect the distinct NE tags attached to *name* in one tagged sentence.

    qne  -- list of [word, tag] pairs (as produced by read_nefile)
    name -- surface form to look up; comparison strips surrounding whitespace
    """
    target = name.strip()
    found = []
    for pair in qne:
        if pair[0].strip() == target and pair[1] not in found:
            found.append(pair[1])
    return found

def search_wikipage_ne(wikipage_ne,tags):
    """Return the distinct words whose NE tag is in *tags*, scanning only the
    FIRST line of *wikipage_ne*.

    NOTE(review): the original loop breaks unconditionally after the first
    line — presumably deliberate (title / first sentence of the page);
    behavior preserved here. Confirm with the step-6 caller.
    """
    if tags==[]:
        return []
    matched = []
    for line in wikipage_ne[:1]:
        for pair in line:
            if pair[1] in tags and pair[0] not in matched:
                matched.append(pair[0])
    return matched

def pripor_check_name_pair(name1,name2):
    """Heuristic filter: may *name1* (a name found on a wiki page) stand in
    for the original mention *name2*?

    Returns False when the two names differ in checkTypeRL type signature,
    when name1 is the bare generic u'人' ("person") but name2 does not
    contain it, or when name1 ends in the ethnic suffix u'族' that name2
    lacks; True otherwise.
    """
    # BUG FIX: the original compared the module-level globals ``nm`` and
    # ``name`` (leftovers of the step-1/step-6 loops) instead of the
    # parameters, so the type check silently used unrelated values.
    if checkTypeRL(name1)!=checkTypeRL(name2):
        return False
    if name1==u'人' and name2.find(u'人')==-1:
        return False
    if name1[-1]==u'族' and name2.find(u'族')==-1:
        return False
    return True

'-------------------------------------------------------------------------------------------------------------'
# step 6: link every candidate mention to a KB key, in decreasing order of
# confidence.  search_state records which strategy fired:
#   0 = direct KB key for the surface name (or nothing found)
#   1 = NE-matched name from the exact-title wikipedia page
#   2 = extent (fuzzy) wikipedia page title found in the KB
#   3 = first wikipedia search suggestion that has KB keys
'step 6. 获取句子解析结果'   
#InitBrowser(True)
results = []
all_count  = 0
found_count = 0
for ics in range(0,len(baseLine_candidate_set)):
    cs = baseLine_candidate_set[ics]
    entity = cs[0]
    query = cs[1]
    query_id = query[0]
    entity_id = entity[0]
    sentence = query[1].strip()
    all_count = all_count +1
    iqx = cs[3]
    # NE tags of the query sentence this mention came from (step 3 output)
    query_ne = query_ne_cpr[iqx]
    '---------获取句法解析结果-------------'
    print '#begin',query_id,entity_id,iqx
    entity_nm = entity[1]
    '-------------------------------------'
    (name,fkey,key_extent,key_wikiextent,wiki_page_exact,wiki_page_extent) = cs[2]
    # NOTE(review): key_extent_vec is computed but never used below.
    key_extent_vec = sorted(key_extent)
    
    search_state = 0
    find_key = u''
    find_name = name
    # strategy 0: the mention itself has a KB key
    if fkey:
        find_key = fkey[0]
    # strategy 1: scan the exact-title wikipedia page for NE words that share
    # the mention's tags and that exist in the KB
    if find_key==u'':
        if len(wiki_page_exact)>0:
            tags = search_query_ne(query_ne,entity_nm)
            #tags=['ng','nr','ns','nt','nz']
            names = search_wikipage_ne(map_ics2wikipage_exact[ics],tags)
            for nm in names:
                if pripor_check_name_pair(nm,name) and not map_name2key.get(nm) is None:
                    fkey = map_name2key[nm]
                    find_key = fkey[0]
                    find_name = nm
                    search_state = 1
                    print '!!!\texact find wikipage named:',find_name.encode('gbk','ignore'),'-->',name.encode('gbk','ignore')
                    break
    # strategy 2: fuzzy-matched wikipedia page whose title is a KB name
    if find_key==u'':  
        if len(wiki_page_extent)>0:
            if not map_name2key.get(wiki_page_extent[0]) is None:
                fkey = map_name2key[wiki_page_extent[0]]
                find_name = wiki_page_extent[0]
                find_key = fkey[0]
                search_state = 2
                print '@\textent find wikipage:',wiki_page_extent[0].encode('gbk','ignore'),'-->',name.encode('gbk','ignore')
            
    # strategy 3: first wikipedia search suggestion that carries KB keys
    #if find_key==u'' and key_wikiextent:
    if key_wikiextent and find_key==u'':
        keypair = key_wikiextent[0]
        if keypair[1]:
            find_name = keypair[0]
            find_key  = keypair[1][0]
            search_state = 3
            print '@\texfind wikiSearch:',keypair[0].encode('gbk','ignore'),'-->',name.encode('gbk','ignore')
    if find_key != u'':
        found_count=found_count+1
    
    res = (ics,query_id,entity_id,find_key,name,find_name,search_state)
    results.append(res)
    #fd_string = u'1\tentRelation\t%s\t%s\t%s\t%s-->\t%s' % (query_id,entity_id,find_key,find_name,name)  
    #print fd_string.encode('gbk','ignore')      

# fraction of mentions for which some KB key was found
print float(found_count)/all_count

# persist the per-mention linking decisions for later scoring
ftemp = open(BASE_DATA_PATH+'results_all0.dup','w')
cPickle.dump(results, ftemp)
ftemp.close()
        