# -*- coding: utf-8 -*-
import codecs
import sys,os
from st_common import *
from google_browser import *
import wikipedia
from langconv import *
import cPickle
import time 
import Levenshtein
import random

'''
<weibo id = "wanyuangongjijin7">
    <content>【兰州石化被曝最高每月为员工缴万元公积金】近日，一份名为《兰州石化党办2011年度职工住房公积金明细账》的文件显示，名单上52人中，兰州石化为其缴存公积金超过9万元的有10人，缴存额最高达12万元，平均每月一万。网友纷纷吐槽人家公积金比他一个月收入都要多。http://t.cn/zTwmi9i 有钱人……</content>
    <name id = "1">兰州石化</name>
    <startoffset id = "1">66</startoffset>
    <endoffset id = "1">70</endoffset>
    <kb id = "1">NIL</kb>
</weibo>
'''
###################################################################################################
# Global configuration.
# All wikipedia lookups in this pipeline are done against the Chinese wiki.
wikipedia.set_lang("zh")
# Local data roots (Windows paths) for the NLPCC-2014 shared-task data and
# the knowledge-base sample.  NOTE(review): machine-specific absolute paths.
BASE_DATA_PATH  = 'D:\\data\\Shared_Task_NLPCC14\\'
BASE_KB_PATH = 'E:\\desktop\\wu-request\\NLPCC 2014 Shared Tasks Guidelines\\Chinese Entity Linking  SAMPLE DATA NLPCC2014_EL_sample\\'

# Pipeline gate: a step guarded by `if BASELINE_STEP <= k:` runs only when
# BASELINE_STEP is at most k, letting earlier stages be skipped on re-runs.
BASELINE_STEP = 0

# step 0: load the sample queries (weiboAutoTag_6.txt) and the
# knowledge-base key/title map (PKBase_key_title.txt).
'step0. 加载sample_query_2014和PKBase_key_title'
if BASELINE_STEP<=0:
    kb_path = BASE_KB_PATH
    sp_file = kb_path+'weiboAutoTag_6.txt'
    # query_set: records parsed by the project helper readSample; later code
    # reads query[0] as the query id and query[1] as the weibo text.
    query_set = readSample(sp_file)
    kb_file = kb_path + 'PKBase_key_title.txt'
    # Bidirectional KB-key <-> entity-name maps plus the flat list of names,
    # as produced by the project helper load_KBaseKey.
    (map_key2name,map_name2key,name_list) = load_KBaseKey(kb_file)
    print 'Load sample and key finished!'
##---------------------------------------------------------------------
#在map_entity2sml_keys中查找最接近的10个name
ftemp = open(BASE_DATA_PATH+'map_entity2sml_keys.dup','r')
map_entity2sml_keys=cPickle.load(ftemp)
ftemp.close()
print 'Load map_entity2sml_keys finished!'
##---------------------------------------------------------------------
#在wikipedia里扩展name的最接近的20个
ftemp = open(BASE_DATA_PATH+'map_query2wikipedia.dup','r')
map_query2wikipedia=cPickle.load(ftemp)
ftemp.close()
print 'Load map_query2wikipedia finished!'
##---------------------------------------------------------------------
# step 1: fetch the candidate set (expanded through wikipedia) from the
# cached pickle.  The original pre-initialized BaseLine_Candidate_set to []
# only to overwrite it immediately; the dead assignment is dropped, and the
# file handle is closed via `with` even if unpickling fails.
'step 1.获取candidate，通过wikipedia扩展'
with open(BASE_DATA_PATH+'BaseLine_Candidate_set_old.dup','r') as ftemp:
    BaseLine_Candidate_set = cPickle.load(ftemp)
    
# step 3 (input prep): dump every query's raw text, one weibo per line, so
# the external dependency parser can consume it.  ASCII spaces are replaced
# with the full-width Chinese comma U+FF0C so the tokenizer treats them as
# clause separators rather than collapsing them.
'step 3. 对句子进行依存句法解析'
if True:
    # Original used an index loop with unused locals (query_idx, query_id);
    # a comprehension over query_set builds the identical list directly.
    raw_sens = [query[1].strip().replace(u' ', u'，') for query in query_set]
    fid = codecs.open(BASE_DATA_PATH+'ParsingAndTag\\raw_alltext.txt', 'w', encoding='utf-8')
    try:
        fid.write('\n'.join(raw_sens))
    finally:
        # Close even if the write fails, so the handle is never leaked.
        fid.close()
    
# ---------------------------------------------------------------------------
# One-off preprocessing: split every query into title + body sentences, run
# the external ZPar POS tagger and dependency parser over them, and cache the
# merged result as query_parsing_set.dup.  Hard-disabled with `if False:`
# because the expensive parse has already been performed once; the `else`
# branch simply reloads the cached pickle.  Flip to True to regenerate.
if False:
    raw_sens = []        # flattened list: every sentence of every query
    raw_sens_info = []   # per query: (query_idx, query_id, sen_begin, sen_end)
    raw_title = []       # titles only; queries without a title contribute none
    raw_title_info = []  # per query: (query_idx, query_id, title_pos, title)
    
    query_idx = 0
    for idx in range(0,len(query_set)):
        query_idx = idx
        query = query_set[idx]
        query_id = query[0]
        sentence = query[1].strip()
        '**************************************'
        # Peel a leading '#title#' hashtag or 【title】 headline off the weibo
        # text and keep it separately as the title.
        title = ''
        if sentence[0] == '#':
            # NOTE(review): sentence[0] raises IndexError if the stripped text
            # is empty; also re-binding `idx` shadows the loop index (harmless
            # here since the loop re-assigns idx from range() each iteration,
            # and query_idx preserves the original value) — worth cleaning up.
            idx = sentence.find('#',1)
            if idx !=-1:
                title =  sentence[1:idx]
                sentence = sentence[idx+1:]
        elif sentence[0]==u'【':
            idx = sentence.find(u'】',1)
            if idx !=-1:
                title =  sentence[1:idx]
                sentence = sentence[idx+1:]
                
        # Normalize '！' and '？' to '。', then split the body into sentences
        # and drop empty fragments.
        sentence=sentence.replace(u'！', u'。')
        sentence=sentence.replace(u'？', u'。')
        sens = sentence.split(u'。')
        sens = [itm.strip() for itm in sens if len(itm.strip())>0]
        
        # Half-open range [begin, end) of this query's sentences inside the
        # flat raw_sens list, so parses can be re-associated further below.
        sens_info = (query_idx, query_id, len(raw_sens),len(raw_sens)+len(sens) )
        raw_sens_info.append(sens_info)
        for ss in sens:
            raw_sens.append(ss)
        
        # title_pos is the index this title will occupy in raw_title; when
        # the title is empty the position is recorded but nothing appended.
        title_info = (query_idx, query_id, len(raw_title), title)
        raw_title_info.append(title_info)
        if title:
            raw_title.append(title)
    
    # Persist the bookkeeping pickles and the plain-text inputs that the
    # external ZPar tools consume.
    ftemp = open(BASE_DATA_PATH+'ParsingAndTag\\raw_sens_info.dup','w')
    cPickle.dump(raw_sens_info, ftemp)
    ftemp.close()
    
    ftemp = open(BASE_DATA_PATH+'ParsingAndTag\\raw_title_info.dup','w')
    cPickle.dump(raw_title_info, ftemp)
    ftemp.close()
    
    fid = codecs.open(BASE_DATA_PATH+'ParsingAndTag\\raw_title.txt', 'w', encoding='utf-8')
    fid.write('\n'.join(raw_title))
    fid.close()
    
    fid = codecs.open(BASE_DATA_PATH+'ParsingAndTag\\raw_sens.txt', 'w', encoding='utf-8')
    fid.write('\n'.join(raw_sens))
    fid.close()
    
    work_path = BASE_DATA_PATH+'ParsingAndTag\\'
    
    # Drive the external binaries through the Windows shell: POS-tag in
    # zpar_poseg_cn\, then dependency-parse in zpar_depparser_cn\, copying
    # inputs and outputs between the working directories at each stage.
    # NOTE(review): os.system return codes are never checked, so a failed
    # tagging run silently produces stale/empty [dep] files downstream.
    if True:
        print 'begin test pos_tag'
        print '---------------------------------------'
        os.chdir(work_path+'zpar_poseg_cn\\')
        os.system('copy %s %s' % (work_path+'raw_title.txt','raw_title.txt' ) )
        os.system('copy %s %s' % (work_path+'raw_sens.txt','raw_sens.txt' ) )
        
        os.system('test model raw_title.txt raw_title[tag].txt' )
        os.system('test model raw_sens.txt raw_sens[tag].txt' )
        
        os.system('copy %s %s' % ('raw_title[tag].txt',work_path+'zpar_depparser_cn\\raw_title[tag].txt' ) )
        os.system('copy %s %s' % ('raw_sens[tag].txt',work_path+'zpar_depparser_cn\\raw_sens[tag].txt' ) )
        os.system('copy %s %s' % ('raw_title[tag].txt',work_path+'raw_title[tag].txt' ) )
        os.system('copy %s %s' % ('raw_sens[tag].txt',work_path+'raw_sens[tag].txt' ) )
        
        os.chdir(work_path+'zpar_depparser_cn\\')
        os.system('test raw_title[tag].txt raw_title[dep].txt model[9]' )
        os.system('test raw_sens[tag].txt raw_sens[dep].txt model[9]' )
        
        os.system('copy %s %s' % ('raw_title[dep].txt',work_path+'raw_title[dep].txt' ) )
        os.system('copy %s %s' % ('raw_sens[dep].txt',work_path+'raw_sens[dep].txt' ) )
        
        print '---------------------------------------'
    
    # Read the dependency-parsed corpora back in (LoadCDB is a project
    # helper; presumably one parsed entry per input line — confirm).
    raw_sens_corpora = LoadCDB(work_path+'raw_sens[dep].txt')
    raw_title_corpora = LoadCDB(work_path+'raw_title[dep].txt') 
    
    # Re-associate parses with their queries via the recorded offsets; each
    # item is (query_idx, query_id, parsed-sentence slice, parsed title).
    query_parsing_set = []
    for idx in range(0,len(raw_sens_info)):
        sens_info = raw_sens_info[idx]
        title_info= raw_title_info[idx]
        # Sanity check: both info lists were appended in lockstep per query.
        assert(sens_info[0]==title_info[0] and sens_info[1]==title_info[1] and sens_info[0]==idx)
        sen_b = sens_info[2]
        sen_e = sens_info[3]
        title_i = title_info[2]
        
        if title_info[3]!='':
            query_parsing_itm = (idx, sens_info[1],raw_sens_corpora[sen_b:sen_e],raw_title_corpora[title_i] )
        else:
            # No title for this query: store an empty parse in its slot.
            query_parsing_itm = (idx, sens_info[1],raw_sens_corpora[sen_b:sen_e],[] )
            
        query_parsing_set.append(query_parsing_itm)
    
    ftemp = open(BASE_DATA_PATH+'ParsingAndTag\\query_parsing_set.dup','w')
    cPickle.dump(query_parsing_set, ftemp)
    ftemp.close()
else:
    # Parsing was already done in a previous run: reload the cached result.
    work_path = BASE_DATA_PATH+'ParsingAndTag\\'
    ftemp = open(work_path+'query_parsing_set.dup','r')
    query_parsing_set=cPickle.load(ftemp)
    ftemp.close()


        
      
            
        
        
        

    
  
  



        