#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update:

'''docstring
'''

__revision__ = '0.1'
__author__ = 'lxd'

import re
from lxml import etree
import nltk
import tools
import cPickle as pickle
import os
import logging

from log import getLogger
log = getLogger('wiki')

# Verbose (re.VERBOSE via inline (?x)) pattern matching the three date
# formats that appear on the wiki pages handled below:
#   "14 BC", "4 April 1842", "December 7, 1987".
# Used both for tokenization (_word_tokenize) and date detection (_is_date).
DATE_PATTERN = r'''(?x)    # set flag to allow verbose regexps
    \d+\s+BC # 14 BC
    | \d+\s+(January|February|March|April|May|June|July|August|September|October|November|December)\s+\d+ # 4 April 1842
    | (January|February|March|April|May|June|July|August|September|October|November|December)\s+\d+,\s+\d+ # December 7, 1987
'''
# lxml workaround helpers
def get_xpath_texts(nodes):
    """Workaround for lxml's text() handling.

    For each node, take its first text() chunk, strip any parenthesised
    part and surrounding whitespace, and collect the non-empty results.
    """
    collected = []
    for node in nodes:
        texts = node.xpath('text()')
        if not texts:
            continue
        # drop the content inside '(...)' and trim whitespace
        cleaned = re.sub(r'\(.*\)', '', unicode(texts[0])).strip()
        if cleaned:
            collected.append(cleaned)
    return collected

def get_xpath_text(nodes):
    """Return the first cleaned text of *nodes*, or None when there is none."""
    texts = get_xpath_texts(nodes)
    return texts[0] if texts else None

class Wiki(object):
    """Extract one biographical fact from a person's Wikipedia page.

    Combines two sources:
      * the infobox table (ground truth, via ``_parse_infobox``), and
      * a Naive Bayes classifier over chunk-parsed body sentences.

    Subclasses set ``self.key`` (the fact name, e.g. 'birthdate') and
    override ``gender_features`` (and optionally ``get_parse_sents``).
    ``self.html`` must be assigned the raw page markup before any parsing
    method is called.
    """

    def _get_lines(self):
        """Parse ``self.html`` and return each body <p> paragraph as plain text."""
        tree = etree.HTML(self.html)
        ptree_list = tree.xpath('//div[@id="bodyContent"]/p')
        p_list = [nltk.clean_html(etree.tostring(p)) for p in ptree_list]
        return p_list

    def _parse_infobox(self):
        """Parse the page's infobox table into a dict keyed by our fact names
        ('birthdate', 'birthplace', 'spouses', ...)."""
        def update_text(d, node, xpath, prop):
            # store the first stripped text under prop
            value = get_xpath_text(node.xpath(xpath))
            if value:
                d[prop] = value
            return d

        def update_texts(d, node, xpath, prop):
            # store the stripped texts as a list, also picking up the texts
            # of inner <p> and <a> elements (could move into get_xpath_texts)
            values = get_xpath_texts(node.xpath(xpath))
            values.extend(get_xpath_texts(node.xpath('%s/p' % xpath)))
            values.extend(get_xpath_texts(node.xpath('%s/a' % xpath)))
            if values:
                d[prop] = values
            return d

        def parse_texts(d, node, xpath, prop):
            # unused debug helper: dump the node's cleaned text
            print(nltk.clean_html(etree.tostring(node)))

        def update_list(d, node, xpath, prop):
            # split a comma-separated value into a stripped, lower-cased list
            value = get_xpath_text(node.xpath(xpath))
            if value:
                values = [v.lower().strip() for v in value.split(',')]
                if not values[0] or values[0] == '(':
                    # the text was mostly markup: fall back to the <a> texts
                    values.extend(get_xpath_texts(node.xpath('%s/a' % xpath)))
                    # drop empty entries and stray bracket tokens
                    values = [v for v in values if (v and v not in ('(', ')'))]
                d[prop] = values
            return d

        def update_list_first(d, node, xpath, prop):
            # split a comma-separated value and keep only the first part
            value = get_xpath_text(node.xpath(xpath))
            if value:
                values = [v.strip() for v in value.split(',')]
                d[prop] = values[0]
            return d

        def update_combo_text(d, node, xpath, prop):
            # append to the list stored under prop, creating it when missing
            # (dict.has_key is deprecated; `in` works on py2 and py3)
            value = get_xpath_text(node.xpath(xpath))
            if value:
                if prop in d:
                    d[prop].append(value)
                else:
                    d[prop] = [value]
            return d

        tree = etree.HTML(self.html)
        d = {}
        for tr_tree in tree.xpath('//table[contains(@class,"infobox")]/tr'):
            label = get_xpath_text(tr_tree.xpath('th'))
            if not label:
                continue
            if label == 'Born':
                d = update_text(d, tr_tree, 'td', 'birthdate')
                d = update_list_first(d, tr_tree, 'td/a', 'birthplace')
            if label == 'Died':
                d = update_text(d, tr_tree, 'td', 'deathdate')
                d = update_list_first(d, tr_tree, 'td/a', 'deathplace')
            if label.startswith('Occupation'):
                # occupations are normalised to lower case
                d = update_list(d, tr_tree, 'td', 'occupations')
            if label == 'Nationality':
                d = update_text(d, tr_tree, 'td', 'nationality')
            if label == 'Religion':
                d = update_text(d, tr_tree, 'td/a', 'religion')
            if label.startswith('Spouse'):
                d = update_texts(d, tr_tree, 'td', 'spouses')
            if label == 'Father':
                d = update_combo_text(d, tr_tree, 'td/a', 'parents')
            if label == 'Mother':
                d = update_combo_text(d, tr_tree, 'td/a', 'parents')
            if label == 'Residence':
                d = update_list_first(d, tr_tree, 'td/a', 'residence')
            if label == 'Children':
                d = update_texts(d, tr_tree, 'td', 'children')
            if label == 'Birth:':
                d = update_text(d, tr_tree, 'td', 'birthdate')
            if label == 'Death:':
                d = update_text(d, tr_tree, 'td', 'deathdate')

        return d

    def _word_tokenize(self, sent):
        """Tokenize *sent*, keeping dates, abbreviations, html entities and
        currency amounts as single tokens."""
        pattern = DATE_PATTERN + r'''(?x)    # set flag to allow verbose regexps
            | ([A-Z]\.)+        # abbreviations, e.g. U.S.A.
            | \&\#\d+\;    # &#198;
            | \w+(-\w+)*        # words with optional internal hyphens
            | \$?\d+(\.\d+)?%?  # currency and percentages, e.g. $12.40, 82%
            | \.\.\.            # ellipsis
            | [][.,;"'?():-_`]  # these are separate tokens
        '''
        words = nltk.regexp_tokenize(sent, pattern)
        return words

    def get_samples(self, directory):
        """Return {name: {'lines': paragraphs, 'keywords': infobox dict}}
        for every html file in *directory*."""
        r = {}
        for fname in os.listdir(directory):  # renamed: `file` shadowed a builtin
            name = os.path.splitext(fname)[0]
            self.html = tools.open_file(r'%s/%s.html' % (directory, name))
            lines = self._get_lines()
            keywords = self._parse_infobox()
            r[name] = {'lines': lines, 'keywords': keywords}
        return r

    def save_samples(self, directory):
        """Pickle the parsed samples of *directory* to result/samples.pickle."""
        samples = self.get_samples(directory)
        data = pickle.dumps(samples)
        tools.save_file(r'result/samples.pickle', data)

    def _get_chunk(self, sent):
        """Tokenize, POS-tag and NE-chunk *sent*; return conll (word, tag,
        chunk-tag) triples."""
        words = self._word_tokenize(sent)
        tags = nltk.pos_tag(words)
        tree = nltk.ne_chunk(tags)
        chunk = nltk.chunk.tree2conlltags(tree)
        return chunk

    def get_items(self, lines, key_value=None):
        """Chunk-parse *lines* (a list of paragraphs); when *key_value* is
        given, only sentences literally containing one of its values are
        parsed.  Returns a list (per paragraph) of lists of conll chunks."""
        def get_chunks(sents, key_value):
            r = []
            if key_value is not None:
                # hoisted: this list is invariant over the sentences
                key_values = self.get_list_key_value(key_value)
            for sent in sents:
                if key_value is not None:
                    for v in key_values:
                        if v in sent:
                            chunk = self._get_chunk(sent)
                            r.append(chunk)
                else:
                    chunk = self._get_chunk(sent)
                    r.append(chunk)
            return r

        r = []
        for line in lines:
            sents = nltk.sent_tokenize(line)
            chunks = get_chunks(sents, key_value)
            r.append(chunks)
        return r

    def get_samples_of_key(self, content):
        """Find, for every sample, the sentences that mention this key's
        infobox value."""
        r = {}
        for name, value in content.items():
            if self.key in value['keywords']:
                key_value = value['keywords'][self.key]
                items = self.get_items(value['lines'], key_value)
                r[name] = {'items': items, 'key_value': key_value}
        return r

    def save_samples_of_key(self):
        """Pickle the per-key samples to result/<key>/samples.pickle."""
        data = tools.open_file(r'result/samples.pickle')
        content = pickle.loads(data)
        samples = self.get_samples_of_key(content)
        data = pickle.dumps(samples)
        tools.save_file(r'result/%s/samples.pickle' % self.key, data)

    def get_list_key_value(self, key_value):
        """Normalize an infobox value to a list (single values are wrapped)."""
        if not isinstance(key_value, list):
            return [key_value]
        else:
            return key_value

    def clean_key_values(self, key_values):
        """Normalize keyword values: cut at the first comma (except dates),
        cut before a standalone word 'of', and drop values shorter than 3
        characters."""
        def pick_before_comma(keywords):
            # for every non-date value keep only the part before ','
            for keyword in keywords:
                if self._is_date(keyword):
                    yield keyword
                else:
                    index = keyword.find(',')
                    if index >= 0:
                        yield keyword[:index].strip()
                    else:
                        yield keyword

        def pick_before_of(keywords):
            # keep only the part before the word 'of'
            # BUG FIX: find('of') also matched inside words such as
            # 'Professor' (-> 'Pr'); use a word-boundary search instead
            for keyword in keywords:
                m = re.search(r'\bof\b', keyword)
                if m:
                    yield keyword[:m.start()].strip()
                else:
                    yield keyword

        def del_short(key_values):
            # BUG FIX: the original removed items from the list while
            # iterating it, which silently skips the element after each
            # removal; build a filtered copy instead
            return [value for value in key_values if len(value) >= 3]

        key_values = list(pick_before_comma(key_values))
        key_values = list(pick_before_of(key_values))
        key_values = del_short(key_values)
        return key_values

    def save_html_of_key(self):
        """Write every sentence mentioning one of this key's values to an
        html report, with the matched values rendered in bold."""
        def clean_key_value(value):
            # escape regex metacharacters that occur in infobox values
            return value.replace('?', '\?').replace('(', '\(').replace(')', '\)')

        data = tools.open_file(r'result/samples.pickle')
        samples = pickle.loads(data)
        content = ''
        for name, value in samples.items():
            if self.key in value['keywords']:
                content += '%s<br/>' % name + '*' * 30 + '<br/>'
                key_value = value['keywords'][self.key]
                key_values = self.get_list_key_value(key_value)
                key_values = [clean_key_value(v) for v in key_values]
                key_values = self.clean_key_values(key_values)

                for line in value['lines']:
                    sents = nltk.sent_tokenize(line)
                    for sent in sents:
                        html = sent
                        for key_value in key_values:
                            if key_value in sent:
                                html = re.compile(r'\b%s\b' % key_value).sub(r'<b>%s</b>' % key_value, html)
                        # `<>` replaced by `!=` (removed in py3)
                        if html != sent:
                            content += '%s<br/>' % html
                content += '<hr/>'
        tools.save_file(r'result/%s/sents.html' % self.key, content)

    ########## classify ##########
    def train(self):
        """Build the feature sets, then train and return the classifier."""
        self.get_featuresets()
        return self.get_classifier()

    def _is_date(self, word):
        """Return a match object when *word* looks like a date, else None.
        (re caches compiled patterns, so no explicit compile is needed.)"""
        return re.search(DATE_PATTERN, word)

    def _all_in_word(self, lst, word):
        """True when every item of *lst* occurs in *word*."""
        return all(l in word for l in lst)

    def _either_in_word(self, lst, word):
        """True when any item of *lst* occurs in *word*."""
        return any(l in word for l in lst)

    def gender_features(self, sent, i):
        """Feature dict for token *i* of a conll-tagged sentence.
        Overridden by each subclass; the base returns no features."""
        return {}

    def get_featuresets(self):
        """Build (features, 'True'/'False') pairs from the per-key samples:
        a token is labelled 'True' when its continuous entity equals one of
        the cleaned infobox values.  Pickled to result/<key>/featuresets.pickle."""
        data = tools.open_file(r'result/%s/samples.pickle' % self.key)
        samples = pickle.loads(data)
        featuresets = []
        for name, value in samples.items():
            key_value = value['key_value']
            key_values = self.get_list_key_value(key_value)
            key_values = self.clean_key_values(key_values)
            log.debug(key_values)
            for sent in self.get_parse_sents(value['items']):
                for i, chunk in enumerate(sent):
                    features = self.gender_features(sent, i)
                    if features:
                        parse_value = self._get_continue_entity(chunk[0], sent)
                        result = str(False)
                        for v in key_values:
                            if parse_value == v:
                                result = str(True)
                                break
                        featuresets.append((features, result))
        data = pickle.dumps(featuresets)
        tools.save_file(r'result/%s/featuresets.pickle' % self.key, data)
        return featuresets

    def get_parse_sents(self, lines):
        """Flatten chunked paragraphs into a stream of sentences; subclasses
        may restrict this (e.g. to the first sentence only)."""
        for line in lines:
            for sent in line:
                yield sent

    def get_classifier(self):
        """Train a Naive Bayes classifier on half of the stored feature
        sets, log its accuracy on the other half, and return it."""
        data = tools.open_file(r'result/%s/featuresets.pickle' % self.key)
        featuresets = pickle.loads(data)
        log.debug('featuresets:%s' % featuresets)

        size = int(len(featuresets) * 0.5)
        train_set, test_set = featuresets[size:], featuresets[:size]

        classifier = nltk.NaiveBayesClassifier.train(train_set)
        classifier.show_most_informative_features()
        log.info('accuracy:%s' % nltk.classify.accuracy(classifier, test_set))

        return classifier

    ########## evaluation ##########
    def _get_continue_entity(self, word, chunks):
        """Return the continuous entity starting at the first occurrence of
        *word*: the word itself plus all immediately following I- chunks."""
        result = word
        for i, (w, t, c) in enumerate(chunks):
            if w == word:
                for j in range(i + 1, len(chunks)):
                    if chunks[j][2].startswith('I'):
                        result = result + ' ' + chunks[j][0]
                    else:
                        break
                # BUG FIX: stop after the first occurrence; the original kept
                # scanning and also concatenated chunks of later duplicates
                break
        return result

    def _get_forcast_value(self, classifier, value):
        """Yield the continuous entities of the tokens the classifier
        labels 'True'."""
        for sent in self.get_parse_sents(value['items']):
            for i, chunk in enumerate(sent):
                features = self.gender_features(sent, i)
                if features:
                    if classifier.classify(features) == 'True':
                        yield self._get_continue_entity(chunk[0], sent)

    def check(self):
        """Train, then report extraction accuracy against the infobox
        ground truth of the stored samples."""
        classifier = self.train()
        correct_num, total_num, key_num = 0, 0, 0
        data = tools.open_file(r'result/%s/samples.pickle' % self.key)
        samples = pickle.loads(data)
        for name, value in samples.items():
            if 'key_value' in value:
                key_value = value['key_value']
                key_values = self.get_list_key_value(key_value)

                forcast_values = self._get_forcast_value(classifier, value)
                for f_v in set(forcast_values):
                    if f_v in key_values:
                        # BUG FIX: 1/len(...) is integer division on py2 and
                        # truncated to 0 whenever a key had several values
                        correct_num += 1.0 / len(key_values)
                key_num += 1
            total_num += 1

        log.info('correct/key: %f' % (float(correct_num) / key_num))
        log.info('correct/total: %f' % (float(correct_num) / total_num))

    def forcast(self, value):
        """Train and print the set of values forecast for *value*."""
        classifier = self.train()
        forcast_values = self._get_forcast_value(classifier, value)
        print('%s %s' % (self.key, set(forcast_values)))

    def parse_file(self):
        """Chunk-parse ``self.html`` and return it in the sample format."""
        lines = self._get_lines()
        items = self.get_items(lines)
        value = {'items': items}
        return value

class BirthdateWiki(Wiki):
    """Extracts the 'birthdate' fact."""

    def __init__(self):
        self.key = 'birthdate'

    def gender_features(self, sent, i):
        """Features for token *i*; empty unless the token looks like a date."""
        features = {}
        tokens = [w for w, t, c in sent]
        if not self._is_date(tokens[i]):
            return features
        features['before-word'] = '<START>' if i == 0 else tokens[i - 1].lower()
        features['after-word'] = '<END>' if i == len(tokens) - 1 else tokens[i + 1].lower()
        features['has-born'] = 'born' in tokens
        features['has-brackets'] = self._all_in_word(['(', ')', '&#8211;'], tokens)
        return features

    def get_parse_sents(self, lines):
        """Only the first sentence of the first paragraph is considered."""
        if lines and lines[0]:
            yield lines[0][0]

class DeathdateWiki(Wiki):
    """Extracts the 'deathdate' fact."""

    def __init__(self):
        self.key = 'deathdate'

    def gender_features(self, sent, i):
        """Features for token *i*; empty unless the token looks like a date."""
        features = {}
        tokens = [w for w, t, c in sent]
        if self._is_date(tokens[i]):
            # a death date is typically rendered as "(... &#8211; DATE )"
            bracketed = (0 < i < len(tokens) - 1
                         and tokens[i - 1] == '&#8211;'
                         and tokens[i + 1] == ')')
            features['with-bracket'] = bracketed
            features['has-death'] = ('death' in tokens or 'died' in tokens
                                     or 'dying' in tokens)
        return features

    def get_parse_sents(self, lines):
        """Only the first sentence of the first paragraph is considered."""
        if lines and lines[0]:
            yield lines[0][0]

class BirthplaceWiki(Wiki):
    """Extracts the 'birthplace' fact."""

    def __init__(self):
        self.key = 'birthplace'

    def gender_features(self, sent, i):
        """Features for token *i*; empty unless the sentence contains 'born'."""
        features = {}
        tokens = [w for w, t, c in sent]
        if 'born' in tokens:
            features['name-entity'] = sent[i][2]
            # True when 'born' sits exactly two tokens before this one
            features['has-born'] = i > 1 and tokens[i - 2].lower() == 'born'
        return features

class DeathplaceWiki(Wiki):
    """Extracts the 'deathplace' fact."""

    def __init__(self):
        self.key = 'deathplace'

    def gender_features(self, sent, i):
        """Positional NE/POS features for token *i*; empty unless the
        sentence contains 'died'."""
        features = {}
        tokens = [w for w, t, c in sent]
        if 'died' not in tokens:
            return features
        features['name-entity'] = sent[i][2]
        features['speech-tag'] = sent[i][1]
        features['died-before'] = 'died' in tokens[:i]
        features['before-2-speech-tag'] = '<START>' if i <= 1 else sent[i - 2][1]
        features['before-speech-tag'] = '<START>' if i == 0 else sent[i - 1][1]
        features['after-speech-tag'] = '<END>' if i == len(tokens) - 1 else sent[i + 1][1]
        return features

class NationalityWiki(Wiki):
    """Extracts the 'nationality' fact."""

    def __init__(self):
        self.key = 'nationality'

    def gender_features(self, sent, i):
        """Positional NE/POS features for token *i* (no gating condition)."""
        tokens = [w for w, t, c in sent]
        return {
            'name-entity': sent[i][2],
            'speech-tag': sent[i][1],
            'before-2-speech-tag': '<START>' if i <= 1 else sent[i - 2][1],
            'before-speech-tag': '<START>' if i == 0 else sent[i - 1][1],
            'after-speech-tag': '<END>' if i == len(tokens) - 1 else sent[i + 1][1],
        }

    def get_parse_sents(self, lines):
        """Only the first sentence of the first paragraph is considered."""
        if lines and lines[0]:
            yield lines[0][0]

class ReligionWiki(Wiki):
    """Extracts the 'religion' fact."""

    def __init__(self):
        self.key = 'religion'

    def gender_features(self, sent, i):
        """Positional NE/POS features for token *i* (no gating condition)."""
        tokens = [w for w, t, c in sent]
        return {
            'name-entity': sent[i][2],
            'speech-tag': sent[i][1],
            'before-2-speech-tag': '<START>' if i <= 1 else sent[i - 2][1],
            'before-speech-tag': '<START>' if i == 0 else sent[i - 1][1],
            'after-speech-tag': '<END>' if i == len(tokens) - 1 else sent[i + 1][1],
        }


class ResidenceWiki(Wiki):
    """Extracts the 'residence' fact."""

    def __init__(self):
        self.key = 'residence'

    def gender_features(self, sent, i):
        """NE feature plus a cue-word test over the six preceding tokens."""
        tokens = [w for w, t, c in sent]
        window = tokens[max(0, i - 6):i]
        return {
            'name-entity': sent[i][2],
            'born-before': ('born' in window or 'reside' in window
                            or 'lives' in window),
        }

########## plural-valued keys ##########
class OccupationsWiki(Wiki):
    """Extracts the plural-valued 'occupations' fact."""

    def __init__(self):
        self.key = 'occupations'

    def gender_features(self, sent, i):
        """POS features of token *i* and of its immediate neighbours."""
        tokens = [w for w, t, c in sent]
        features = {'speech-tag': sent[i][1]}
        if i == 0:
            features['before-speech-tag'] = '<START>'
            features['before-word'] = '<START>'
        else:
            features['before-speech-tag'] = sent[i - 1][1]
            features['before-word'] = tokens[i - 1].lower()
        if i == len(tokens) - 1:
            features['after-speech-tag'] = '<END>'
            features['after-word'] = '<END>'
        else:
            features['after-speech-tag'] = sent[i + 1][1]
            features['after-word'] = tokens[i + 1].lower()
        return features

    def get_parse_sents(self, lines):
        """Only the first sentence of the first paragraph is considered."""
        if lines and lines[0]:
            yield lines[0][0]

class SpousesWiki(Wiki):
    """Extracts the plural-valued 'spouses' fact."""

    def __init__(self):
        self.key = 'spouses'

    def gender_features(self, sent, i):
        """Features for token *i*; empty unless a marriage cue word appears
        in the sentence."""
        features = {}
        words = [w for w, t, c in sent]
        lst = ['marriage', 'wife', 'husband', 'married']
        if self._either_in_word(lst, words):
            features['name-entity'] = sent[i][2]
            # BUG FIX: words[i-3:i] used a negative start index for i < 3,
            # which wraps around and yields a wrong (usually empty) window
            window = words[max(0, i - 3):i]
            features['word-before'] = 'wife' in window or 'husband' in window
            if i == 0:
                features['before-speech-tag'] = '<START>'
                features['before-word'] = '<START>'
            else:
                features['before-speech-tag'] = sent[i - 1][1]
                features['before-word'] = words[i - 1].lower()

        return features

class ParentsWiki(Wiki):
    """Extracts the plural-valued 'parents' fact."""

    def __init__(self):
        self.key = 'parents'

    def gender_features(self, sent, i):
        """Features for token *i*, gated on 'daughter of'/'son of' phrases
        or on 'father'/'mother' tokens."""
        features = {}
        words = [w for w, t, c in sent]
        str_words = ' '.join(words)
        # BUG FIX: words[i-3:i] used a negative start index for i < 3,
        # which wraps around and yields a wrong (usually empty) window
        window = words[max(0, i - 3):i]
        if 'daughter of' in str_words or 'son of' in str_words:
            features['name-entity'] = sent[i][2]
            features['word-before'] = 'daughter' in window or 'son' in window

        if 'father' in words or 'mother' in words:
            features['name-entity'] = sent[i][2]
            features['word-before'] = 'father' in window or 'mother' in window

        return features

class ChildrenWiki(Wiki):
    """Extracts the plural-valued 'children' fact."""

    def __init__(self):
        self.key = 'children'

    def gender_features(self, sent, i):
        """Features for token *i*, gated on 'gave birth to' or 'children'."""
        features = {}
        tokens = [w for w, t, c in sent]
        text = ' '.join(tokens)
        if 'gave birth to' in text:
            features['birth-name-entity'] = sent[i][2]
            features['birth-before'] = 'birth' in tokens[:i]
        if 'children' in tokens:
            features['children-name-entity'] = sent[i][2]
            features['children-before'] = 'children' in tokens[:i]
        return features

def look_sent_tree(html):
    """Debug helper: chunk-parse each sentence of the first paragraph of
    *html* and print its NE tree."""
    wiki = Wiki()
    wiki.html = html
    first_line = wiki._get_lines()[0]
    for sent in nltk.sent_tokenize(first_line):
        print(nltk.chunk.conlltags2tree(wiki._get_chunk(sent)))

def find_infobox_from_wiki(html):
    """Run every classifier-based extractor over *html* (parsing it only
    once), then the statistics-based extractors."""
    classifier_wikis = (BirthdateWiki, DeathdateWiki, BirthplaceWiki,
                        DeathplaceWiki, ResidenceWiki, NationalityWiki,
                        SpousesWiki, ParentsWiki, ChildrenWiki)
    value = None
    for cls in classifier_wikis:
        wiki = cls()
        wiki.html = html
        if not value:
            # parse the page once and reuse the result for every extractor
            value = wiki.parse_file()
        wiki.forcast(value)

    for cls in (OccupationsWiki, ReligionWiki):
        find_static_from_wiki(cls(), html, value)

def find_static_from_wiki(wiki, html, value=None):
    """Statistics-based keyword extraction: print every known value of
    ``wiki.key`` (from result/<key>/<key>.txt) that literally appears in
    the page's sentences."""
    def drop_noise(items, noise):
        # strip obviously wrong entries from the candidate list
        for bad in noise:
            if bad in items:
                items.remove(bad)
        return items

    key = wiki.key
    candidates = tools.open_file(r'result/%s/%s.txt' % (key, key)).split(',')
    candidates = drop_noise(candidates, ['and'])
    if not value:
        wiki.html = html
        value = wiki.parse_file()
    found = []
    for sent in wiki.get_parse_sents(value['items']):
        text = ' '.join([w for w, t, c in sent])
        for candidate in candidates:
            if candidate in text:
                found.append(candidate)
    print('%s %s' % (key, set(found)))

def get_static_set(key):
    """Collect every infobox value of *key* across all stored samples and
    save the comma-joined set to result/<key>/<key>.txt."""
    samples = pickle.loads(tools.open_file(r'result/samples.pickle'))
    wiki = Wiki()
    collected = []
    for name, value in samples.items():
        keywords = value['keywords']
        if key in keywords:
            vals = wiki.get_list_key_value(keywords[key])
            collected.extend(wiki.clean_key_values(vals))
    data = ','.join(set(collected))
    tools.save_file(r'result/%s/%s.txt' % (key, key), data)

def select_random_wiki(directory, num):
    """Pick *num* random wiki pages from *directory* and print the infobox
    ground truth next to our extraction result for each."""
    import random
    chosen = random.sample(os.listdir(directory), num)

    samples = pickle.loads(tools.open_file(r'result/samples.pickle'))
    keys = ('birthdate', 'deathdate', 'birthplace',
            'deathplace', 'residence', 'nationality',
            'spouses', 'parents', 'children',
            'occupations', 'religion')
    for fname in chosen:
        name = os.path.splitext(fname)[0]
        print('*' * 20 + name + '*' * 20)
        print('-' * 20 + 'infobox' + '-' * 20)
        keywords = samples[name]['keywords']
        for key in keys:
            key_value = keywords[key] if key in keywords else []
            print('%s %s' % (key, key_value))
        print('-' * 20 + 'my result' + '-' * 20)
        html = tools.open_file(r'%s/%s.html' % (directory, name))
        find_infobox_from_wiki(html)
        print('')

from web import Web
def get_infobox(name):
    """Print the infobox ground truth stored in result/samples.pickle for
    one sample *name*, one line per fact key (missing keys print [])."""
    data = tools.open_file(r'result/samples.pickle')
    # NOTE(review): pickle.loads on file contents — only safe for the
    # trusted local files this project writes itself
    samples = pickle.loads(data)
    keywords = samples[name]['keywords']
    for key in ('birthdate', 'deathdate', 'birthplace',
                'deathplace', 'residence', 'nationality',
                'spouses', 'parents', 'children',
                'occupations', 'religion'):
        # dict.has_key is deprecated (removed in py3); dict.get covers both
        # the present and the missing case in one lookup
        key_value = keywords.get(key, [])
        print('%s %s' % (key, key_value))

def get_wiki(name):
    """Fetch *name*'s Wikipedia page and run the full extraction on it."""
    url = r'http://en.wikipedia.org/wiki/%s' % name
    page = Web(log).fetch(url)
    find_infobox_from_wiki(page)

if __name__ == '__main__':
    print 'start'
    # one-off driver: uncomment the step you want to run
    #get_wiki('Bill_Gates')
    #key = 'religion'
    #key = 'occupations'
    #get_static_set(key)

    #directory = r'result/wiki'
    #wiki = Wiki()
    #wiki.save_samples(directory)


    wiki = BirthdateWiki()
    #wiki = DeathdateWiki()
    #wiki = BirthplaceWiki()
    #wiki = DeathplaceWiki()
    #wiki = ResidenceWiki()
    #wiki = NationalityWiki()

    # 0% accuracy so far for these two keys
    #wiki = ReligionWiki()
    #wiki = OccupationsWiki()

    # plural-valued keys
    #wiki = SpousesWiki()
    #wiki = ParentsWiki()
    #wiki = ChildrenWiki()

    #wiki.save_samples_of_key()
    #wiki.save_html_of_key()

    #wiki.check()

    #directory = r'result/wiki'
    ##name = 'Albert_Ghiorso'
    name = 'Bruce_Lee'
    ###name = 'Gustav_Heinemann'

    #html = tools.open_file(r'%s/%s.html' %(directory, name))
    #wiki.html = html
    #value = wiki.parse_file()
    #wiki.forcast(value)
    get_infobox(name)

    #look_sent_tree(html)

    #find_infobox_from_wiki(html)
    #find_static_from_wiki(wiki, html)
    #select_random_wiki(directory, 3)

    #wiki = Wiki()
    #sent = 'His father, Lee Hoi-chuen, was Chinese, and his mother Grace Ho'
    #chunk = wiki._get_chunk(sent)
    #print nltk.chunk.conlltags2tree(chunk)