#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update:

'''docstring
'''

__revision__ = '0.1'
__author__ = 'lxd'

import re
from lxml import etree
import nltk
import tools
from web import Web
import cPickle as pickle
import os

from log import getLogger
log = getLogger('wiki')

# Verbose regex matching the date formats that appear in Wikipedia
# infoboxes and article text.  All groups are non-capturing (?:...) so the
# pattern is also safe with re.findall / nltk.regexp_tokenize, which would
# otherwise return the captured month name instead of the whole date.
DATE_PATTERN = r'''(?x)    # set flag to allow verbose regexps
    \d+\s+BC # 14 BC
    | \d+\s+(?:January|February|March|April|May|June|July|August|September|October|November|December)\s+\d+ # 4 April 1842
    | (?:January|February|March|April|May|June|July|August|September|October|November|December)\s+\d+,\s+\d+ # December 7, 1987
'''
def get_xpath_texts(nodes):
    """Collect the first text() of each node, cleaned up.

    Any parenthesised "(...)" portion and surrounding whitespace are
    removed; nodes whose cleaned text is empty are skipped.
    (Replacement for a plain xpath text() query.)
    """
    texts = []
    for node in nodes:
        fragments = node.xpath('text()')
        if not fragments:
            continue
        cleaned = re.sub(r'\(.*\)', '', unicode(fragments[0])).strip()
        if cleaned:
            texts.append(cleaned)
    return texts

def get_xpath_text(nodes):
    """Return the first cleaned text of nodes, or None when there is none."""
    texts = get_xpath_texts(nodes)
    return texts[0] if texts else None

def get_lines(html):
    """Parse html and return the list of body paragraphs as plain text."""
    root = etree.HTML(html)
    paragraphs = root.xpath('//div[@id="bodyContent"]/p')
    return [nltk.clean_html(etree.tostring(node)) for node in paragraphs]

def parse_infobox(html):
    """Parse the infobox table of a wiki page into a property dict.

    Walks the rows of the first table whose class contains "infobox" and
    maps the row label (th) to a normalized property such as 'birthdate',
    'birthplace', 'occupations', 'parents', ...  Values come from the
    row's td (or td/a) text, cleaned by get_xpath_text(s).
    """
    def update_text(d, node, xpath, property):
        """Store the stripped text found at xpath directly under property."""
        value = get_xpath_text(node.xpath(xpath))
        if value:
            d.update({property:value})
        return d

    def update_texts(d, node, xpath, property):
        """Store the stripped texts at xpath as a list, automatically
        including texts nested in <p> and <a> (could perhaps move into
        get_xpath_texts).
        """
        values = get_xpath_texts(node.xpath(xpath))
        values.extend(get_xpath_texts(node.xpath('%s/p' %xpath)))
        values.extend(get_xpath_texts(node.xpath('%s/a' %xpath)))
        if values:
            d.update({property:values})
        return d

    def parse_texts(d, node, xpath, property):
        """Debug helper (not used by the table loop below): print the
        cleaned text of the whole node.
        """
        print nltk.clean_html(etree.tostring(node))

    def update_list(d, node, xpath, property):
        """Split a list-style string on ',', strip and lowercase each item."""
        value = get_xpath_text(node.xpath(xpath))
        if value:
            values = [v.lower().strip() for v in value.split(',')]
            if not values[0] or values[0] == '(':
                # the items live inside <a> tags, so parse those as well
                values.extend(get_xpath_texts(node.xpath('%s/a' %xpath)))
                # drop empty items and bare bracket items
                values = [v for v in values if (v and v not in ('(', ')'))]
            d.update({property:values})
        return d

    def update_list_first(d, node, xpath, property):
        """Split a list-style string on ',', strip items, keep only the first."""
        value = get_xpath_text(node.xpath(xpath))
        if value:
            values = [v.strip() for v in value.split(',')]
            d.update({property:values[0]})
        return d

    def update_combo_text(d, node, xpath, property):
        """Append the value to the property's list, creating the list when
        the property is absent (used when several rows feed one property,
        e.g. Father/Mother -> 'parents').
        """
        value = get_xpath_text(node.xpath(xpath))
        if value:
            if d.has_key(property):
                d[property].append(value)
            else:
                d.update({property:[value]})
        return d

    tree = etree.HTML(html)
    d = {}
    for tr_tree in tree.xpath('//table[contains(@class,"infobox")]/tr'):
        label = get_xpath_text(tr_tree.xpath('th'))
        if label:
            if label == 'Born':
                d = update_text(d, tr_tree, 'td', 'birthdate')
                d = update_list_first(d, tr_tree, 'td/a', 'birthplace')
            if label == 'Died':
                d = update_text(d, tr_tree, 'td', 'deathdate')
                d = update_list_first(d, tr_tree, 'td/a', 'deathplace')
            if label.startswith('Occupation'):
                # lowercased list
                d = update_list(d, tr_tree, 'td', 'occupations')
            if label == 'Nationality':
                d = update_text(d, tr_tree, 'td', 'nationality')
            if label == 'Religion':
                d = update_text(d, tr_tree, 'td/a', 'religion')
            if label.startswith('Spouse'):
                d = update_texts(d, tr_tree, 'td', 'spouses')
            if label == 'Father':
                d = update_combo_text(d, tr_tree, 'td/a', 'parents')
            if label == 'Mother':
                d = update_combo_text(d, tr_tree, 'td/a', 'parents')
            if label == 'Residence':
                d = update_list_first(d, tr_tree, 'td/a', 'residence')
            if label == 'Children':
                d = update_texts(d, tr_tree, 'td', 'children')
            if label == 'Birth:':
                d = update_text(d, tr_tree, 'td', 'birthdate')
            if label == 'Death:':
                d = update_text(d, tr_tree, 'td', 'deathdate')

    return d

def note_keyword_in_lines(keyword, lines):
    """Mark every infobox value occurring in lines.

    keyword maps a property name to a value string or a list of value
    strings; each whole-word occurrence of a value in a line is wrapped
    as [property]value[/property].  Yields the annotated lines.
    """
    def _add_value_with_property(body, property, value):
        # Parenthesised parts were stripped from infobox values upstream,
        # so drop brackets from the search value too.
        value = value.replace('(', '').replace(')', '')
        if not value:
            return body
        # re.escape keeps '?', '.', '+', '[' etc. in values from corrupting
        # the pattern (the old manual '\?' hack also leaked a literal
        # backslash into the output).  A callable replacement avoids any
        # backslash processing of the value on the substitution side.
        pattern = re.compile(r'\b%s\b' % re.escape(value))
        return pattern.sub(lambda m: '[%s]%s[/%s]' % (property, value, property), body)

    for line in lines:
        for (property, value) in keyword.items():
            if isinstance(value, list):
                for v in value:
                    line = _add_value_with_property(line, property, v)
            else:
                line = _add_value_with_property(line, property, value)

        yield line


def label_contents():
    """Annotate every cached wiki page with its infobox keywords and save
    the labelled paragraphs under result/content/.
    """
    for filename in os.listdir(r'result/wiki'):
        name = os.path.splitext(filename)[0]
        log.debug(name)
        html = tools.open_file(r'result/wiki/%s.html' %name)

        paragraphs = get_lines(html)
        keyword = parse_infobox(html)
        log.debug(str(keyword))
        labelled = note_keyword_in_lines(keyword, paragraphs)
        tools.save_file(r'result/content/%s.html' %name, '<br/>'.join(labelled))


def find_sents_with_value_in_lines(lines, value):
    """Yield every sentence in the paragraph list that contains value."""
    for paragraph in lines:
        for sentence in nltk.sent_tokenize(paragraph):
            if value in sentence:
                yield sentence

def find_sents_with_key(key, directory):
    """Yield (value, sentence) for every sentence mentioning the infobox
    value of key, across all cached pages in directory.
    """
    for filename in os.listdir(directory):
        name = os.path.splitext(filename)[0]
        log.debug(name)

        html = tools.open_file(r'result/wiki/%s.html' %name)
        paragraphs = get_lines(html)
        keywords = parse_infobox(html)
        log.debug('keywords:%s' %keywords)

        if key in keywords:
            value = keywords[key]
            for sentence in find_sents_with_value_in_lines(paragraphs, value):
                yield value, sentence

def save_sents_of_key(key, directory):
    """Find every sentence containing the value of key and save them all,
    with the value highlighted in bold, to one html file.
    """
    parts = []
    for value, sent in find_sents_with_key(key, directory):
        # re.escape handles '?', '(', ')' and every other metacharacter;
        # a callable replacement keeps the raw value intact, so no stray
        # escaping backslashes end up in the generated html (the manual
        # '\?'/'\(' replacements used to leak through re.sub).
        pattern = re.compile(r'\b%s\b' % re.escape(value))
        html = pattern.sub(lambda m: '<b>%s</b>' % value, sent)
        parts.append('%s<br/>' % html)
    # join once instead of quadratic string concatenation
    tools.save_file(r'result/sents_with_%s.html' %key, ''.join(parts))

def is_birthdate(word):
    """Return a truthy match object when word looks like a date."""
    return re.search(DATE_PATTERN, word)

def is_deathdate(word):
    """Same test as is_birthdate: death dates share the same formats."""
    return is_birthdate(word)

def gender_birthdate_features(words, i):
    """Feature dict for the candidate birthdate token words[i].

    Features: the lowercased neighbouring tokens (with <START>/<END>
    sentinels at the sentence edges), plus sentence-level indicators for
    the word 'born' and a bracket pair.
    """
    before = '<START>' if i == 0 else words[i-1].lower()
    after = '<END>' if i == len(words) - 1 else words[i+1].lower()
    return {
        'before-word': before,
        'after-word': after,
        'has-born': 'born' in words,
        'has-brackets': '(' in words and ')' in words,
    }

def gender_deathdate_features(words, i):
    """Feature dict for the candidate deathdate token words[i].

    Features: the lowercased neighbouring tokens (with <START>/<END>
    sentinels at the sentence edges), plus sentence-level indicators for
    death vocabulary and a bracket pair.
    """
    before = '<START>' if i == 0 else words[i-1].lower()
    after = '<END>' if i == len(words) - 1 else words[i+1].lower()
    return {
        'before-word': before,
        'after-word': after,
        'has-death': 'death' in words or 'died' in words or 'dying' in words,
        'has-brackets': '(' in words and ')' in words,
    }

def gender_birthplace_features(words, i):
    """Feature dict for a candidate birthplace token words[i].

    Features: the one and two preceding tokens plus their pair (as a
    list); <START> stands in wherever the context is missing.  Note the
    tokens are kept as-is (not lowercased).
    """
    features = {
        'before-word-1': words[i-1] if i > 0 else '<START>',
        'before-word-2': words[i-2] if i > 1 else '<START>',
        'before-words': words[i-2:i] if i > 1 else '<START>',
    }
    return features


def get_featuresets(key, sents):
    """Build (features, label) pairs for key from (value, sentence) pairs.

    The label is the string 'True' when the candidate token equals the
    known infobox value.  The featuresets are pickled to
    result/<key>_featuresets.pickle and also returned.
    """
    # the is_<key> / gender_<key>_features helpers are resolved by name
    is_candidate = globals()['is_%s' %key]
    make_features = globals()['gender_%s_features' %key]
    featuresets = []
    for value, sent in sents:
        words = word_tokenize(sent)
        log.debug('words:%s' %words)
        for i, word in enumerate(words):
            if is_candidate(word):
                featuresets.append((make_features(words, i), str(word == value)))
    tools.save_file(r'result/%s_featuresets.pickle' %key, pickle.dumps(featuresets))
    return featuresets

def get_classifier(key):
    """Load the pickled featuresets for key and train a decision tree.

    90% of the featuresets are used for training; accuracy on the
    held-out 10% is logged.  Returns the trained classifier.
    """
    featuresets = pickle.loads(tools.open_file(r'result/%s_featuresets.pickle' %key))
    log.debug('featuresets:%s' %featuresets)

    cut = int(len(featuresets) * 0.1)
    train_set = featuresets[cut:]
    test_set = featuresets[:cut]
    classifier = nltk.DecisionTreeClassifier.train(train_set)
    log.info('accuracy:%s' %nltk.classify.accuracy(classifier, test_set))
    return classifier

def word_tokenize(sent):
    """Tokenize sent into dates, words, abbreviations, html entities,
    currency/percentages, ellipses and punctuation.

    Groups in this pattern are non-capturing (?:...): regexp tokenization
    builds on re.findall, where capturing groups would make it return the
    captured sub-parts instead of the whole tokens.
    """
    # DATE_PATTERN already starts with (?x), which governs the whole
    # concatenated pattern; repeating (?x) mid-pattern was redundant (and
    # rejects on newer Python versions).
    pattern = DATE_PATTERN + r'''
        | (?:[A-Z]\.)+        # abbreviations, e.g. U.S.A.
        | \&\#\d+\;    # &#198;
        | \w+(?:-\w+)*        # words with optional internal hyphens
        | \$?\d+(?:\.\d+)?%?  # currency and percentages, e.g. $12.40, 82%
        | \.\.\.            # ellipsis
        | [][.,;"'?():-_`]  # these are separate tokens
    '''
    words = nltk.regexp_tokenize(sent, pattern)
    return words

def _process_sent(key, classifier, sent):
    """将句子中的单词用classifier分类
    """
    words = word_tokenize(sent)
    for i, word in enumerate(words):
        if globals()['is_%s' %key](word):
            features = globals()['gender_%s_features' %key](words, i)
            #print classifier.classify(features)
            if classifier.classify(features) == 'True':
                print word

def print_wiki_with_classifier(key, name):
    """Print one person's key information using the locally trained
    classifier.
    """
    classifier = get_classifier(key)

    html = tools.open_file(r'result/wiki/%s.html' %name)
    for paragraph in get_lines(html):
        for sent in nltk.sent_tokenize(paragraph):
            _process_sent(key, classifier, sent)

def print_wiki_with_wikis(key, name, directory):
    """Print one person's key information, first (re)building the
    featuresets and classifier from the sample pages in directory.
    """
    sents = find_sents_with_key(key, directory)
    get_featuresets(key, sents)
    print_wiki_with_classifier(key, name)

def print_wikis_with_classifier(key, directory):
    """从本地classifier中查看该关键字所能找到的信息
    """
    classifier = get_classifier(key)
    for file in os.listdir(directory):
        name = os.path.splitext(file)[0]

        print name
        html = tools.open_file(r'result/wiki/%s.html' %name)
        lines = get_lines(html)
        for line in lines:
            sents = nltk.sent_tokenize(line)
            for sent in sents:
                _process_sent(key, classifier, sent)

def print_sent_with_classifier(key, sent):
    """Print the key information found in one sentence by the locally
    trained classifier.
    """
    classifier = get_classifier(key)
    _process_sent(key, classifier, sent)

def print_sent_with_wikis(key, sent, directory):
    """Print the key information found in one sentence, first rebuilding
    the featuresets and classifier from the sample pages in directory.
    """
    sents = find_sents_with_key(key, directory)
    get_featuresets(key, sents)
    print_sent_with_classifier(key, sent)

def print_others():
    """Quick NER experiment: POS-tag and NE-chunk one sentence of a
    cached page and print the resulting chunk tree.
    """
    name = '%C3%89mile_Boutroux'
    html = tools.open_file(r'result/wiki/%s.html' %name)
    lines = get_lines(html)

    # lines[1]: presumably the first real paragraph — confirm that
    # lines[0] is boilerplate for this page
    line = lines[1]
    sents = nltk.sent_tokenize(line)
    sent = sents[0]
    #sent = '[ 5 ] After his success in Taiwan, he returned to Hong Kong in 1991 to do Cantopop .'
    words = word_tokenize(sent)
    tags = nltk.pos_tag(words)
    print nltk.ne_chunk(tags)


if __name__ == '__main__':
    #print_others()
    #label_contents()
    # Property to extract; the is_<key> / gender_<key>_features helpers
    # are looked up by this name via globals().
    key = 'birthdate'
    #key = 'deathdate'
    wikis = r'result/wiki'
    #save_sents_of_key(key, wikis)

    #name = 'Aage_Bohr'
    #name = 'Aaron_Kwok'
    #name = 'William_Hedley'

    #print_wiki_with_wikis(key, name, wikis)
    #print_wiki_with_classifier(key, name)
    # run the locally trained classifier over a second corpus directory
    print_wikis_with_classifier(key, r'result/wiki1')

    #sent = '&#198;lfweard (904 &#8211; 12 August 924) was the second son of Edward the Elder , the eldest born to his second wife &#198;lffl&#230;d .' 
    #sent = 'Marie Joseph Louis Adolphe Thiers ( French pronunciation:&#160; [lwi ad&#596;lf tj&#603;&#641;] ; Marseille , 18 April 1797&#8211;3 September 1877) was a French politician and historian .'

    #print_sent_with_wikis(key, sent, wikis)
    #print_sent_with_classifier(key, sent)



    def todo():
        # NOTE(review): scratch notes, never called; `words` is undefined
        # here, so calling this as-is would raise NameError.
        tag_words = nltk.pos_tag(words)
        grammar = "DATE: {<CD>+<NNP>?<CD>?}" 
        cp = nltk.RegexpParser(grammar) 
        tree = cp.parse(tag_words) 
        tags = nltk.chunk.tree2conlltags(tree)


