#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update:

'''docstring
'''

__revision__ = '0.1'
__author__ = 'lxd'
import re
from lxml import etree
import tools
from web import Web
import logging
logging.basicConfig(filename = 'log.txt', level = logging.DEBUG)

def get_people():
    """Return the list of person resource names scraped from the
    DBpedia Person ontology page."""
    page = Web(logging).fetch('http://dbpedia.org/ontology/Person')
    resource_pattern = re.compile(r'http://dbpedia.org/resource/([^"]+)')
    return resource_pattern.findall(page)

class Wiki(object):
    """Scrape a single Wikipedia article and build a tagged text body.

    Typical use: get_page() (live fetch) or get_mock() (cached copy in
    record/), then process(); the result is left in self.body.
    """

    def __init__(self, name):
        # name: the Wikipedia page title, e.g. 'Dwight_L._Moody'.
        self.name = name

    def get_page(self):
        """Fetch the live article HTML and parse it into self.tree (lxml)."""
        url = 'http://en.wikipedia.org/wiki/%s' %self.name
        #self.html = Web(logging, proxy = '127.0.0.1:1984').fetch(url)
        self.html = Web(logging).fetch(url)
        self.tree = etree.HTML(self.html)

    def get_mock(self):
        """Load a previously saved copy of the article from record/."""
        self.html = tools.open_file(r'record/%s.txt' %self.name)
        self.tree = etree.HTML(self.html)

    def process(self):
        """Run the full pipeline: extract the body, tag infobox values,
        then replace pronouns with the person's name."""
        self.get_body()
        self.add_tags()
        self.replace_his_with_name()

    def get_body(self):
        """Extract the cleaned article body HTML into self.body.

        Keeps only the elements that follow the infobox table inside
        div#bodyContent, dropping the table of contents and everything
        from the reference list onwards.  Falls back to the whole
        bodyContent div when no infobox is present.
        """
        body_content = self.tree.xpath('//div[@id="bodyContent"]')[0]
        lst = body_content.xpath('table[contains(@class, "infobox")]/following-sibling::*')
        if lst:
            # Drop the table of contents if it appears after the infobox.
            topices = body_content.xpath('table[contains(@class, "infobox")]/following-sibling::table[@id="toc"]')
            if topices: lst.remove(topices[0])

            # Drop the reference list and every element after it.
            # NOTE(review): assumes the reflist div is itself one of the
            # infobox's following siblings; otherwise lst.remove raises
            # ValueError -- confirm against real pages.
            reflist = body_content.xpath('div[@class="reflist"]')
            if reflist:
                lasts = reflist[0].xpath('following-sibling::*')
                for last in lasts:
                    lst.remove(last)
                lst.remove(reflist[0])

            body_html = ''.join([etree.tostring(l) for l in lst])
        else:
            logging.info('can not reduce the body')
            body_html = etree.tostring(body_content)
        # tools.clean_html presumably reduces the markup to plain text --
        # TODO confirm against the tools module.
        self.body = tools.clean_html(body_html)

    def _get_xpath_value(self, objs):
        """Collect the text of the first node in *objs*.

        Gathers text directly under the node plus text inside its <a>
        and <span> children, returned as a list with whitespace stripped
        and empty/punctuation-only items dropped.  Returns None when
        *objs* is empty.
        """
        if objs:
            obj = objs[0]
            r_list = obj.xpath('text()') \
                    + obj.xpath('a/text()') \
                    + obj.xpath('span/text()')
            return [r_item.strip() for r_item in r_list if r_item.strip() not in ['', ',', '(', ')']]
        else:
            return None

    def get_infobox(self):
        """Yield {'label': [...], 'values': [...]} for each infobox row.

        Raises IndexError when the page has no infobox table.
        """
        infobox = self.tree.xpath('//table[contains(@class, "infobox")]')[0]
        for line in infobox.xpath('tr'):
            d = {}
            d['label'] = self._get_xpath_value(line.xpath('th'))
            d['values'] = self._get_xpath_value(line.xpath('td'))
            yield d

    def _add_value_with_tag(self, value, tag):
        """Wrap every whole-word occurrence of *value* in self.body with
        [tag]...[/tag] markers.

        NOTE(review): *value* is interpolated into the pattern unescaped;
        this relies on add_tags() having stripped/escaped regex
        metacharacters beforehand.
        """
        if value:
            self.body = re.compile(r'\b%s\b' %value).sub(r'[%s]%s[/%s]' %(tag, value, tag), self.body)

    def add_tags(self):
        """Tag each infobox value's occurrences in the body with its label."""
        # Metacharacters stripped from values before they are embedded in a
        # regex by _add_value_with_tag.
        value_replace_list = ('(', ')', '?', '.', '*')
        for info in self.get_infobox():
            if info['label'] and info['values']:
                # Normalize non-breaking spaces and en-dashes in the label.
                tag = info['label'][0].replace(u'\xa0', ' ').replace(u'\u2013', ' ')
                for value in info['values']:
                    for rep in value_replace_list:
                        value = value.replace(rep, '')
                    # '\+' is a literal backslash-plus: it escapes '+' for
                    # the search pattern, but the backslash also ends up in
                    # the substituted text -- NOTE(review): looks unintended.
                    value = value.replace('+', '\+')
                    logging.debug('add value %s with tag [%s]' %(repr(value), repr(tag)))
                    self._add_value_with_tag(value, tag)

    def is_male(self):
        """Guess gender: True when he/his outnumber she/her in the body."""
        def get_len(patten):
            # Count case-insensitive whole-word matches of *patten*.
            lst = re.compile(patten, re.I).findall(self.body)
            return len(lst)
        male_num = get_len(r'\bhe\b') + get_len(r'\bhis\b')
        female_num = get_len(r'\bshe\b') + get_len(r'\bher\b')
        return male_num > female_num

    def replace_his_with_name(self):
        """Replace third-person pronouns in self.body with the page name.

        Uses is_male() to pick he/his or she/her; re.I means capitalized
        forms at sentence starts are replaced too.
        """
        if self.is_male():
            logging.debug('male')
            self.body = re.compile(r'\bhe\b', re.I).sub(self.name, self.body)
            self.body = re.compile(r'\bhis\b', re.I).sub(self.name+"'s", self.body)
        else:
            logging.debug('female')
            self.body = re.compile(r'\bshe\b', re.I).sub(self.name, self.body)
            self.body = re.compile(r'\bher\b', re.I).sub(self.name+"'s", self.body)

def get_person_from_wiki():
    #person = 'Bernard_of_Clairvaux'
    #if True:
    deniy_person = ['Dalai_Lama', 'Gloom_%28comics%29', 'Bernard_of_Clairvaux']
    i = 0
    for person in get_people():
        if person in deniy_person:continue
        i += 1
        print str(i), person, 'start'
        logging.debug(person)
        wiki = Wiki(person)
        wiki.get_page()
        tools.save_file(r'record/%s.txt' %person, wiki.html)
        wiki.process()
        tools.save_file(r'record/%s_result.txt' %person, wiki.body)

def parse_from_file(name):
    """Build and return the tagged body for *name* using the cached copy
    saved under record/ (no network access)."""
    page = Wiki(name)
    page.get_mock()
    page.process()
    return page.body

def main():
    """Entry point: dump all corpus sentences containing 'Born' to
    result.html."""
    extract_sentence_with_keyword('Born')

from nltk.corpus import PlaintextCorpusReader
def extract_sentence_with_keyword(keyword):
    """Write every corpus sentence containing *keyword* to result.html.

    Scans the first 50 *_result.txt files under record/ with NLTK's
    PlaintextCorpusReader; *keyword* is compared against the tokenized
    sentence, so it must be a single word token.
    """
    corpus_wiki = 'record'
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    reader = PlaintextCorpusReader(corpus_wiki, r'.*_result\.txt')
    # Accumulate chunks and join once -- repeated str += is quadratic.
    parts = []
    for fileid in reader.fileids()[:50]:
        parts.append(fileid + '<br/>')
        for sent in reader.sents(fileid):
            if keyword in sent:
                parts.append(' '.join(sent) + '<br/>')
                parts.append('-'*20 + '<br/>')
        parts.append('='*20 + '<br/>')
    tools.save_file('result.html', ''.join(parts))

import nltk
def extract_sentence_with_keyword2(keyword):
    """Write every raw-text sentence containing *keyword* to result2.html.

    Variant of extract_sentence_with_keyword that splits the raw file
    text with the Punkt sentence tokenizer instead of using the corpus
    reader's tokenized sentences, so *keyword* matches as a substring.
    """
    corpus_wiki = 'record'
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    reader = PlaintextCorpusReader(corpus_wiki, r'.*_result\.txt')
    sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    # Accumulate chunks and join once -- repeated str += is quadratic.
    parts = []
    for fileid in reader.fileids()[:50]:
        parts.append(fileid + '<br/>')
        for sent in sent_tokenizer.tokenize(reader.raw(fileid)):
            if keyword in sent:
                parts.append(sent + '<br/>')
                parts.append('-'*20 + '<br/>')
        parts.append('='*20 + '<br/>')
    tools.save_file('result2.html', ''.join(parts))


# Script entry point (Python 2 print statements).
if __name__ == '__main__':
    print 'start'
    main()
    print 'over'
