#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update:

'''docstring
'''

__revision__ = '0.1'
__author__ = 'lxd'
import re
from lxml import etree
import nltk
import tools
from web import Web
import cPickle as pickle
import os

from log import getLogger
log = getLogger('wiki')


class Dbpedia(object):
    def get_person_keyword(self, html):
        #TODO:会有多余的name，例如abstract
        def kill_colon(values):
            for v in values:
                if v[0] == ':':
                    yield v[1:]
                else:
                    yield v

        def get_text(nodes):
            return [unicode(node.xpath('text()')[0]) for node in nodes]

        not_need_properties = ['abstract', 'thumbnail', 'wikiPageExternalLink']
        tree = etree.HTML(html)
        d = {}
        for property in tree.xpath('//td[@class="property"]'):
            #only depdia-owl
            if property.xpath('a/small/text()')[0] == 'dbpedia-owl:':
                # delete not need properties
                if property.xpath('a/text()')[0] not in not_need_properties:
                    # delete is .. of
                    if not property.xpath('text()')[0] == 'is ':
                        name = get_text(property.xpath('a'))[0]
                        for ul in property.xpath('following-sibling::td/ul'):
                            normal_values = get_text(ul.xpath('li/span/span'))
                            link_values = get_text(ul.xpath('li/span/a'))
                            values = normal_values or list(kill_colon(link_values))
                        d[name] = values
        return d

    def get_keywords(self):
        """从本地文件中解析所有人页面
        """
        r = {}
        for file in os.listdir(r'result/dbpedia'):
            name = os.path.splitext(file)[0]
            html = tools.open_file(r'result/dbpedia/%s' %file)
            d = self.get_person_keyword(html)
            r[name] = d
        content = pickle.dumps(r)
        tools.save_file(r'result/dbpedia_keywords.pickle', content)

    def get_keywords_from_file(self):
        html = tools.open_file(r'result/dbpedia_keywords.pickle')
        contents = pickle.loads(html)
        print contents.keys()

class Wiki(object):
    def load_people(self):
        """从本地文件中获得所有人物
        """
        html = tools.open_file(r'result/Artist.html')
        p = re.compile(r'http://dbpedia.org/resource/([^"]+)')
        return p.findall(html)

    def get_page(self, name):
        """从page中保存某人的页面
        """
        url = 'http://en.wikipedia.org/wiki/%s' %name
        html = Web(log).fetch(url)
        tools.save_file(r'result/wiki/%s.html' %name, html)

    def get_pages(self):
        """保存所有人页面
        """
        for p in self.load_people()[106:]:
            self.get_page(p)

    def get_lines(self, html):
        """解析html得到每段组成的数组
        """
        tree = etree.HTML(html)
        ptree_list = tree.xpath('//div[@id="bodyContent"]/p')
        p_list = [nltk.clean_html(etree.tostring(p)) for p in ptree_list]
        return p_list


    def load_keywords(self):
        """从本地文件中得到所有keywords
        """
        html = tools.open_file(r'result/dbpedia_keywords.txt')
        keywords = pickle.loads(html)
        return keywords

    def note_keyword_in_lines(self, keyword, lines):
        """将关键词标出来
        """
        def _add_value_with_property(body, property, value):
            if value:
                return re.compile(r'\b%s\b' %value).sub(r'[%s]%s[/%s]' %(property, value, property), body)
            else:
                return body

        for line in lines:
            for (property, values) in keyword.items():
                for value in values:
                        line = _add_value_with_property(line, property, value)
            yield line

    def parse_lines(self, name):
        """从本地文件中解析某人页面
        """
        keywords = self.load_keywords()
        keyword = keywords[name]

        html = tools.open_file(r'result/wiki/%s.html' %name)
        lines = self.get_lines(html)

        return self.note_keyword_in_lines(keyword, lines)

    def get_contents(self):
        """从本地文件中解析所有人页面, 将得到的content并保存到本地
        """
        for file in os.listdir(r'result/wiki'):
            name = os.path.splitext(file)[0]
            lines = list(self.parse_lines(name))
            content = ''.join(lines)
            tools.save_file(r'result/contents/%s.html' %name, content)

    def parse_contents(self):
        """从本地文件中解析所有人页面, 并保存到本地
        """
        r = {}
        for file in os.listdir(r'result/wiki'):
            name = os.path.splitext(file)[0]
            lines = list(self.parse_lines(name))
            r[name] = lines
        content = pickle.dumps(r)
        tools.save_file(r'result/wiki_lines.txt', content)

    def create_statistic_contents(self):
        keywords = self.get_all_keywords()
        html = tools.open_file(r'result/wiki_lines.txt')
        content = pickle.loads(html)
        r = ''
        for key in keywords:
            print key
            for (name, lines) in content.items():
                r += name+' '+key+'<br/>'
                for line in lines:
                    if '[%s]' %key in line:
                        r += line+'<br/>'
                r += '-'*50+'<br/>'
        tools.save_file('1.html', r)
        #for c in content:
            #print c

    def statistic_contents(self):
        """统计在文件中出现次数的列表
        """
        keywords = self.get_all_keywords()
        html = tools.open_file(r'result/wiki_lines.txt')
        content = pickle.loads(html)
        d = {}
        for key in keywords:
            files = {}
            for (name, lines) in content.items():
                num = 0
                for line in lines:
                    if '[%s]' %key in line:
                        num += 1
                if num > 0:
                    files[name] = num
            d[key] = files
        return d

    def statistic_nums(self):
        """统计关键词在多少文章中出现过
        一篇文章中最多出现多少次该关键词
        只出现一次该关键词的文章数
        """
        for key, files in self.statistic_contents().items():
            file_num = 0
            file_max = 0
            file_one_num = 0
            for filename, num in files.items():
                file_num += 1
                if num > file_max:
                    file_max = num
                if num == 1:
                    file_one_num += 1
            yield key, file_num, file_max, file_one_num

    def statistic_first_paragraph(self):
        """统计有多少文章在第一段中出现该关键词
        """
        keywords = self.get_all_keywords()
        html = tools.open_file(r'result/wiki_lines.txt')
        content = pickle.loads(html)
        d = {}
        for key in keywords:
            num = 0
            for (name, lines) in content.items():
                if '[%s]' %key in lines[0]:
                    num += 1
            d[key] = num
        return d

    def get_all_keywords(self):
        """得到不重复的所有关键词
        """
        r = []
        keywords = self.load_keywords()
        for value in keywords.values():
            r.extend(value.keys())
        return set(r)

    def save_statistic(self):
        s = ''
        for (key, file_num, file_max, file_one_num) in self.statistic_nums():
            s += '%s\t%d\t%d\t%d\n' %(key, file_num, file_max, file_one_num)
        tools.save_file(r'result/num.txt', s)

        s = ''
        for (key, num) in self.statistic_first_paragraph().items():
            s += '%s\t%d\n' %(key, num)
        tools.save_file(r'result/first_num.txt', s)

from nltk.corpus import PlaintextCorpusReader
def analyse(key):
    """Show a concordance view of ``key`` across every saved content
    file, one sentence at a time.
    """
    corpus = PlaintextCorpusReader(r'result/contents', '.*\.html')
    for fileid in corpus.fileids():
        for sentence in corpus.sents(fileid):
            nltk.Text(sentence).concordance(key)

def test():
    text = 'That U.S.A. poster-print costs $12.40... a, abc"s'
    pattern = r'''(?x)    # set flag to allow verbose regexps
            ([A-Z]\.)+        # abbreviations, e.g. U.S.A.
            | \w+(-\w+)*        # words with optional internal hyphens
            | \$?\d+(\.\d+)?%?  # currency and percentages, e.g. $12.40, 82%
            | \.\.\.            # ellipsis
            | [][.,;"'?():-_`]  # these are separate tokens
            '''
    print nltk.regexp_tokenize(text, pattern)

if __name__ == '__main__':
    #analyse('hometown')

    # Parse all locally saved dbpedia person pages and pickle the
    # extracted keyword dict (see Dbpedia.get_keywords).
    pedia = Dbpedia()
    pedia.get_keywords()

    # Alternative entry points kept for manual runs:
    #wiki = Wiki()
    #wiki.parse_contents()
    #name = 'Boris_Pasternak'

