#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update:

'''docstring
'''

__revision__ = '0.1'
__author__ = 'lxd'

import re
import tools
from web import Web
import cPickle as pickle
import os

from log import getLogger
log = getLogger('wiki')

def get_people(type):
    """Download the listing page of all people of the given ontology type.

    The fetched HTML is written to result/type/<type>.html.
    """
    page_url = r'http://dbpedia.org/ontology/%s' % type
    page_html = Web(log).fetch(page_url)
    tools.save_file(r'result/type/%s.html' % type, page_html)

def get_all_people():
    """Download the people listing for every type named in result/types.txt."""
    type_names = tools.open_file(r'result/types.txt').split(',')
    for type_name in type_names:
        get_people(type_name)

#get_all_people()

def parse_all_people():
    """Extract all person names from the downloaded type pages.

    Scans every ``result/type/*.html`` file for dbpedia resource links,
    pickles the per-type {type: [names]} mapping to
    ``result/type/people.pickle``, and writes the deduplicated,
    comma-joined name list to ``result/type/people.txt``.
    """
    # Compile once outside the loop — the same pattern is applied to
    # every file (the original recompiled it per iteration).
    resource_pattern = re.compile(r'http://dbpedia.org/resource/([^"]+)')
    names_by_type = {}
    # 'entry' instead of 'file' to avoid shadowing the builtin.
    for entry in os.listdir(r'result/type'):
        base, ext = os.path.splitext(entry)
        if ext == '.html':
            html = tools.open_file(r'result/type/%s.html' % base)
            names_by_type[base] = resource_pattern.findall(html)
    tools.save_file(r'result/type/people.pickle', pickle.dumps(names_by_type))
    all_names = []
    for names in names_by_type.values():
        all_names.extend(names)
    # Deduplicate across types before writing the flat list.
    all_names = set(all_names)
    people = ','.join(all_names)
    tools.save_file(r'result/type/people.txt', people)

#parse_all_people()

def get_people_page(name):
    """Download the dbpedia page for the given name.

    The page is fetched through the local proxy and saved to
    result/dbpedia/<name>.html.
    """
    target = r'http://dbpedia.org/page/%s' % name
    page = Web(log, 'http://127.0.0.1:1984').fetch(target)
    tools.save_file(r'result/dbpedia/%s.html' % name, page)

def _html_basenames(directory):
    """Return the extension-less basenames of all .html files in *directory*."""
    names = []
    for entry in os.listdir(directory):
        base, ext = os.path.splitext(entry)
        if ext == '.html':
            names.append(base)
    return names

def get_all_people_page():
    """Download the dbpedia page for every name found in result/wiki,
    skipping names whose dbpedia page already exists on disk.
    """
    # The original duplicated the directory-scan loop twice verbatim;
    # both scans now share _html_basenames().
    people = _html_basenames(r'result/wiki')
    existed_people = _html_basenames(r'result/dbpedia')
    p_list = set(people) - set(existed_people)
    # Parenthesized print works identically on Python 2 and 3.
    print(len(p_list))

    for p in p_list:
        get_people_page(p)

# NOTE: runs at import time — kicks off downloading every missing
# dbpedia page as a side effect of importing this module.
get_all_people_page()

def get_wiki(name):
    """Save the English Wikipedia page for the given name.

    The fetched HTML is written to result/wiki/<name>.html.
    """
    wiki_url = r'http://en.wikipedia.org/wiki/%s' % name
    page = Web(log).fetch(wiki_url)
    tools.save_file(r'result/wiki/%s.html' % name, page)

def get_all_wiki():
    """Save the Wikipedia page for every known name, skipping any
    page that has already been downloaded.
    """
    wanted = set(tools.open_file(r'result/type/people.txt').split(','))

    downloaded = set()
    for entry in os.listdir(r'result/wiki'):
        stem, ext = os.path.splitext(entry)
        if ext == '.html':
            downloaded.add(stem)

    for name in wanted - downloaded:
        get_wiki(name)

#get_all_wiki()

if __name__ == '__main__':
    # No command-line entry point; work is triggered by the
    # module-level get_all_people_page() call when the file is imported/run.
    pass

