#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
获取子网页的模板代码

get_urls: 获取主页中所有url
handle: 处理每一个url
schedule: 调度程序

实例: 爬取网络小说《微微一笑很倾城》

version: 0.2
date: 2018/10/4
"""

import re
import os

import bs4
import requests

def url2soup(url, headers=None, data=None):
    """Fetch *url* and return a BeautifulSoup object, handling any page encoding.

    Args:
        url: page to fetch.
        headers: optional dict of extra HTTP headers (default: none).
        data: optional POST payload; when given the request is a POST,
            otherwise a GET.

    Returns:
        bs4.BeautifulSoup parsed with the "lxml" parser.
    """
    # A mutable default ({}) would be one dict shared by every call;
    # use None as the sentinel instead.
    if headers is None:
        headers = {}

    if data:
        response = requests.post(url, data=data, headers=headers)
    else:
        response = requests.get(url, headers=headers)

    # Prefer the encoding declared inside the page content itself; fall
    # back to the encoding requests guesses from the raw payload.
    encodings = requests.utils.get_encodings_from_content(response.text)
    encoding = encodings[0] if encodings else response.apparent_encoding
    # 'replace' keeps decoding going on bad bytes instead of raising.
    decoded = response.content.decode(encoding, 'replace')
    return bs4.BeautifulSoup(decoded, "lxml")


def get_urls(url, check=True, home=None):
    """Collect the href targets of all <a> tags on *url*'s page.

    Args:
        url {str} -- page to scan.

    Keyword args:
        check {bool|function} -- True keeps every link; otherwise a
            predicate called with each href, keeping the truthy ones.
        home {str} -- site root prepended to relative hrefs
            (defaults to *url*).

    Returns:
        list -- list of url strings.
    """
    if home is None:
        home = url

    soup = url2soup(url)
    body = soup.find('body')
    if body is None:
        # Page without a <body> (original would raise AttributeError).
        return []

    urls = []
    # href=True keeps only <a> tags that actually carry an href attribute.
    for a in body.find_all('a', href=True):
        href = a['href']
        if check is True or check(href):  # filter
            # Keep absolute links untouched; prefix home only onto relative
            # ones. (The original tested href.startswith(url), which glued
            # home onto absolute links pointing at other hosts.)
            if href.startswith(('http://', 'https://')):
                urls.append(href)
            else:
                urls.append(home + href)
    return urls


def schedule(url, handle=print, check=True, home=None):
    """Apply *handle* to every url harvested from *url*; return the results.

    *check* and *home* are passed straight through to get_urls().
    """
    results = []
    for link in get_urls(url, check, home):
        # handle() is typically run for its side effects; its return
        # values are still collected for callers that want them.
        results.append(handle(link))
    return results
        

# ------------- Examples -----------------
def example1():
    """Crawl the web novel 《微微一笑很倾城》 and save each chapter to disk."""
    # Compile once instead of on every check() call.
    chapter_re = re.compile(r'.+\d{1,2}')
    novel = '微微一笑很倾城'

    def handle(url):
        # Download one chapter page and store it as <novel>/<title>.txt.
        soup = url2soup(url)
        tag = soup.find('div', {'class': 'span12'})
        title = tag.find('li', {'class': 'active'}).text
        print(f'loading {title}')
        content = tag.find('p').text

        # Follow the chapter's continuation pages and append their text.
        pages = tag.find('div', {'class': 'pagination'})
        for page in pages.find_all('li')[2:]:
            soup = url2soup(page.find('a')['href'])
            tag = soup.find('div', {'class': 'span12'})
            for c in tag.contents:
                # Type check first: .string is only meaningful for text
                # nodes, and isinstance is the cheaper test.
                if isinstance(c, bs4.NavigableString) and str(c.string).strip():
                    content += c

        # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
        # original two-step version.
        os.makedirs(novel, exist_ok=True)
        with open(f'{novel}/{title}.txt', encoding='utf-8', mode='w') as fo:
            fo.write(title + '\n\n' + content)

    def check(url):
        # Match urls like http://www.wwyxhqc.com/20.html
        return chapter_re.match(url)

    home = 'http://www.wwyxhqc.com'
    schedule(home, check=check, handle=handle)

def example2():
    """Visit book pages on kenshu.cc and print each page's url and title."""
    home = 'https://www.kenshu.cc/nvsheng/'
    # Compiled once; check() runs for every link on the index page.
    book_re = re.compile(r'/xiaoshuo/\d{1,4}')

    def check(url):
        # Match urls like /xiaoshuo/3427
        return book_re.match(url)

    def handle(url):
        soup = url2soup(url)
        print(url)
        title = soup.find('span', {'class': 'text'}).text
        print(title)

    schedule(home, check=check, handle=handle)


def example3():
    """Tally the birth months of celebrities listed on www.mingxing.com."""
    import collections

    home = "http://www.mingxing.com"
    # Compiled once, shared by every check()/handle() call.
    name_page_re = re.compile(r'.+/name/')
    date_re = re.compile(
        r'(?P<year>\d{4})年(?P<month>\d{1,2})月(?P<day>\d{1,2})日')

    def check(url):
        return name_page_re.match(url)

    def handle(url):
        # Return the birth month (int) parsed from a profile page, or
        # None when the page lacks the expected markup.
        soup = url2soup(url)
        try:
            info = soup.find('div', {'class': 'data'}).text
            name = soup.find('span', {'class': 'name'}).text
        except AttributeError:
            # find() returned None: not a profile page — skip it.
            # (Narrowed from the original bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            return None
        m = date_re.search(info)
        if m:
            print(name, int(m['month']))
            return int(m['month'])
        return None

    months = schedule(home, check=check, handle=handle)
    counter = collections.Counter(months)
    print(counter)



def example4():
    """Tally the birth months of celebrities from www.99mingxing.com."""
    import collections

    url = "http://www.99mingxing.com/mxda/nadi/"
    home = "http://www.99mingxing.com"
    # Compiled once, shared by every check()/handle() call.
    profile_re = re.compile(r'.+\d{6}\.html')
    # The year group is optional — some bios omit the birth year.
    date_re = re.compile(
        r'((?P<year>\d{4})年)?(?P<month>\d{1,2})月(?P<day>\d{1,2})日')

    def check(url):
        # Match urls like .../123456.html
        return profile_re.match(url)

    def handle(url):
        # NOTE(review): the empty User-Agent presumably works around the
        # site rejecting requests' default UA — confirm before changing.
        soup = url2soup(url, headers={'User-Agent': ''})
        try:
            info = soup.find('div', {'class': 'jianjie'}).text
        except AttributeError:
            # find() returned None: page without the bio block — skip it.
            # (Narrowed from the original bare `except:`.)
            return None
        m = date_re.search(info)
        if m:
            print(int(m['month']))
            return int(m['month'])
        return None

    months = schedule(url, check=check, handle=handle, home=home)
    counter = collections.Counter(months)
    print(counter)

example4()