#!/usr/bin/python
# encoding: utf-8
from calibre.web.feeds.recipes import BasicNewsRecipe

class fhyblog(BasicNewsRecipe):
    """Calibre recipe: scrape the '玩转 MyBatis：深度解析与定制' tag pages of
    xingxing559.github.io and build one ebook section from the article list."""

    title = u'玩转 MyBatis：深度解析与定制'
    __author__ = u'飞鸿影'
    description = u'''玩转 MyBatis：深度解析与定制'''

    timefmt = '[%Y-%m-%d]'

    # Content-extraction settings
    #keep_only_tags = [{ 'class': 'example' }]  # keep only the selected content
    no_stylesheets = True        # strip CSS
    remove_javascript = True     # strip JavaScript
    auto_cleanup = True          # let calibre auto-clean the HTML
    # delay = 5                  # seconds between page fetches
    max_articles_per_feed = 999  # article cap per feed

    # Base URL of the site; article links on the index pages are relative.
    _SITE = 'https://xingxing559.github.io'
    # Tag-index URL (URL-encoded Chinese tag name). Page 1 lives at this
    # exact path; kept byte-identical to the original working URL.
    _TAG_URL = _SITE + '/tag/%E7%8E%A9%E8%BD%AC%20MyBatis%EF%BC%9A%E6%B7%B1%E5%BA%A6%E8%A7%A3%E6%9E%90%E4%B8%8E%E5%AE%9A%E5%88%B6/page'

    def parse_index(self):
        """Collect (title, url) entries from the tag index pages.

        Returns the list-of-tuples structure calibre expects:
        [(section_title, [{'title': ..., 'url': ...}, ...])].
        """
        start_page = 1
        end_page = 1
        articles = []
        for p in range(start_page, end_page + 1):
            # BUG FIX: the original fetched the same hard-coded URL for every
            # page, so end_page > 1 just re-scraped page 1. Page 1 keeps the
            # original URL; later pages get the page number appended.
            # NOTE(review): assumes the site paginates as .../page/<n> —
            # confirm against the live site before raising end_page.
            page_url = self._TAG_URL if p == 1 else '%s/%d' % (self._TAG_URL, p)
            soup_page = self.index_to_soup(page_url)
            for soup_title in soup_page.findAll('div', {'class': 'title'}):
                href = soup_title.a
                # Guard: skip title divs with no anchor or an empty anchor
                # instead of crashing the whole fetch.
                if href is None or not href.contents:
                    continue
                title = href.contents[0].strip()
                if title == '':
                    continue
                link = 'https://xingxing559.github.io' + href['href']
                articles.append({'title': title, 'url': link})
        # The index lists newest first; reverse to chronological order.
        articles.reverse()
        res = [(self.title, articles)]  # (ebook section name, article list)
        # self.abort_recipe_processing('test')  # debug: abort ebook build
        return res