import json5
import time
import datetime
from lxml import etree
from feedgen.feed import FeedGenerator
from common import Config
from html_util import HtmlLinks, HtmlContent


class FeedMaker(Config):
    """Crawl a list of configured feeds and render each as an RSS XML file.

    Configuration is read via ``Config.load_config('feed.json5')``; page
    scraping is delegated to the ``HtmlLinks`` / ``HtmlContent`` helpers and
    feed rendering to ``feedgen.FeedGenerator``.
    """

    def __init__(self, save_path):
        """
        :param save_path: directory the generated ``<code>.xml`` files are
                          written into (a trailing slash is appended).
        """
        # NOTE(review): Config's __init__ is not visible from here; if it
        # requires initialization, a super().__init__() call is missing —
        # confirm against the Config base class before adding it.
        self.save_path = save_path + '/'

    def crawl(self):
        """Load the feed configuration and crawl every configured feed."""
        feed_list = self.load_config('feed.json5')

        for feed in feed_list:
            # The feed's own URL is always the first page to crawl.
            feed_pages = [{'url': feed['url'],
                           'chapterXpath': feed['chapterXpath'],
                           'contentXpath': feed.get('contentXpath'),
                           'top_n': feed.get('top_n')}]
            # Extra pages inherit the feed-level xpaths/top_n when they do
            # not define their own.
            for page in feed.get('append_pages', []):
                page.setdefault('chapterXpath', feed['chapterXpath'])
                page.setdefault('contentXpath', feed.get('contentXpath'))
                page.setdefault('top_n', feed.get('top_n'))
                feed_pages.append(page)
            self.crawl_one(feed['name'], feed['code'], feed['url'], feed_pages)

    def crawl_one(self, name, code, homeUrl, feed_pages):
        """Crawl one feed's pages, fetch article bodies, and emit its RSS file.

        :param name: human-readable feed title.
        :param code: short identifier used as the output file name.
        :param homeUrl: the feed's home page URL (used as the alternate link).
        :param feed_pages: list of page dicts with ``url``, ``chapterXpath``,
                           ``contentXpath`` and ``top_n`` keys.
        :return: the collected link records ``[url, title, ..., content]``.
        """
        print("crawl feed:" + name)
        links = []
        for page in feed_pages:
            chapter = HtmlLinks(page['url'], page['chapterXpath'])
            page_links = chapter.get_links()
            # Carry the page's contentXpath along with each link; it is
            # replaced by the downloaded content below.
            for pl in page_links:
                pl.append(page.get('contentXpath'))
            # top_n (when set) keeps only the newest N links of the page.
            links.extend(page_links[-page['top_n']:] if page.get('top_n') else page_links)

        for link in links:
            if link[-1]:
                contentXpath = link[-1]
                crawler = HtmlContent(link[0], contentXpath)
                content = crawler.get_content()
                link[-1] = content
                print("已下载： "+link[0])
            else:
                # No content xpath configured: leave the description empty.
                link[-1] = ''

        self.genFeed(name, code, homeUrl, links)
        return links

    def genFeed(self, name, code, homeUrl, links):
        """Render *links* as an RSS feed and write it to ``save_path/code.xml``.

        :param name: feed title (also used as the feed description).
        :param code: output file name stem.
        :param homeUrl: optional alternate link for the channel.
        :param links: link records ``[url, title, ..., content]``; no file is
                      written when the list is empty.
        """
        if not links:
            return
        fg = FeedGenerator()
        fg.title(name)
        if homeUrl:
            fg.link(href=homeUrl, rel='alternate')
        fg.description(name)

        for item in links:
            fe = fg.add_entry()
            fe.title(item[1])
            fe.link(href=item[0])
            fe.description(item[-1])

        fg.rss_file(self.save_path + code + '.xml')


if __name__ == '__main__':
    # Print a timestamped start marker, then crawl every configured feed.
    started = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('>>>>>>>>>> feeds:\t' + started)
    home_path = '/home/gjh/mynotes'
    maker = FeedMaker(home_path)
    maker.crawl()
