import json5, time, datetime, ssl
import urllib.request
from lxml import etree
from common import Config, DbHelper
from html_util import HtmlLinks, HtmlContent


class NovelCrawler(Config):
    """Crawl configured web novels, write new chapters to HTML files under
    ``<save_path>/rssfiles/`` and register each file as an RSS item in the DB.
    """

    def __init__(self, save_path):
        """Remember the output directory and open a database helper.

        :param save_path: base directory; files go to ``save_path + '/rssfiles/'``.
        NOTE(review): ``Config.__init__`` is not called here — assumed to be a
        no-op mixin providing ``load_config``; confirm against ``common``.
        """
        self.save_path = save_path + '/rssfiles/'
        self.db = DbHelper()

    def crawl(self):
        """Crawl every novel listed in ``novel.json5``; save each novel's new
        text to a dated HTML file and insert a matching RSS item.

        The DB connection is always closed, even if a single novel fails.
        """
        novel_list = self.load_config('novel.json5')
        # Example config entry:
        # {'name': '唐人的餐桌', 'code': 'trdcz', 'url': 'https://m.147xs.org/book/136540/',
        #  'chapterXpath': "//div[@class='book_last']/dl/dd/a", 'contentXpath': "//div[@id='nr']//text()"}
        try:
            for novel in novel_list:
                text = self.crawl_one(novel['code'], novel['url'],
                                      novel['chapterXpath'], novel['contentXpath'])
                if not text:
                    # Nothing new for this novel — skip file and RSS entry.
                    continue
                today = datetime.datetime.now().strftime("%Y-%m-%d")
                filename = '%s_%s.html' % (novel['code'], today)
                with open(self.save_path + filename, 'w', encoding='utf8') as fp:
                    fp.write(text)
                self.db.insert_rss_item(
                    novel['name'] + today,
                    'http://dev.cddyys.com:8180/mynotes/rssfiles/' + filename)
        finally:
            # Original leaked the connection when crawl_one raised mid-loop.
            self.db.close()

    def crawl_one(self, code, chapterUrl, chapterXpath, contentXpath):
        """Download the oldest pending chapters of one novel.

        Registers any newly discovered chapter links, then fetches the oldest
        unfetched chapters, marking each done in the DB as it is downloaded.

        :returns: concatenated "title + content" text of all chapters fetched
                  in this run; empty string when nothing was pending.
        """
        chapter = HtmlLinks(chapterUrl, chapterXpath)
        links = chapter.get_links()

        self.db.insert_chapters(code, links)
        links = self.db.get_oldest_chapters(code)

        # Each DB row looks like (id, code, status, url, title), e.g.
        # (15, 'trdcz', 0, 'https://m.147xs.org/book/136540/204810378.html', '第三十八章还是心思单纯一些好')
        parts = []
        for link in links:
            crawler = HtmlContent(link[3], contentXpath)
            parts.append(link[4])
            parts.append(crawler.get_content())
            self.db.update_chapter_status(link[0])
            print("已下载： " + link[4])

        # join() instead of repeated '+' — the original was quadratic in the
        # number/size of chapters.
        return ''.join(parts)



def test_NovelCrawler():
    """Smoke test: run a full crawl cycle against the default home directory."""
    crawler = NovelCrawler('/home/gjh/mynotes/')
    crawler.crawl()

if __name__ == '__main__':
    # Timestamp the run so its output can be matched against cron logs.
    started_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('>>>>>>>>>> novel:\t' + started_at)
    test_NovelCrawler()