# coding = utf-8

import click, json, importlib, inspect, time, requests, re, datetime, sys, os
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup as bs
from feedgen.feed import FeedGenerator
import os
import importlib.util


def caller_locals(a=3):
    """Return the local-variable mapping of the frame ``a`` levels up the stack.

    Depth 0 is this function's own frame; the default of 3 is tuned for the
    call chain used by Rule.do_book (see cli()).
    """
    frame = inspect.stack()[a][0]
    return frame.f_locals


class Timer:
    """Millisecond stopwatch shared across all instances via class attribute ``T``.

    Each call to :meth:`t` returns the elapsed time since the previous call
    (formatted with one decimal place) and restarts the clock.
    """

    # Timestamp of the most recent t() call, shared by every instance.
    T = 0

    def t(self):
        """Return milliseconds elapsed since the last call, then reset the mark."""
        elapsed = "%.1f" % ((time.time() - Timer.T) * 1000)
        Timer.T = time.time()
        return elapsed


class Fetcher:
    """Fetch a URL with per-scheme retries, storing the response body in ``self.r``.

    On any request failure the error is logged and ``self.r`` stays "".
    """

    def __init__(self, url, method="POST", data=None, headers=None):
        # Avoid mutable default arguments (the original shared one dict
        # across every call).
        data = {} if data is None else data
        headers = {} if headers is None else headers
        self.r = ""
        # requests.Session methods are lowercase; the original compared the
        # raw method string, so the default "POST" failed hasattr() and left
        # ``fetch`` unbound, raising an uncaught NameError.
        verb = method.lower()
        click.echo("[LOG]Request url:%s" % url, nl=False), Timer().t()
        # Close the session when done (the original leaked it).
        with requests.Session() as s:
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            fetch = getattr(s, verb, None)
            try:
                if fetch is None:
                    raise ValueError("Unsupported HTTP method: %s" % method)
                if verb == "get":
                    resp = fetch(url, params=data, headers=headers, timeout=5)
                else:
                    resp = fetch(url, data=data, headers=headers, timeout=5)
                self.r = resp.text
            except (requests.exceptions.RequestException, ValueError) as e:
                click.echo("[ERROR]" + str(e))
        click.echo(" completed in %sms" % Timer().t())


class Cache:
    """JSON cache persisted in a file named ``CACHE`` in the working directory.

    ``Cache()`` loads the existing cache (creating an empty file if missing);
    ``Cache(data)`` overwrites the cache with ``data``.
    """

    def __init__(self, data=None):
        if data is None:
            # Read mode: create the file on first use, then load it.
            if not os.path.exists('CACHE'):
                open('CACHE', 'w').close()
            with open('CACHE', 'r') as fh:
                self.data = json.loads(fh.read() or "{}")
        else:
            # Write mode: persist the supplied mapping immediately.
            # (The original kept the handle open without flushing, so data
            # only reached disk at garbage collection; it also ended with a
            # stray no-op ``self.file`` statement.)
            self.data = data
            with open('CACHE', 'w') as fh:
                fh.write(json.dumps(data))
        # Files are now closed eagerly via context managers; kept so that
        # __del__ remains a safe no-op.
        self.file = None

    def get(self):
        """Return the cached mapping."""
        return self.data

    def save(self, bookname, lastchapter):
        """Record ``lastchapter`` and the current time under ``bookname`` (stripped)."""
        bookname = bookname.strip()
        self.data[bookname] = {'name': lastchapter, 'time': time.time()}
        with open('CACHE', 'w') as fh:
            fh.write(json.dumps(self.data))
        return self

    def __del__(self):
        # Defensive: the attribute may be missing if __init__ raised.
        fh = getattr(self, 'file', None)
        if fh is not None:
            fh.close()


class Rule:
    """Load a site-specific scraping rule module and build an RSS feed from it.

    A rule module (``rules/<name>.py``) declares the search request plus the
    ``searchPattern`` / ``bookPattern`` / ``tocPattern`` / ``chapterPattern``
    parser expressions consumed by :meth:`parser`.
    """

    # Current page/fragment text that parser() expressions operate on.
    r = ""

    def __init__(self, rulename):
        # Load rules/<rulename>.py as a standalone module.
        spec = importlib.util.spec_from_file_location("mintrss.rule", "rules/" + rulename + ".py")
        m = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(m)
        self.m = m

    def parser(self, parser):
        """Evaluate a rule expression against ``self.r``.

        The prefix selects the engine: ``bs:`` BeautifulSoup expression,
        ``re:`` regex findall, ``rf:`` regex search (first match), otherwise
        raw Python evaluated with the caller's locals.

        SECURITY NOTE: expressions are executed with eval() and must come
        from trusted local rule files only.
        """
        if parser.startswith('bs:'):
            # BeautifulSoup mode: the expression is applied to the soup ``i``.
            expr = parser[3:]
            i = bs(self.r, "html.parser")
            return eval("i" + expr)
        elif parser.startswith('re:'):
            # Regex mode: return every match in the page text.
            expr = parser[3:]
            regex = eval("re.compile(\"%s\")" % expr)
            return regex.findall(self.r)
        elif parser.startswith('rf:'):
            # Regex find mode: return the first match only.
            expr = parser[3:]
            regex = eval("re.compile(\"%s\")" % expr)
            # Fix: compiled patterns have no .find(); the original raised
            # AttributeError on this branch.  search() yields the first match.
            return regex.search(self.r)
        else:
            # Python mode: evaluate with the caller's locals as globals.
            # caller_locals(2) resolves two frames up, i.e. the method that
            # invoked parser().  NOTE(review): the local ``i`` bound here is
            # not actually visible inside eval() (which sees only the caller
            # locals and module globals); kept for compatibility.
            i = self.r
            return eval(parser, caller_locals(2), globals())

    def do_book(self, key):
        """Search for ``key``, fetch its chapters, and write ``rss/<book>.xml``.

        WARNING: this method reads ``m`` and ``mode`` out of the CLI frame
        via caller_locals() (default depth 3: caller_locals -> do_book ->
        the list comprehension in cli() -> cli).  It only works when invoked
        from that comprehension.
        """
        m = caller_locals()['m']

        def url_check(url):
            # Turn a site-relative URL into an absolute one using the
            # rule's site root.
            if not url.startswith('http'):
                if url.startswith('/'):
                    return m.url + url[1:]
                return m.url + url
            return url

        # (The original also pulled an unused ``rule_name`` local here.)
        mode = caller_locals()['mode']
        click.echo("[LOG]Update book %s started..." % (key))
        m.search['data'][m.search['input']] = key
        # Fetch the search results page.
        self.r = Fetcher(m.search['url'], m.search['method'], m.search['data'], m.search['headers']).r
        # Match the book list inside the search page.
        click.echo("[LOG]Parsing html...", nl=False), Timer().t()
        booklists = self.parser(m.searchPattern['bookLists'])
        click.echo(" booklists parsed with %d results in %sms" % (len(booklists), Timer().t()))
        books = []
        for booklist in booklists:
            self.r = str(booklist)
            bn = self.parser(m.searchPattern['bookName'])
            ba = self.parser(m.searchPattern['bookAuthor'])
            bu = url_check(self.parser(m.searchPattern['bookUrl']))
            bl = self.parser(m.searchPattern['lastChapter'])
            bc = self.parser(m.searchPattern['bookCover'])
            # Book layout: [name, author, url, last chapter, cover].
            books.append([str(bn).strip(), str(ba).strip(), str(bu).strip(), str(bl).strip(), str(bc).strip()])
        # SECURITY NOTE: whichBook is eval'd from the (trusted) rule file;
        # it typically selects an entry from ``books``.
        book = eval(m.searchPattern['whichBook'])
        click.echo("[LOG]Book %s by %s data fetched successfully in %sms" % (book[0], book[1], Timer().t()))
        click.echo("[LOG]Start fetching book page.")
        self.r = Fetcher(book[2], 'get').r  # fetch the book's landing page
        Timer().t()
        Cache().save(book[0].strip(), book[3])  # cache the newest chapter name
        toc_url = self.parser(m.bookPattern['tocUrl'])
        book.append(self.parser(m.bookPattern['bookIntro']))  # book[5]: book intro
        click.echo("[LOG]Parsing book page html completed in %sms" % Timer().t())
        if toc_url != book[2]:
            toc_url = url_check(toc_url)
            click.echo("[LOG]Start fetching book TOC page.")
            self.r = Fetcher(toc_url, 'get').r  # fetch the table of contents
        click.echo("[LOG]Parsing book %s TOC page html..." % book[0], nl=False), Timer().t()
        if 'chapterList' in m.tocPattern:
            self.r = self.parser(m.tocPattern['chapterList'])
        cpurls = self.parser(m.tocPattern['chapterUrl'])
        cpnames = self.parser(m.tocPattern['chapterName'])
        click.echo(" completed in %sms with %s chapters" % (Timer().t(), len(cpurls)))
        # Newest chapter first.
        cpurls.reverse()
        cpnames.reverse()
        feed = FeedGenerator()
        feed.id("http://rss.git.kongrui.cn/rss/%s %s.xml" % (book[0], book[1]))
        feed.title("《%s》/ %s著" % (book[0], book[1]))
        feed.link(href="http://rss.git.kongrui.cn/")
        feed.description(book[5])
        # Decide between update mode (feed file already exists) and create mode.
        if os.path.isfile(os.path.join(os.getcwd(), 'rss/%s %s.xml' % (book[0], book[1]))):
            # Update mode: merge newly published chapters with the local feed.
            click.echo("[LOG]Running in updating mode,start parsing."), Timer().t()
            with open(os.path.join(os.getcwd(), 'rss/%s %s.xml' % (book[0], book[1])), "r", encoding="utf-8") as fh:
                xml_data = fh.read()
            rss_soup = bs(xml_data, "lxml")
            # The newest locally-known chapter is the first <item>'s guid.
            last_chapter_url = rss_soup("item")[0].guid.text
            cpurls = list(map(url_check, cpurls))
            if last_chapter_url not in cpurls:
                click.echo(
                    "[ERROR]Cannot update,the source site url has changed.Please delete original rss file and regenerate!")
                return

            local_last_chapter_index = cpurls.index(last_chapter_url)

            chapter_count = local_last_chapter_index  # number of chapters to fetch
            limit = 50 if mode == "new50" else 100 if mode == "new100" else 999999
            if chapter_count == 0:
                return click.echo("[LOG]Already up to date,nothing to do.")
            click.echo("[LOG]%d chapters need to update." % chapter_count)
            for i in range(0, local_last_chapter_index):
                url = url_check(cpurls[i])
                self.r = Fetcher(url, 'get').r  # fetch the chapter page
                click.echo("[LOG]Finished updating chapter %s." % cpnames[i])
                content = self.parser(m.chapterPattern['content'])
                content = self.parser(m.chapterPattern['filter'])
                chapter_name = self.parser(m.chapterPattern['name'])
                item = feed.add_entry(order='append')
                item.id(url)
                item.title(chapter_name)
                item.link(href="http://rss.git.kongrui.cn/rss/%s %s.xml" % (book[0], book[1]))
                item.author(name=book[1])
                item.description(content)
                # Timezone UTC+8 (28800 s).
                item.pubDate(datetime.datetime.now(datetime.timezone(datetime.timedelta(0, 28800))))
                # End of newly-fetched chapters.
            local_chapter_count = len(rss_soup("item"))
            if chapter_count < limit:
                # Re-append locally cached chapters up to the mode's limit.
                max_index = min(local_chapter_count, limit - chapter_count)
                for i in range(0, max_index):
                    item = feed.add_entry(order='append')
                    item.id(rss_soup("item")[i].guid.text)
                    item.title(rss_soup("item")[i].title.text)
                    item.link(href="http://rss.git.kongrui.cn/rss/%s %s.xml" % (book[0], book[1]))
                    item.author(name=book[1])
                    item.description(rss_soup("item")[i].description.text)
                    item.pubDate(rss_soup("item")[i].pubdate.text)
                # End of locally cached chapters.
            click.echo("[LOG]Writing RSS file for %s..." % book[0])
            if not os.path.exists("./rss"):
                os.makedirs("./rss")
            feed.rss_file('rss/%s %s.xml' % (book[0], book[1]))  # write RSS xml
            click.echo("[LOG]Updating finished,all things up to date.")
        else:
            # Create mode: build the feed from scratch, honoring the limit.
            if mode == "new50":
                cpurls, cpnames = cpurls[:50], cpnames[:50]
            elif mode == "new100":
                cpurls, cpnames = cpurls[:100], cpnames[:100]
            click.echo("[LOG]Start to create RSS,mode %s." % (mode)), Timer().t()
            for i, url in enumerate(cpurls):
                url = url_check(url)
                Timer().t()
                self.r = Fetcher(url, 'get').r  # fetch the chapter page
                click.echo("[LOG]Fetched chapter %s ,runtime %sms." % (cpnames[i], Timer().t()))
                content = self.parser(m.chapterPattern['content'])
                content = self.parser(m.chapterPattern['filter'])
                item = feed.add_entry(order='append')
                item.id(url)
                item.title(cpnames[i])
                item.link(href="http://rss.git.kongrui.cn/rss/%s.xml" % key.strip())
                item.author(name=book[1])
                item.description(content)
                item.pubDate(datetime.datetime.now(datetime.timezone(datetime.timedelta(0, 28800))))
            click.echo("[LOG]Generating RSS file for %s..." % book[0], nl=False), Timer().t()
            if not os.path.exists("./rss"):
                os.makedirs("./rss")
            feed.rss_file('rss/%s %s.xml' % (book[0], book[1]))  # write RSS xml
            click.echo(" completed in %sms" % Timer().t())


# CLI entry point: apply rule RULE_NAME to each book KEY, optionally limiting
# chapter output with MODE (new50 / new100 / all).
# (Documented with comments rather than a docstring on purpose: click would
# surface a docstring as the command's --help text.)
@click.command()
@click.help_option("-h", "--help")
@click.version_option("0.0.2", "-v", "--version", prog_name="MintRss, a minimal console tool for generating novel rss")
@click.argument("rule_name", required=True)
@click.argument("keys", required=True, nargs=-1)
@click.argument("mode", required=False,
                type=click.Choice(['new50', 'new100', 'all']))
def cli(rule_name, keys, mode):
    r = Rule(rule_name)
    m = r.m
    click.echo("[LOG]Use rule %s at %s" % (m.name, m.url))
    # NOTE: Rule.do_book reads m/rule_name/mode out of this frame via
    # caller_locals(a=3); the list comprehension supplies the extra stack
    # frame that depth assumes, so do not rewrite it as a plain for-loop.
    # (presumably breaks on Python 3.12+, where PEP 709 inlines list
    # comprehension frames — TODO confirm.)
    result = [r.do_book(b) for b in keys]


# Run the click command only when executed as a script.
if __name__ == "__main__":
    cli()
