#!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>


import json
from pprint import pprint

from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.web.feeds.news import BasicNewsRecipe
from polyglot.functools import lru_cache

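# Set to True to fetch pages via the Wayback Machine instead of directly from
# nytimes.com, which can help when direct downloads are blocked.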
use_wayback_machine = False


def absolutize(url):
    if url.startswith('/'):
        url = 'https://www.nytimes.com' + url
    return url


@lru_cache(2)
def parser_module():
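    # The NYT parser ships as a calibre "live" module, so it can be updated
    # independently of calibre releases; cache the loaded module.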
    from calibre.live import load_module
    return load_module('calibre.web.site_parsers.nytimes')


class NewYorkTimesBookReview(BasicNewsRecipe):
    title = u'New York Times Book Review'
    language = 'en_US'
    description = 'The New York Times Sunday Book Review'
    __author__ = 'Kovid Goyal'

    no_stylesheets = True
    remove_javascript = True
    ignore_duplicate_articles = {'title', 'url'}
    encoding = 'utf-8'

    extra_css = '''
        .byl, .time { font-size:small; color:#202020; }
        .cap { font-size:small; text-align:center; }
        .cred { font-style:italic; font-size:small; }
        em, blockquote { color: #202020; }
        .sc { font-variant: small-caps; }
        .lbl { font-size:small; color:#404040; }
        img { display:block; margin:0 auto; }
    '''

    articles_are_obfuscated = use_wayback_machine

    if use_wayback_machine:
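        # calibre calls this hook for "obfuscated" articles: download the page
        # via the Wayback Machine into a temporary file and return its path.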
        def get_obfuscated_article(self, url):
            from calibre.ptempfile import PersistentTemporaryFile
            with PersistentTemporaryFile() as tf:
                tf.write(self.get_nyt_page(url))
            return tf.name

    @property
    def nyt_parser(self):
        return parser_module()

    def get_nyt_page(self, url, skip_wayback=False):
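        # Fetch a page via the Wayback Machine when enabled, otherwise
        # download the raw bytes directly.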
        if use_wayback_machine and not skip_wayback:
            from calibre import browser
            return self.nyt_parser.download_url(url, browser())
        return self.index_to_soup(url, raw=True)

    def preprocess_raw_html(self, raw_html, url):
        return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)

    recipe_specific_options = {
        'res': {
            'short': (
                'For hi-res images, select a resolution from the following\noptions: '
                'popup, jumbo, mobileMasterAt3x, superJumbo'
            ),
            'long': (
                'This is useful for non e-ink devices. For a smaller file size\nthan '
                'the default, use mediumThreeByTwo440, mediumThreeByTwo225 or articleInline.'
            ),
        },
        'comp': {
            'short': 'Compress News Images?',
            'long': 'enter yes to reduce the size of the downloaded images',
            'default': 'no'
        }
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
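        # A user-supplied value for a recipe-specific option arrives as a
        # string in place of the option-definition dict above.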
        c = self.recipe_specific_options.get('comp')
        if c and isinstance(c, str):
            if c.lower() == 'yes':
                self.compress_news_images = True

    def parse_index(self):
        # return [('Articles', [{'url': 'https://www.nytimes.com/2022/09/08/books/review/karen-armstrong-by-the-book-interview.html', 'title':'test'}])]
        soup = self.index_to_soup('https://www.nytimes.com/pages/books/review/index.html')
        # with open('/t/raw.html', 'w') as f: f.write(str(soup))
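        # The TOC is reconstructed from the JSON data embedded in the page,
        # not from the rendered HTML.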
        feeds = parse_toc(soup)
        for section_title, articles in feeds:
            self.log(section_title)
            for a in articles:
                self.log('\t' + a['title'], a['url'])
        return feeds

    def get_browser(self, *args, **kwargs):
        # Identify as the Internet Archive's crawler. Note that the value must
        # not include the 'User-Agent: ' header-name prefix.
        kwargs['user_agent'] = 'Mozilla/5.0 (compatible; archive.org_bot; Wayback Machine Live Record; +http://archive.org/details/archive.org_bot)'
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        return br

    def preprocess_html(self, soup):
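        # If the user requested a specific resolution, rewrite image URLs,
        # which encode the rendition as a suffix such as -articleLarge, to
        # point at that rendition instead.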
        w = self.recipe_specific_options.get('res')
        if w and isinstance(w, str):
            res = '-' + w
            for img in soup.findAll('img', attrs={'src':True}):
                if '-article' in img['src']:
                    ext = img['src'].split('?')[0].split('.')[-1]
                    img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext
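        # Flatten block elements inside captions so they render inline.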
        for c in soup.findAll('div', attrs={'class':'cap'}):
            for p in c.findAll(['p', 'div']):
                p.name = 'span'
        return soup


def asset_to_article(asset):
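    # Convert an Article object from the preloaded data into the dict format
    # calibre expects for feed entries.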
    title = asset['headline']['default']
    return {'title': title, 'url': asset['url'], 'description': asset['summary']}


def preloaded_data(soup):
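    # The page embeds its data as a JavaScript assignment of the form
    # window.__preloadedData = {...}; slice out the object literal, strip
    # JavaScript-only syntax and parse it as JSON.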
    candidates = soup.find_all('script', string=lambda x: x and 'window.__preloadedData' in x)
    script = candidates[0]
    script = str(script)
    raw = script[script.find('{') : script.rfind(';')].strip().rstrip(';')  # }
    raw = parser_module().clean_js_json(raw)
    return json.JSONDecoder(strict=False).raw_decode(raw)[0]['initialState']


def parse_toc(soup):
    data = preloaded_data(soup)
    # with open('/t/raw.json', 'w') as f: pprint(data, stream=f)
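    # data is an Apollo-style normalized cache: a flat map from keys such as
    # 'Article:...' to objects, with cross-references stored as {'__ref': key}.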
    article_map = {}
    for k, v in data.items():
        if isinstance(v, dict) and v.get('__typename') == 'Article':
            article_map[k] = asset_to_article(v)
    feeds = []
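    # The article groupings at the top of the page are collected into a
    # single 'Highlights' section.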
    for k, v in data['ROOT_QUERY'].items():
        if k.startswith('workOrLocation'):
            for g in data[v['__ref']]['groupings']:
                for c in g['containers']:
                    articles = []
                    for r in c['relations']:
                        ref = r['asset']['__ref']
                        if ref in article_map:
                            articles.append(article_map[ref])
                    if articles:
                        feeds.append(('Highlights', articles))

    # The named sections: articles are listed in the collection's stream;
    # if the stream is empty, fall back to any embedded collections.
    for k, v in data['ROOT_QUERY'].items():
        if not k.startswith('workOrLocation'):
            continue
        c = data[v['__ref']]
        section_title = c['name']
        articles = []
        for sk, sv in c['collectionsPage'].items():
            if sk.startswith('stream'):
                for ek, ev in sv.items():
                    if ek.startswith('edges'):
                        for q in ev:
                            r = q['node']['__ref']
                            if r.startswith('Article:'):
                                articles.append(article_map[r])
        if not articles:
            for ec in c['collectionsPage']['embeddedCollections']:
                for e in ec['stream']['edges']:
                    for ek, ev in e.items():
                        if ek.startswith('node'):
                            ref = ev['__ref']
                            if ref in article_map:
                                articles.append(article_map[ref])
        if articles:
            feeds.append((section_title, articles))
    return feeds


if __name__ == '__main__':
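    # Standalone testing: parse a saved copy of the index page and print the
    # extracted table of contents.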
    import sys
    with open(sys.argv[-1]) as f:
        html = f.read()
    soup = BeautifulSoup(html)
    feeds = parse_toc(soup)
    pprint(feeds)
