from datetime import date
import requests
from bs4 import BeautifulSoup
import datetime
import json

from requests.api import get

# Load the spider configuration (request headers, start URL) once at import
# time. Encoding is pinned so the read does not depend on the platform default.
with open("config.json", "r", encoding="utf-8") as f:
    global_config = json.load(f)

# Index page to crawl; None if the key is absent from config.json.
url = global_config.get("url")

def get_html(url: str, config: dict = global_config, *, timeout: float = 30.0):
    """Fetch *url* and return the response body as text.

    Request headers are taken from ``config["headers"]`` (may be None).
    *timeout* (seconds) bounds the request — the original call had no
    timeout and could block forever on a stalled connection.
    """
    headers = config.get("headers")
    return requests.get(url=url, headers=headers, timeout=timeout).text

def get_dom(html, css_selector):
    """Parse *html* and return all elements matching *css_selector*."""
    soup = BeautifulSoup(html, 'html.parser')
    return soup.select(css_selector)

# dom = result_soup.select("body > main > div.gnt_pr > div.gnt_m_tt > div.gnt_m_tt_col.gnt_m_tt_col__fc a")
def get_news_href(url):
    """Collect article links from the index page at *url*.

    Each site-relative href found in the headline container is concatenated
    onto *url*; returns a list of absolute article URLs.
    NOTE(review): plain concatenation assumes *url* has no trailing slash and
    hrefs are root-relative — confirm against the configured start URL.
    """
    html = get_html(url=url)
    anchors = get_dom(html, "body > main > div.gnt_pr > div.gnt_m_tt > div:nth-child(2) a")
    # `is not None` (identity) instead of the original `!= None` comparison.
    return [url + a.get("href") for a in anchors if a.get("href") is not None]

def get_news(url: str):
    """Scrape one article page and return its fields as a dict.

    Returns ``{"title": str, "author": str|None, "date": str|None,
    "text": list[str]}`` — paragraphs of the article body as a list.
    Raises IndexError if the page does not match the expected layout
    (missing title/date nodes).
    """
    html = get_html(url=url)

    title_dom = get_dom(html, "div.gnt_cw_w > main > article > h1")
    title = title_dom[0].get_text()

    date_dom = get_dom(html, "body > div.gnt_cw_w > main > article > div.gnt_ar_dt")
    # Renamed from `date` to avoid shadowing the `datetime.date` import.
    published = date_dom[0].get("aria-label")

    # The byline lives in one of two containers depending on page layout.
    # BUG FIX: the original had two `except Exception` clauses on a single
    # try, so the second fallback (author = None) was unreachable dead code.
    # The intended cascade is expressed as a nested try/except instead.
    try:
        author_dom = get_dom(html, "div.gnt_cw_w > main > article > div.gnt_ar_by")
        author = author_dom[0].get_text("|")
    except Exception:
        try:
            author_dom = get_dom(html, "body > div.gnt_cw_w > main > article > div.gnt_ar_pb")
            author = author_dom[0].get_text()
        except Exception as e:
            author = None
            print(e)

    text_dom = get_dom(html, "div.gnt_cw_w > main > article > div.gnt_ar_b > p")
    texts = [p.get_text() for p in text_dom]

    return dict(
        title=title,
        author=author,
        date=published,
        text=texts,
    )

# article = get_news(index_hrefs[0])

def flush_news(urls: list):
    """Fetch every article in *urls* and dump them to a dated JSON file.

    The output file is named ``<YYYY-MM-DD>News.json`` in the working
    directory; its top-level object maps each article's position in *urls*
    (serialized as string keys — JSON object keys are always strings) to
    the dict produced by ``get_news``.
    """
    today = str(datetime.date.today())
    articles = [get_news(u) for u in urls]
    with open(today + "News.json", "w", encoding="utf-8") as f:
        # ensure_ascii=False keeps non-ASCII article text readable in the
        # UTF-8 file instead of \uXXXX escapes (the original escaped it).
        json.dump(dict(enumerate(articles)), f, ensure_ascii=False)

def spider_api():
    """Entry point: crawl the index page and persist today's articles.

    Deliberate best-effort boundary — any failure is printed and swallowed
    so a scheduled run never propagates an exception to the caller.
    """
    try:
        index_hrefs = get_news_href(url)
        flush_news(index_hrefs)
    except Exception as e:
        # Removed the dead `pass` that followed print(e) in the original.
        print(e)





