import requests
from requests.api import get
from bs4 import BeautifulSoup

from time import time
import datetime
import json
import os

from app.translateSpider import webApi

# Load the scraper configuration once at import time.
# Encoding is pinned to UTF-8 so non-ASCII config values (e.g. Chinese
# strings) parse identically on every platform; json.load replaces the
# json.loads(f.read()) round-trip.
with open("config.json", "r", encoding="utf-8") as f:
    global_config = json.load(f)


# Site root URL scraped by spider_api(); None if absent from config.
url = global_config.get("url")

# 获取网页文本内容
# 获取网页文本内容
def get_html(url: str, config: dict = global_config, timeout: float = 30.0) -> str:
    """Fetch *url* and return the response body as text.

    Args:
        url: absolute URL to fetch.
        config: configuration dict; its ``"headers"`` entry (if any) is
            sent with the request. Defaults to the module-level config.
        timeout: seconds before the request is aborted. New parameter
            (default keeps callers working): without it requests.get can
            block indefinitely on a dead connection and stall the spider.

    Returns:
        The decoded response body.
    """
    headers = config.get("headers")
    print("getting usa today...")
    return requests.get(url=url, headers=headers, timeout=timeout).text

# 将网页文本解析成dom树
# 将网页文本解析成dom树
def get_dom(html, css_selector):
    """Parse *html* and return all elements matching *css_selector*.

    Args:
        html: raw HTML text.
        css_selector: CSS selector passed to BeautifulSoup's ``select``.

    Returns:
        A list of matching tag objects (possibly empty).
    """
    soup = BeautifulSoup(html, 'html.parser')
    return soup.select(css_selector)

# 从首页获取对应新闻超链接
# 从首页获取对应新闻超链接
def get_news_href(url):
    """Collect article links from the landing page at *url*.

    Scraped hrefs are concatenated onto *url*.
    NOTE(review): this assumes every href is site-relative (e.g.
    "/story/..."); an absolute href would yield a malformed link —
    confirm against the live page markup.

    Args:
        url: site root URL, also used as the prefix for scraped hrefs.

    Returns:
        list[str]: article URLs (possibly empty).
    """
    html = get_html(url=url)
    dom = get_dom(html, "body > main > div.gnt_pr > div.gnt_m_tt > div:nth-child(2) a")
    news_href = []
    print(len(dom))
    print("getting news link...")
    for anchor in dom:
        href = anchor.get("href")
        # identity check (`is not None`) instead of `!= None`;
        # anchors without an href attribute are skipped
        if href is not None:
            news_href.append(url + href)
    return news_href


def get_news(url: str):
    """Scrape a single article page into a dict.

    Args:
        url: full URL of the article page.

    Returns:
        dict with keys ``title``, ``author``, ``date`` and ``text`` (a
        list of ``{"zh": original, "cn": translated}`` paragraph pairs),
        or ``False`` when no title could be extracted (non-article page).
    """
    print("getting news ...")
    html = get_html(url=url)

    # A missing <h1> usually means a non-article page (video/gallery):
    # report and skip.  (Original had an unreachable `pass` after return.)
    try:
        title_dom = get_dom(html, "div.gnt_cw_w > main > article > h1")
        title = title_dom[0].get_text()
    except Exception as e:
        print(e)
        return False

    # Publication date; fall back to None instead of raising IndexError
    # when the element is absent (the original crashed here).
    date_dom = get_dom(html, "body > div.gnt_cw_w > main > article > div.gnt_ar_dt")
    print(date_dom)
    date = date_dom[0].get("aria-label") if date_dom else None

    # Author byline has two known layouts.  The original attached two
    # `except Exception` clauses to one `try`, so the second clause was
    # dead code and a failing fallback selector crashed the scrape; the
    # fallbacks are now properly nested: gnt_ar_by -> gnt_ar_pb -> None.
    try:
        author_dom = get_dom(html, "div.gnt_cw_w > main > article > div.gnt_ar_by")
        print(author_dom)
        author = author_dom[0].get_text("|")
    except Exception:
        try:
            author_dom = get_dom(html, "body > div.gnt_cw_w > main > article > div.gnt_ar_pb")
            print(author_dom)
            author = author_dom[0].get_text()
        except Exception as e:
            author = None
            # original `print("get_news"+e)` raised TypeError (str + Exception)
            print("get_news", e)

    # Body paragraphs, each paired with its machine translation.
    # NOTE(review): the "zh" key holds the original text and "cn" the
    # translation; the labels look swapped but downstream consumers may
    # rely on them — left unchanged.
    text_dom = get_dom(html, "div.gnt_cw_w > main > article > div.gnt_ar_b > p")
    texts = [{"zh": dom.get_text(), "cn": webApi(dom.get_text())} for dom in text_dom]

    article = dict(
        title = title,
        author = author,
        date = date,
        text = texts
    )
    return article

# 刷新news
def flush_news(urls:list):
    today = str(datetime.date.today())
    # articles = [get_news(url) for url in urls]
    articles = []
    for url in urls:
        news = get_news(url)
        if news:
            articles.append(news)


    path = os.path.join("articalData", today + "News.json")
    with open(path, "w", encoding="utf-8") as f:
        f.write(json.dumps(articles))

# web调用接口
def spider_api():
    try:
        index_hrefs = get_news_href(url)
        flush_news(index_hrefs)
    except Exception as e:
        print(e)
        pass
