# News parsers: rbc.ru short-news feed and NYTimes personal-tech section.

from bs4 import BeautifulSoup
import requests
import re
import time
import logging

logger = logging.getLogger("parser")


def parse_rbc(depth=10):
    """Parse short news from rbc.ru.

    Fetches up to ``depth`` posts from RBC's ajax short-news feed, then
    requests each post's own page to read its publication date.

    :param depth: posts count; the feed endpoint only accepts values
                  below 100 (default = 10)
    :return: list of ``[link, headline, "YYYY-MM-DD"]`` entries;
             empty list when ``depth`` exceeds the endpoint's limit
    """
    if depth > 99:
        # Endpoint limit; return an empty list (not None) so callers
        # can always iterate the result.
        return []

    # Aggregator url parameterized by current time (seconds) and post count.
    url = ("https://www.rbc.ru/v10/ajax/get-news-feed-short/project/rbcnews/lastDate/"
           + str(round(time.time())) + "/limit/" + str(depth))
    headers = {  # for less robot fingerprints
        "Accept": "*/*",
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36"
    }

    logger.info("Waiting for server response")
    # timeout prevents an unresponsive server from hanging the script.
    response = requests.get(url, headers=headers, timeout=30)

    # The feed arrives as JSON-escaped HTML: collapse repeated spaces and
    # unescape \n and \" before handing it to the HTML parser.
    html = re.sub(r"( )+", " ", response.text)
    html = re.sub(r"\\n", "\n", html)
    html = re.sub(r'\\"', '"', html)

    soup = BeautifulSoup(html, "lxml")
    titles = soup.find_all("a", class_="item__link")  # post anchors

    logger.info("Processing posts")
    posts = []
    for title in titles:
        if not title:
            continue
        head = title.text.strip()
        link = title["href"]
        # Reuse the same headers so the per-post request is not flagged either.
        post_page = requests.get(link, headers=headers, timeout=30)
        page_date = BeautifulSoup(post_page.text, "html.parser").find(
            "span", class_="article__header__date")
        # Guard both the tag and its "content" attribute: either may be
        # missing, and slicing None would raise TypeError.
        if page_date and page_date.get("content"):
            post_date = page_date.get("content")[:10]  # "YYYY-MM-DD" prefix
            post = [link, head, post_date]
            posts.append(post)
            logger.debug(post)
    return posts


def parse_nyt():
    """Parser for the NYTimes personal-tech section.

    :return: list of ``[link, headline, "YYYY-MM-DD"]`` entries
    """
    # Site base url and section path.
    url = "https://www.nytimes.com"
    catalog = "/section/technology/personaltech"
    headers = {  # for less robot fingerprints
        "Accept": "*/*",
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36"
    }

    # timeout prevents an unresponsive server from hanging the script.
    request = requests.get(url + catalog, headers=headers, timeout=30)
    soup = BeautifulSoup(request.text, "lxml")
    titles = soup.find_all("li", class_="css-ye6x8s")   # post cards (<li>)

    posts = []
    for title in titles:
        if not title:
            continue
        heading = title.find("h2")
        anchor = title.find("a")
        if heading is None or anchor is None:
            # Skip list items that are not regular article cards.
            continue
        link = url + anchor.get("href")

        # Article URLs embed the date as /YYYY/MM/DD/; locate it by the
        # century prefix "20" (assumes no earlier "20" in the path — TODO confirm).
        date_index = link.find("20")
        if date_index == -1:
            continue  # no recognizable date in the url — skip the post
        post_date = link[date_index:date_index + 10].replace('/', '-')

        posts.append([link, heading.text, post_date])

    return posts


def main():
    """Fetch RBC posts and print each one's link and headline."""
    for link, head, _date in parse_rbc():
        print(link, head)


if __name__ == "__main__":
    main()
