import requests
import pandas as pd
import time
import datetime
from bs4 import BeautifulSoup as bs4
from config import BUCKETNAME, duckdb

# HTTP headers sent with every scrape request (used by extract()/scrapePages()).
header = {
    # Desktop Chrome-on-Linux user-agent string, so the site serves the normal page.
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
    # NOTE(review): presumably makes the server treat this as an XHR/AJAX request — confirm it is actually required.
    "X-Requested-With": "XMLHttpRequest",
}


def parsePage(page):
    """Parse one gitverse.ru explore page into a DataFrame of repositories.

    Parameters
    ----------
    page : bs4.BeautifulSoup
        Parsed HTML of an explore/repos listing page.

    Returns
    -------
    pd.DataFrame
        One row per repository card with columns link, desc, stars, forks,
        unknown_meta, author, title, lang, created_at (datetime64[ns]).
        Empty DataFrame when the page contains no repository cards.
    """
    # Bug fix: the original attrs dict was {"class": "flex", "class": "flex-col"};
    # Python keeps only the last duplicate key, so the effective filter has
    # always been "flex-col" — the dead "flex" entry is removed here.
    rows = []
    # Hoisted out of the loop: one scrape date for the whole page instead of
    # re-formatting datetime.now() per row.
    created_at = datetime.datetime.now().strftime('%Y-%m-%d')
    for ultag in page.find_all("ul", {"class": "flex-col"}):
        for litag in ultag.find_all("li"):
            a = litag.find("a")
            if a is None:
                continue  # malformed card without a repo link — skip, don't crash
            description_el = litag.find("p", {"class": "text-base text-gr-50"})
            lang_el = litag.find(
                "div", {"class": "flex items-center gap-2 text-gr-50"})
            meta = litag.find_all("div", {"class": "flex items-center gap-2"})
            if len(meta) < 4:
                continue  # unexpected card layout — skip rather than IndexError
            # maxsplit=1 so a "/" inside the repo title doesn't break unpacking
            author, title = meta[0].text.split("/", 1)
            rows.append({
                "link": 'https://gitverse.ru' + a["href"],
                "desc": description_el.text if description_el else None,
                "stars": int(meta[2].text),
                "forks": int(meta[1].text),
                "unknown_meta": int(meta[3].text),  # NOTE(review): third counter on the card — meaning unconfirmed
                "author": author,
                "title": title,
                "lang": lang_el.text if lang_el else None,
                "created_at": created_at,
            })
    if not rows:
        return pd.DataFrame()
    df = pd.DataFrame(rows)
    df['created_at'] = df['created_at'].astype('datetime64[ns]')
    return df


def extract(url, headers):
    """Fetch *url* with the given HTTP *headers* and parse the repo list.

    Bug fix: the function previously ignored its ``headers`` parameter and
    always read the module-level ``header`` dict; it now honors the argument.
    Existing call sites pass ``header``, so their behavior is unchanged.

    Parameters
    ----------
    url : str
        Page URL to fetch.
    headers : dict
        HTTP headers to send with the request.

    Returns
    -------
    pd.DataFrame
        Result of parsePage() on the fetched HTML.
    """
    r = requests.get(url, headers=headers)
    page = bs4(r.text, "html.parser")
    return parsePage(page)


def scrapePages() -> pd.DataFrame:
    """Scrape consecutive explore/repos pages until an empty page is hit.

    Iterates page numbers starting at 1, fetching each page via extract()
    with the module-level ``header``; stops at the first page that yields
    no repositories. Prints each URL and the total elapsed time.

    Returns
    -------
    pd.DataFrame
        All scraped repositories concatenated with a fresh index; empty
        DataFrame when the very first page is already empty.
    """
    start = time.perf_counter()
    # Collect per-page frames and concatenate once at the end — calling
    # pd.concat inside the loop re-copies the accumulated data every
    # iteration (accidentally quadratic).
    frames = []
    page_num = 1
    while True:
        url = f"https://gitverse.ru/explore/repos?page={page_num}&sort=newest"
        print(url)
        repos_from_page = extract(url, header)
        if repos_from_page.empty:
            break  # an empty page means we ran past the last listing page
        frames.append(repos_from_page)
        page_num += 1
    end = time.perf_counter()
    print(f"Time taken: {end - start} seconds")
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
