import time
import re
from random import random
from fastapi import HTTPException
from bs4 import BeautifulSoup
from urllib.parse import urljoin

import undetected_chromedriver as uc

from scaner.core.config.log_config import MyLogger
from scaner.core.config.project_config import settings
from .. import crud


# Module-level logger instance; the crawler derives a child logger from it.
main_logger = MyLogger()


class RubricatorCrawler:
    """Crawls the Avito footer "rubricator" and builds a nested category tree.

    Every rubric item is a dict of the shape
    ``{"rubric": <name>, "url": <url>}`` with an optional ``"child"`` key
    holding a list of nested items of the same shape.
    """

    my_user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"

    def __init__(self, start_url: str = "https://www.avito.ru/") -> None:
        """
        Args:
            start_url: Site root whose footer rubricator is the crawl entry point.
        """
        self.start_url = start_url
        self.rubric_items: list[dict] = []
        self.logger = main_logger.getChild("scraping catalog")  # type: ignore
        # Base delay in seconds between page loads; the actual delay is
        # randomized in __get_page_by_selenium to look less bot-like.
        self.sleep = 10

    def get_rubricator(self) -> list[dict]:
        """Crawl the whole rubricator and return the normalized rubric tree.

        Returns:
            A list of rubric dicts with normalized relative URLs.
        """
        self.logger.info("Создание рубрикатора продлиться примерно 15 минут")

        self.__get_root_urls()
        self.__get_sub_urls()

        data = self.rubric_items.copy()
        return self.__modify_url_in_rubric_items(data)

    def __modify_url(self, url: str) -> str:
        """Normalize a scraped URL: drop the first path segment and the query.

        "/all/kvartiry/sdam/na_dlitelnyy_srok-ASgBAgICAkSSA8gQ8AeQUg?cd=1"
        -> "/kvartiry/sdam/na_dlitelnyy_srok-ASgBAgICAkSSA8gQ8AeQUg"
        """
        url = re.sub(r"^/\w*/", "/", url)
        url = re.sub(r"\?cd.*", "", url)
        return url

    def __modify_url_in_rubric_items(self, data: list[dict]) -> list[dict]:
        """Normalize "url" fields in place, recursing into "child" lists."""
        for item in data:
            if "url" in item:
                item["url"] = self.__modify_url(item["url"])
            if "child" in item:
                self.__modify_url_in_rubric_items(item["child"])
        return data

    def __get_page_by_selenium(self, url: str) -> str:
        """Load *url* in a headless Chrome and return the rendered HTML.

        Raises:
            HTTPException: 404 when the page could not be loaded.
        """
        # BUGFIX: browser must exist before the try block, otherwise a failure
        # inside uc.Chrome(...) made the finally clause raise NameError and
        # mask the original error.
        browser = None
        try:
            browser = uc.Chrome(headless=True, use_subprocess=False)
            browser.get(url)

            # Randomized delay: also gives client-side JS time to render.
            delay = self.sleep + (self.sleep * random())
            self.logger.info(f"sleep {delay} sec.")  # type: ignore
            time.sleep(delay)

            return browser.page_source

        except Exception as err:
            self.logger.error(f"Ошибка загрузки страницы {url}, {err}")  # type: ignore
            raise HTTPException(status_code=404)

        finally:
            if browser is not None:
                # quit() terminates the driver and closes every window; a
                # preceding close() call is redundant.
                browser.quit()

    def __get_root_urls(self) -> None:
        """Parse the footer rubricator of the start page into self.rubric_items."""
        self.logger.info("Создание корневого каталога")  # type: ignore

        html = self.__get_page_by_selenium(self.start_url)
        html_soup = BeautifulSoup(html, "lxml")
        blocks = html_soup.find_all("div", class_="footer-rubricator-block-jFn8W")

        for block in blocks:
            # Hoisted: the original looked the title element up twice.
            title = block.find("div", class_="footer-rubricator-title-UPNik")
            category_name = title.a.text
            category_url = title.a["href"]
            rubric_data = {"rubric": category_name, "url": category_url}

            subcategories = []
            for item in block.find_all("li", class_="footer-rubricator-item-yE619"):
                subcategories.append(
                    {
                        "rubric": item.a.text,
                        "url": item.a["href"],
                    }
                )

            if subcategories:
                rubric_data["child"] = subcategories

            self.rubric_items.append(rubric_data)
            self.logger.info(f"Добавлено: {rubric_data['rubric']}")

    def __scrap_urls(self, url: str) -> list[dict]:
        """Recursively scrape subcategory links reachable from *url*.

        Returns:
            A (possibly empty) list of rubric dicts; entries with a URL
            already seen on this page are skipped.
        """
        html = self.__get_page_by_selenium(url)
        soup = BeautifulSoup(html, "lxml")

        subcategories: list[dict] = []
        seen_urls: set[str] = set()
        # BUGFIX: pre-initialize so the except-handler's log line cannot raise
        # UnboundLocalError when parsing fails before the first assignment.
        subcategory_url = url
        try:
            for block in soup.find_all("li", class_="rubricator-list-item-item-WKnEv"):
                subcategory_name = block.a.text
                subcategory_url = block.a["href"]

                # BUGFIX: the old dedup compared the URL string against
                # dict_values objects and therefore never filtered anything.
                # Skipping before recursing also avoids redundant crawls.
                if subcategory_url in seen_urls:
                    continue
                seen_urls.add(subcategory_url)

                subcategory = {
                    "rubric": subcategory_name,
                    "url": subcategory_url,
                }

                # BUGFIX: hrefs are typically site-relative; resolve them the
                # same way __crawl_sub_urls does (no-op for absolute URLs).
                subs_ = self.__scrap_urls(urljoin(self.start_url, subcategory_url))
                if subs_:
                    subcategory["child"] = subs_

                subcategories.append(subcategory)
                self.logger.info(f"Добавлено: {subcategory['rubric']}")

        except Exception as e:
            self.logger.error(f"Ошибка чтения данных: {e} : адрес {subcategory_url}")

        return subcategories

    def __get_sub_urls(self) -> None:
        """Crawl one level deeper for every root rubric except "Транспорт"."""
        self.logger.info("Сбор подкаталогов каталога")  # type: ignore
        for root_rubric in self.rubric_items:
            if "child" in root_rubric and root_rubric["rubric"] != "Транспорт":
                self.__crawl_sub_urls(root_rubric)

    def __crawl_sub_urls(self, root: dict) -> None:
        """Fill in the "child" lists of *root*'s direct subcategories."""
        for item in root["child"]:
            self.logger.info(f"Сбор подкаталогов каталога {item['rubric']}")  # type: ignore
            subcategories = self.__scrap_urls(urljoin(self.start_url, item["url"]))
            if subcategories:
                item["child"] = subcategories