import pandas as pd
from bs4 import BeautifulSoup as bs
from fake_useragent import UserAgent
import grequests as req

from tqdm import tqdm

from utils import create_folder_if_not_exist, download_image, normalize_string, normalize_price, \
    current_datetime_to_filename_format

# Root URL of the target shop; not referenced in this file's visible code —
# presumably used by callers or a sibling module (TODO confirm).
base_domain = "https://www.riolis.ru/"


def parse_good(r) -> dict:
    """Parse a single product page into a flat dict.

    Args:
        r: HTTP response object exposing ``.text`` (HTML body) and ``.url``.

    Returns:
        dict with keys ``price`` (None when missing/unparseable), ``link``,
        ``name``, ``image_link``, ``image_filename``, one key per parameter
        row of the details table, and ``description``.
    """
    soup = bs(r.text, "html.parser")
    res = {}
    try:
        # The price element may be absent or malformed for some goods;
        # fall back to None rather than aborting the whole parse.
        price_text = soup.find("span", class_="detail_basket v_price").text
        res["price"] = normalize_price(price_text.replace("₷", "").strip())
    except Exception:  # was a bare except: — don't swallow SystemExit/KeyboardInterrupt
        res["price"] = None
    res["link"] = r.url
    res["name"] = soup.find("span", itemprop="name").text
    res["image_link"] = soup.find("img", id="zoomit").get("src")
    res["image_filename"] = download_image(res["image_link"])
    details = soup.find("div", class_="detail_descr2")
    rows = details.find_all("tr")  # hoisted: used for both the params and the description
    for param in rows[:-1]:  # last row is the description, handled below
        key, value = param.find_all("td")
        res[key.text.replace(":", "").strip()] = value.text.strip()
    res["description"] = rows[-1].text.strip()
    return res


def parse_page(url: str) -> list[dict]:
    """Get all items from one catalog page (e.g. https://www.riolis.ru/latest/).

    Fixes two bugs in the original:
      * each good's URL *string* was passed straight to parse_good(), which
        expects a response object with ``.text``/``.url``;
      * ``req`` is grequests, whose ``get()`` only builds an unsent
        AsyncRequest — requests must be dispatched with ``grequests.map``.
    """
    headers = {"User-Agent": UserAgent().random}
    # grequests.get only constructs the request; .map actually sends it.
    (page,) = req.map([req.get(url, headers=headers)])
    soup = bs(page.text, "html.parser")
    goods_area = soup.find("ul", class_="catalog_main")
    good_links = [
        good.find("a", class_="image").get("href")
        for good in goods_area.find_all("li")
    ]
    # Fetch every good's page, then parse each response.
    responses = req.map([req.get(link, headers=headers) for link in good_links])
    return [parse_good(r) for r in responses]


def convert_str_to_utf8(string: str) -> str:
    """Round-trip *string* through UTF-8, silently dropping unencodable characters."""
    cleaned_bytes = string.encode("utf-8", errors="ignore")
    return cleaned_bytes.decode("utf-8")


def list_of_dict_to_csv_saver(list_of_dict: list[dict], filename: str) -> None:
    """Save a list of dicts to a CSV file; column names come from the first dict's keys.

    Fixes from the original:
      * removed the stray ``f.write(";")`` emitted after every data row,
        which corrupted the CSV;
      * an empty input now writes nothing instead of raising IndexError;
      * non-str values (e.g. the ``None`` price parse_good can produce)
        are stringified instead of crashing ``str.encode``.
    """
    if not list_of_dict:
        return
    with open(filename, "w", encoding="utf8") as f:
        f.write(",".join(list_of_dict[0].keys()) + "\n")
        for row in list_of_dict:
            cells = (convert_str_to_utf8(str(v)) for v in row.values())
            f.write(",".join(map(normalize_string, cells)) + "\n")


def read_list_from_txt(filname: str) -> list:
    """Read a newline-separated list from a text file.

    Splits on "\n" and drops the final element, i.e. the empty string after
    a trailing newline (or the last line if the file lacks one).
    """
    with open(filname, "r") as f:
        entries = f.read().split("\n")
    return entries[:-1]


if __name__ == "__main__":
    items = []
    create_folder_if_not_exist("images")
    urls = read_list_from_txt("goods_ids.txt")
    res = []
    params = [{
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"}
        for _ in range(len(urls))]
    rs = [req.get(url, headers=headers) for url, headers in zip(urls, params)]
    for r in tqdm(req.imap(rs, size=32), total=len(urls)):
        res.append(parse_good(r))

    pd = pd.DataFrame(res)
    pd.to_excel(f"res_{current_datetime_to_filename_format()}.xlsx")