from time import sleep
from os import environ, system
from shutil import copyfileobj

from requests import get
from bs4 import BeautifulSoup

from .funcs import countPercent, loadImage, notifySend, checkPathResources


class ParseRanobe():
    """ Getting data about ranobe.

    Scrapes ranobe (light-novel) metadata -- title, cover image URL,
    description and chapter counters -- from several hosting sites and
    merges the results back into the application's ``data`` dictionary.
    """
    def __init__(self):
        super(ParseRanobe, self).__init__()
        self.percent = 0    # progress indicator, updated by run() via countPercent()
        self.dead = False   # set by breaking() to abort a running run()
        # Cache directory for downloaded cover images.
        self.path = f'{environ["HOME"]}/.cache/anima'

    def __parseRulate(self, soup):
        """ Parsing of site 'https://tl.rulate.ru'. """
        name = soup.find('h1').text.split(' / ')[1]
        info = soup.find('div', id="Info")
        img = f"https://tl.rulate.ru{info.find_all('div')[2].find('img')['src']}"

        # The description block's position varies between pages; probe the
        # <div>s starting at index 12 until one holds non-empty paragraphs.
        idx = 12
        while True:
            paragraphs = info.find_all('div')[idx].find_all('p')
            idx += 1
            if not paragraphs:
                continue
            description = "\n".join(p.text for p in paragraphs
                                    if len(p.text) > 1)
            break

        table = soup.find('table',
            class_="table table-condensed table-striped").find_all('tr')
        toms = soup.find_all('tr', class_="volume_helper")
        tom, set_class, chapters = [], [], []
        row_classes = [row.get('class') for row in table]

        if len(toms) == 0:
            # Single volume: every chapter row shares the 'chapter_row' class.
            tom = 1
            for cls in row_classes:
                if isinstance(cls, list) and 'chapter_row' in cls:
                    joined = " ".join(cls)
                    if joined not in set_class:
                        set_class.append(joined)
        elif len(toms) > 1:
            # Several volumes: one distinct multi-class row style per volume,
            # plus a helper row carrying each volume's number.
            for cls in row_classes:
                if isinstance(cls, list) and len(cls) > 1:
                    joined = " ".join(cls)
                    if joined not in set_class:
                        set_class.append(joined)
            for helper in toms:
                if helper is not None and len(helper.text) > 1:
                    tom.append(helper.text.split()[1])

        # Build "volume.chapter_count" markers; the last entry is both the
        # currently accessible chapter and the known total.
        for pos, cls in enumerate(set_class):
            count = len(soup.find_all('tr', class_=cls))
            if isinstance(tom, int):
                chapters.append(f'{tom}.{count}')
            else:
                chapters.append(f'{tom[pos]}.{count}')
        latest = chapters[-1]

        return name, img, description, latest, latest

    def __parseHub(self, soup):
        """ Parsing of site 'https://ranobehub.org'. """
        name = soup.find('h1', class_="ui huge header").text
        img = soup.find('img', class_="image")['data-src']
        description = soup.find('div',
                                class_="book-description__text").text
        stats = soup.find('div', class_="book-meta-value book-stats")
        chapter = stats.find('strong').text

        # BUG FIX: the original returned the builtin ``all`` function as the
        # total-chapters value; return the parsed counter for both slots.
        return name, img, description, chapter, chapter

    def __parseRu(self, soup):
        """ Parsing of site 'https://ruranobe.ru'. """
        name = soup.find('span', class_="headline__text").text
        url_p = 'https://ruranobe.ru/'
        img = "{0}{1}".format(url_p,
                soup.find_all("img", class_="detail__image")[0]["src"])
        description = soup.find('div', class_="read-more").text
        chapter = soup.find('div', class_="detail__actions").find('a').text

        # BUG FIX: the original returned the builtin ``all`` function as the
        # total-chapters value (and ran a dead loop whose results were
        # discarded); return the latest chapter for both slots instead.
        return name, img, description, chapter, chapter

    # TODO: check validation data
    def __parseRf(self, soup):
        """ Parsing of site 'https://ранобэ.рф'. """
        # Tailwind utility classes are matched verbatim -- these selectors
        # break whenever the site rebuilds its CSS.
        n_head = 'cursor-default md:cursor-pointer font-bold text-2xl '
        n_body = 'md:text-3xl sm:leading-7 lg:leading-10 xl:leading-9 pt-1'
        n_footer = ' text-black-0 dark:text-grayNormal-200 truncate'
        name = soup.find('h1', class_=f"{n_head}{n_body}{n_footer}").text
        description = soup.find('div', class_="BookPage_desc__2rsZC").text
        img = soup.find('img',
            class_="xs:rounded-md md:w-[180px] lg:w-[220px]")['src']
        c_head = 'text-black-0 dark:text-grayNormal-200 '
        c_body = 'hover:text-primary cursor-default md:cursor-pointer'
        c_footer = ' dark:hover:text-primary truncate text-sm md:text-base'
        chapters = soup.find_all('a', class_=f"{c_head}{c_body}{c_footer}")

        # Latest chapter number = the digits of the first (newest) link text.
        number = "".join(ch for ch in chapters[0].text if ch.isnumeric())

        return name, img, description, number, number

    def __iteration(self, link):
        """ Getting data from web scraping.

        Dispatches to the site-specific parser based on the link's host and
        returns (name, img_url, description, chapter, total).
        """
        soup = BeautifulSoup(get(link).text, "html.parser")

        return self.__parseRulate(soup) if 'tl.rulate.ru' in link else \
               self.__parseRf(soup) if 'xn--80ac9aeh6f.xn-' in link else \
               self.__parseHub(soup) if 'ranobehub.org' in link else \
               self.__parseRu(soup)

    def default(self):
        """ Reset progress and the abort flag before a new run. """
        self.percent, self.dead = 0, False

    def getPercent(self):
        """ Return the current progress value. """
        return self.percent

    def breaking(self):
        """ Request abortion of the running update loop. """
        self.dead = True

    def run(self, data, check):
        """ Check or update data of ranobe.

        data  -- application state dictionary, mutated in place.
        check -- when truthy, also refresh name/description/cover image.
        Returns the updated ``data`` and a ``(new_notifications, 2)`` tuple.
        """
        # ``single`` must exist even when every entry is skipped or fails,
        # otherwise ``single + msg`` below raised NameError (original bug).
        msg, log, single = [], '', []
        self.default()
        urls = data['ranobe']['urls']
        for i, v in enumerate(urls):
            try:
                self.percent = countPercent(i, len(urls))
                if self.dead:
                    break
                # Skip entries that are already filled in (check mode) or
                # whose series has ended; throttle a little either way.
                if data['ranobe']['names'][i] and \
                   'description' in data['ranobe']['images'][i] and \
                   data['ranobe']['description'][i] and check or \
                   data['ranobe']['ended'][i] == 'end':
                    sleep(.1 if len(urls) // 60 > 0 else .4)
                    continue
                name, img, description, chapter, total = self.__iteration(v)
                image = f'{"_".join(name.split())}_{img.split("/")[-1]}'

                note, loger, single = notifySend(data['notify']['ranobe'],
                                        data['ranobe']['chapters'][i],
                                        float(chapter), name, "R")
                log += f'{name} = {data["ranobe"]["chapters"][i]}\n' \
                            if loger is None else f'{loger}\n'
                if note is not None:
                    msg.append(note)

                if check:
                    checkPathResources()
                    imgs = None
                    r = get(img, stream=True)
                    try:
                        if r.status_code == 200:
                            # Stream the cover straight into the cache file.
                            with open(f'{self.path}/description/raw/{image}',
                                'wb') as f:
                                r.raw.decode_content = True
                                copyfileobj(r.raw, f)
                            imgs = loadImage(image)
                    finally:
                        r.close()  # original leaked the streamed response
                    element = (imgs or 'icons/ranobe.png', name, description)
                    for j, c in enumerate(('images', 'names', 'description')):
                        data['ranobe'][c][i] = element[j]

                data['ranobe']['access-chapters'][i] = chapter
                data['ranobe']['future-chapters'][i] = total
            except Exception as e:
                # Accumulate the error; the original assigned it to
                # data['ranobe']['log'] only to overwrite it after the loop.
                log += f'Error ==>\n{e}\n'
                # NOTE(review): ``e`` may contain scraped text -- interpolating
                # it into a shell string is an injection risk; prefer
                # subprocess.run(['notify-send', ...], shell=False).
                system(f'notify-send "Error for get data about <ranobe>\n{e}"')
        data['ranobe']['log'] = log
        data['notify']['ranobe'] = single + msg

        return data, (len(msg), 2)
