import json
from urllib.parse import urlencode
import requests as rq
from bs4 import BeautifulSoup as bs
import lxml
import os
from utils import *

# Search endpoint for getyarn.io clip queries (query string is appended).
url = 'https://getyarn.io/yarn-find'
# Host that serves the raw .mp4 clip files; clip id + '.mp4' is appended.
down_url = 'https://y.yarn.co'


def encode(text, page, genres='Animation'):
    """Build the URL-encoded query string for a yarn-find search.

    Args:
        text: Search phrase.
        page: Result page number; only included when > 0 (the site's
            first page needs no ``p`` parameter).
        genres: Genre filter sent to the site. Defaults to 'Animation'
            to preserve the original hard-coded behavior.

    Returns:
        A URL-encoded query string, e.g. ``text=hi&genres=Animation&p=2``.
    """
    params = {
        'text': text,
        'genres': genres,
    }
    if page > 0:
        params['p'] = page
    return urlencode(params)


def make_url(text, page):
    """Compose the full search URL for *text* on the given result page."""
    query = encode(text, page)
    return url + '?' + query


def get_down_url(link):
    """Return the last path segment of *link* (the clip identifier).

    A string with no '/' is returned unchanged, matching the original
    split-and-index behavior.
    """
    # rsplit with maxsplit=1 takes the final segment without building
    # a list of every intermediate piece.
    return link.rsplit('/', 1)[-1]


def download_file(link, phrase):
    """Stream the file at *link* into the folder derived from *phrase*.

    The destination folder name comes from ``concat_phrase(phrase)`` and
    the file name is the last path segment of *link*.

    Returns:
        The local file path on success, or None on any error. Errors are
        reported to stdout rather than raised, so callers must check the
        return value.
    """
    target = concat_phrase(phrase)
    try:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() / os.makedirs() pair.
        os.makedirs(target, exist_ok=True)
        local_filename = os.path.join(target, link.split('/')[-1])
        # stream in chunks; the timeout keeps one stalled connection
        # from hanging the whole run indefinitely.
        with rq.get(link, stream=True, timeout=30) as r:
            r.raise_for_status()
            with open(local_filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        return local_filename
    except rq.exceptions.HTTPError as e:
        # Server answered with a non-2xx status
        print(f"HTTP error: {e}")
    except rq.exceptions.ConnectionError as e:
        print(f"Connection error: {e}")
    except rq.exceptions.Timeout as e:
        print(f"Timeout error: {e}")
    except rq.exceptions.RequestException as e:
        # Any other requests-level failure
        print(f"Error downloading file: {e}")
    except Exception as e:
        # Last-resort catch (e.g. filesystem errors) so one bad file
        # does not abort the batch; mirrors the original best-effort style.
        print(f"An error occurred: {e}")
    return None


def find_videos(keyword, page):
    """Scrape one search-results page and collect clip metadata.

    Args:
        keyword: Search phrase passed through to the site.
        page: Zero-based result page number.

    Returns:
        A list of dicts with keys 'title', 'transcript' and 'link'
        (a direct .mp4 download URL). Malformed entries are skipped.
    """
    # Timeout keeps a stalled search request from hanging the run.
    res = rq.get(make_url(keyword, page), timeout=30)
    soup = bs(res.content, 'lxml')
    dataset = []
    for clip in soup.find_all('div', 'clip'):
        anchors = clip.find_all('a')
        if len(anchors) < 2:
            continue
        # Original code assumed the title div always exists; guard so a
        # layout change or stray entry is skipped instead of crashing.
        title_div = anchors[0].find('div', class_='title')
        if title_div is None:
            continue
        second_a = anchors[1]
        href = second_a['href']
        dataset.append({
            'title': title_div.text.strip(),
            'transcript': second_a.text.strip(),
            'link': down_url + '/' + get_down_url(href) + '.mp4',
        })
    return dataset


def download_files(dataset, keyword):
    """Download every clip in *dataset*; return only entries that saved OK.

    ``download_file`` handles its own exceptions and signals failure by
    returning None, so success must be detected via the return value —
    the original try/except around it was dead code, and failed
    downloads were wrongly counted (and recorded) as successes.

    Args:
        dataset: List of dicts with at least 'title' and 'link' keys.
        keyword: Search phrase, used to pick the destination folder.

    Returns:
        The subset of *dataset* whose files were actually written.
    """
    ok_datasets = []
    for d in dataset:
        try:
            if download_file(d['link'], keyword) is not None:
                print(f'{d["title"]} downloaded')
                ok_datasets.append(d)
            else:
                print(f'error downloading {d["title"]}')
        except Exception as e:
            # Unexpected failure outside download_file's own handling
            # (e.g. a missing dict key); keep going with the rest.
            print(f'error downloading {d["title"]}')
            print(e)
    return ok_datasets


def download_npages(keyword, n):
    """Download the first *n* result pages for *keyword* and save the
    collected clip metadata to ``<folder>/<keyword>.json``."""
    folder_name = concat_phrase(keyword)
    all_meta = []
    for page in range(n):
        print(f'downloading page: {page}')
        found = find_videos(keyword, page)
        saved = download_files(found, keyword)
        print(f'page {page} downloaded')
        all_meta.extend(saved)
    with open(f'{folder_name}/{keyword}.json', 'w') as f:
        json.dump(all_meta, f, indent=4)
        print(f'all done, {len(all_meta)} files downloaded')


if __name__ == '__main__':
    # Guard the entry point so importing this module no longer triggers
    # network downloads as a side effect. Behavior when run as a script
    # is unchanged: download the first results page for the phrase
    # supplied by utils.get_phrase().
    download_npages(get_phrase(), 1)
