# Downloads a bundle of resources, recursively traversing.
# Particularly useful for the Haiku backend, which uses nested resources
# extensively

import os
import random
import subprocess
import sys
import urllib
import urllib.parse
from io import StringIO

from lxml.html import parse

from . import aggregator
from .backends.support import ResourceCategory, ResourceRawHTML, Quiz, Forum, FileUpload, ResourcePlain, ResourceLink

# Authenticate with the backend named on the command line.
backend = aggregator.load_backend(sys.argv[1])

# argv layout: [1]=backend, [2]=email/username, [3]=password, [4]=school
identity, password, school = sys.argv[2], sys.argv[3], sys.argv[4]

config = {
    "Email": identity,
    "Username": identity,
    "Password": password,
    "School": school,
}

aggregator.authenticate(backend, config)

# Locate the requested course by title (argv[5]). When several courses
# share the same title, the last match wins (same as a plain scan that
# keeps overwriting without breaking).
courses = backend.courses
matching = [c for c in courses if c.title == sys.argv[5]]
course = matching[-1] if matching else None

if course is None:
    print("Course not found -- maybe you were signed out?")
    sys.exit(1)

# XXX: Code from palette should be refactored XXX
# Memo of already-resolved resource paths (serialised path -> resource).
resource_cache = {}

def path_serialise(path):
    """Join path components into a single slash-separated cache-key string."""
    return "/".join(str(segment) for segment in path)

def find_resource(root, path, ref):
    """Walk `root` (a ResourceCategory tree) down the segment names in
    `path` and return the resource it leads to.

    `ref` is the list of segments already consumed above `root`, so
    `ref + path` is always the full path from the tree root; that full
    path is used as the memo key in `resource_cache`. Exits the process
    when a segment cannot be found; prints a message and implicitly
    returns None when asked to descend into a non-category resource.
    """
    # Whole path consumed: `root` itself is the target.
    if len(path) == 0:
        return root

    # Top-level call (nothing consumed yet): try a cache shortcut. On a
    # hit for the parent path, resolve only the final segment from there.
    if len(ref) == 0:
        try:
            almost = resource_cache[path_serialise(path[:-1])]
            return find_resource(almost, [path[-1]], path[:-1])
        except KeyError:
            pass

    if isinstance(root, ResourceCategory):
        for child in root.children:
            if child.name == path[0]:
                # Memoise under the full path from the root (ref + path).
                rsrc = find_resource(child, path[1:], ref + [path[0]])
                resource_cache[path_serialise(ref + path)] = rsrc
                return rsrc

        # No child matched this segment: fatal for a download script.
        print("Can't find " + path[0])
        sys.exit(1)

    # Tried to descend into a leaf resource (falls through, returns None).
    print("Wrong rsrc type with " + ",".join(path))


# Resolve the resource to dump: wrap the course's resources in a
# synthetic root category and walk down the path given in argv[6].
tree = ResourceCategory("Root", course.resources)
segments = sys.argv[6].split("/")
print(segments)
# The first segment (before the leading separator) is skipped.
root = find_resource(tree, segments[1:], [])

# XXX: Code from palette over

# Special mode where only URLs that we transform (like Google Docs) get
# downloaded, rather than arbitrary external links. Probably safer for
# bandwidth.

DOWNLOAD_TRANSFORMED_ONLY = True

# Helpers for recursively downloading a resource tree to a base path

def write_to(path, content, binary=False):
    """Write `content` to `path`, creating or truncating the file.

    `content` must be bytes when `binary` is True, str otherwise. Text
    is written as UTF-8 explicitly so the dump does not depend on the
    platform's default encoding (non-ASCII HTML would otherwise fail to
    encode on some platforms).
    """
    if binary:
        with open(path, "wb") as f:
            f.write(content)
    else:
        with open(path, "w", encoding="utf-8") as f:
            f.write(content)

def download_external_url(_url, path, name):
    """Download an external URL into directory `path`.

    The URL is first passed through aggregator.transform (which e.g.
    rewrites Google Docs links into direct-download ones). YouTube links
    are delegated to youtube-dl; everything else is fetched through the
    backend's authenticated session and written to `path`/`name`.
    Requires `urllib.parse` to be imported (bare `import urllib` does
    not pull in the submodule).
    """
    url = aggregator.transform(_url)
    # Keep only the leading component of the name so it cannot escape
    # `path` or imply nested directories.
    name = name.split("/")[0]

    # In transformed-only mode, skip URLs the aggregator left untouched
    # (arbitrary external links) to save bandwidth.
    if DOWNLOAD_TRANSFORMED_ONLY and url == _url:
        return

    parts = urllib.parse.urlparse(url)
    # Strip a leading "www." so both host forms compare equal.
    domain = parts.netloc.split("www.")[-1]

    if domain == "youtube.com":
        # Videos need youtube-dl; a plain GET would only fetch the page.
        subprocess.call(["youtube-dl", "-o", path + "/%(title)s.%(ext)s", url])
    else:
        # Otherwise, just wget _in the context of the backend_.
        # By reusing the backend's session, we access to authenticated resources.
        u = backend.session.get(url)
        write_to(path + "/" + name, u.content, binary=True)

# Global suffix used to disambiguate colliding folder names.
COUNT = 1

def download_resource(rsrc, base, download_external=True):
    """Recursively dump `rsrc` into the existing directory `base`.

    rsrc -- a backend resource (category, raw HTML page, plain text,
            link, quiz, forum or file upload)
    base -- directory the resource is written under
    download_external -- when True, links found in HTML pages and
            ResourceLink targets are downloaded via
            download_external_url; when False, only the bare URL of a
            ResourceLink is recorded.
    """
    global COUNT

    # Resource names may contain '/', which is illegal in a file name.
    nom = rsrc.name.replace('/', ' slash ')
    path = base + "/" + nom

    if isinstance(rsrc, ResourceCategory):
        # Categories map to filesystem folders and are dumped recursively.
        try:
            os.mkdir(path)
        except FileExistsError:
            # Name collision: disambiguate with an increasing suffix.
            path = path + str(COUNT)
            os.mkdir(path)
            COUNT = COUNT + 1

        for child in rsrc.children:
            download_resource(child, path)
    elif isinstance(rsrc, ResourceRawHTML):
        urls = []
        names = {}
        html = rsrc.html

        # Harvest linked content from the page, but not too deep.
        if download_external:
            tree = parse(StringIO(html)).getroot()

            for link in tree.cssselect("a"):
                href = link.attrib.get("href")
                if href is None:
                    continue

                urls.append(href)
                text = link.text_content()
                # Anchors with no text get a random six-digit name.
                names[href] = text if text else str(random.randint(100000, 999999))

        if len(urls) > 0:
            # If there are attachments, make a folder for them.
            os.mkdir(path)
            write_to(path + "/index.html", html)
        else:
            # Otherwise, make it a bare file.
            write_to(path + ".html", html)

        for url in urls:
            download_external_url(url, path, names[url])
    elif isinstance(rsrc, ResourcePlain):
        write_to(path + ".txt", rsrc.text)
    elif isinstance(rsrc, ResourceLink):
        # Download the contents only if permitted.
        # Otherwise, just record the URL and let the user deal with it.
        if download_external:
            download_external_url(rsrc.url, base, rsrc.name)
        else:
            write_to(path, rsrc.url)
    elif isinstance(rsrc, (Quiz, Forum, FileUpload)):
        # Interactive resources cannot be dumped; leave a pointer.
        write_to(path + ".txt", "Psst! There's an online " + type(rsrc).__name__ + " waiting. You should check it out!")
    else:
        write_to(path, "Unknown element of type " + type(rsrc).__name__)

download_resource(root, "/dev/shm")
