import sys
import subprocess
import json
import re

import youtube_dl
# youtube-dl options for metadata extraction only
ydl_opts = {
    "ignoreerrors": True,   # keep going when an entry fails to extract
    "extract_flat": True,   # list playlist entries without resolving each one
}

# error flags:
# 000  ie. 0   : no errors
# xx1  ie. 1,3 : TubeUp error or uncategorised youtube-dl/video error
# x1x  ie. 2,3 : could not normalise video ID
exit_code = 0

# playlists with more entries than this are ignored outright
max_playlist = 100

# target url -> source page, loaded from links.json
links = {}
# video id/url -> source page, produced by the extraction pass
videoIDs = {}

# outcome buckets, keyed by link/video id; values are report strings
error = {}        # for links which failed due to TubeUp error
unavailable = {}  # for links which were otherwise unavailable
geoblock = {}     # for links which failed due to geo-blocking
already = {}      # for links which have already been archived
ignored = {}      # for links which did not attempt to archive
success = {}      # for links which were successfully archived

# load in list of URLs to archive (maps target url -> source page)
try:
    with open("links.json") as linksFile:
        links = json.load(linksFile)
except Exception:
    # report then re-raise so the run fails loudly rather than continuing
    # with an empty link set
    print("ERROR: Unexpected error opening 'links.json' for read:", sys.exc_info()[0])
    raise

# read master list of previously downloaded links to prevent redundant downloads
try:
    # read-only access is all this script needs; "r+" previously requested
    # write access that was never used
    with open("lists/masterList", "r") as masterFileR:
        doneList = masterFileR.read()
except Exception:
    print("ERROR: Unexpected error opening 'masterList' for read:", sys.exc_info()[0])
    raise

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    # iterate through the target url and source page pairs
    for url, page in links.items():
        # standardise page links
        page = re.sub("^http://", "https://", page)  # change http:// to https://
        page = re.sub("/Talk:", "/", page)           # remove 'Talk:' page prefix

        # extract videos from playlists
        # TODO test with channels and playlists on ALL services

        # if the url is a YouTube search, ignore
        if "youtube.com/results" in url:
            ignored[url] = " : REASON is a search url. (" + page + ")"
            continue

        # if the url is a YouTube URL likely to be problematic, ignore
        # TODO overreach is possible! valid channel names may be included in this branch
        problem_markers = (
            "...",
            "youtube.com/#",
            "youtube.com/edit",
            "youtube.com/embed",
            "youtube.com/index",
            "youtube.com/profile",
            "youtube.com/redirect",
            "youtube.com/subscription_center",
            "youtube.com/view_play_list",
        )
        if any(marker in url for marker in problem_markers):
            ignored[url] = " : REASON is an unhandled YouTube url. (" + page + ")"
            continue

        # if YouTube username is given instead of channel ID, get the channel ID
        # this is needed because youtube-dl currently cannot find the number of videos
        #     a channel has from the channel name.
        if "youtube.com" in url and "/watch?" not in url and "/playlist?" not in url:
            print(url)
            fetched = subprocess.run(["wget", "-O-", url], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
            page = page + " (appeared as " + url + ")"
            canonical = re.search('<link rel="canonical" href="([^"]*)', fetched.stdout.decode("utf-8"))
            if not canonical:
                error[url] = "ERROR: could not extract cid (" + page + ")"
                continue
            url = canonical.group(1)

        # get information for this URL
        meta = ydl.extract_info(url, download=False)

        if not meta:
            # link is erroneous, but continue into next loop anyway to allow
            # proper caching and error reporting
            videoIDs[url] = page
            continue

        # if channel ID or similar, replace with the playlist of all the channel's videos
        if "extractor" in meta and "youtube:channel" in meta["extractor"]:
            # get the channel playlist info instead of the channel info
            meta = ydl.extract_info(meta['url'], download=False)

        # add video/s into videoIDs dictionary
        if "_type" in meta and meta['_type'] == 'playlist':
            entry_count = len(meta['entries'])
            # ignore playlist if it has too many videos
            if entry_count > max_playlist:
                print("ignoring " + url + ": contained " + str(entry_count) + " videos.")
                ignored[url] = " : REASON contained " + str(entry_count) + " videos. (" + page + ")"
                continue
            # iterate through the playlist and add all video ids to videoIDs
            for entry in meta['entries']:
                if not entry:
                    print("ERROR: invalid video in playlist " + url + ". Dumping...")
                    print(meta)
                    continue
                # TODO confirm assumption that source url will never contain space character
                if "(appeared as " in page:
                    videoIDs[entry["url"]] = page
                else:
                    videoIDs[entry["url"]] = page + " (appeared as " + url + ")"
        else:
            # add video id to videoIDs
            videoIDs[url] = page

def check_already_on_archive_org(check_url):
    """Return True when check_url exists on archive.org and was uploaded by
    someone else.

    Items uploaded by this account (uploader name 'Mandy, Defeater' appears in
    the page) are reported as absent so the main loop re-runs tubeup on them,
    fixing errors and recording them in the success list.
    """
    print("checking " + check_url)
    wget = subprocess.run(["wget", "-O-", check_url], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    print(wget.returncode)
    # wget exits non-zero on 404, so returncode 0 means the item page exists
    if wget.returncode == 0:
        out = wget.stdout.decode("utf-8")
        if re.search('Mandy, Defeater', out):
            print("I've uploaded this! Not skipping, to fix errors and add to SuccessList")
        else:
            return True
    return False

# iterate through the target video id and source page pairs
for vid, page in videoIDs.items():
    # normalise format of IDs
    # TODO check Dailymotion and Vimeo, especially channels/playlists
    vid = re.sub(r".*list=([^=&?#]+).*", r"\1", vid)       # isolate playlist ID
    vid = re.sub(r".*v=([^=&?#]+).*", r"\1", vid)          # isolate video ID
    vid = re.sub(r".*youtu\.be/([^=&?#]+).*", r"\1", vid)  # isolate video ID
    vid = re.sub(r".*dai\.ly/", "dailymotion/video/", vid) # revert shortened url domain
    vid = re.sub(r"^https?://", "", vid)                   # remove protocol
    vid = re.sub(r"^(www|m)\.", "", vid)                   # remove some third-level domains
    # check to find cases that aren't normalised
    if re.search("[=&?#]", vid):
        print("WARNING: got non-normalised ID: " + vid)
        # signal non-fatal issue; |= keeps the documented "x1x" bit flag even
        # when several IDs fail to normalise (+= would accumulate past it)
        exit_code |= 2

    # check if video id has not already been archived successfully
    # NOTE(review): this is a substring test against the whole masterList text,
    # so an ID that happens to occur inside a longer entry is skipped — confirm
    # this is acceptable
    if vid not in doneList:
        # TODO swap out collection for a custom one
        tubeupCommand = ["tubeup", "--metadata=collection:opensource_movies", "--source", page]

        # TODO remove snake_case
        print(vid + " not in list!")
        # check if this video already exists on archive.org
        already_on_IA = False
        # youtube videos: normalised IDs are bare 11-character strings
        if len(vid) == 11:
            already_on_IA = check_already_on_archive_org("https://archive.org/details/youtube-" + vid)
        # dailymotion
        elif "dailymotion.com/video/" in vid:
            vid_ID = vid.split('/')[2]
            already_on_IA = check_already_on_archive_org("https://archive.org/details/dailymotion-" + vid_ID)
        # vimeo
        elif "vimeo.com" in vid:
            vid_ID = vid.split('/')[1]
            if vid_ID.isdecimal():
                already_on_IA = check_already_on_archive_org("https://archive.org/details/vimeo-" + vid_ID)
            else:
                # fall through and let tubeup attempt it anyway
                print("ERROR: could not extract vimeo video ID: " + vid_ID)
        else:
            print("not a known video ID, skipping archive.org check")
        if already_on_IA:
            print(vid + " is already on archive.org")
            already[vid] = page
            continue

        # if the video ID begins with a hyphen, it will be interpreted as an
        # option causing an error; prevent this by adding the youtube URL to the ID
        if vid.startswith("-"):
            tubeupCommand.append("https://youtube.com/watch?v=" + vid)
        else:
            tubeupCommand.append(vid)
        # archive using modified tubeup, echoing its output as it runs
        proc = subprocess.Popen(tubeupCommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

        out = ""
        # TODO refactor this? / TODO might not capture every single line if multiple
        #   are printed fast enough / TODO: separate stdout and stderr (requires changing above line)
        while True:
            nextline = proc.stdout.readline().decode("utf-8")
            if nextline == "" and proc.poll() is not None:
                break
            sys.stdout.write(nextline)
            sys.stdout.flush()
            out += nextline

        # if tubeup was unsuccessful, report error and continue
        if proc.returncode != 0:
            errorMatches = re.search('(ERROR: .*)|(error.*)', out)
            if errorMatches:
                errorMsg = errorMatches.group(0)
                # TODO find all possible error messages!
                # TODO categorise errors TODO check different output for different sites?

                # as of 2018, these are the discovered YouTube unavailable messages
                #
                # ERROR: This video is unavailable.
                # ERROR: This video has been removed by the user
                # ERROR: The uploader has not made this video available in your country.
                # ERROR: This video contains content from [company], who has blocked it in your country on copyright grounds.
                # ERROR: "[video title] ..." The YouTube account associated with this video has been terminated due to multiple third-party notifications of copyright infringement.

                # known Vimeo messages
                # ERROR: Unable to download webpage: HTTP Error 404: Not Found (caused by <HTTPError 404: 'Not Found'>); please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see  https://yt-dl.org/update  on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.

                # known Dailymotion messages

                # detect archive.org Access Denied error and ignore, as it implies the file is already archived
                if "Access Denied - You lack sufficient privileges to write to this item" in errorMsg:
                    pass
                # detect geoblock
                elif "in your country" in errorMsg:
                    geoblock[vid] = " : " + errorMsg + " (" + page + ")"
                # detect removed/terminated/missing videos (YouTube + Vimeo messages above)
                elif any(token in errorMsg for token in ("unavailable", "removed", "terminated", "404: Not Found")):
                    unavailable[vid] = " : " + errorMsg + " (" + page + ")"
                # otherwise categorise as generic error
                else:
                    error[vid] = " : " + errorMsg + " (" + page + ")"
            else:
                error[vid] = " : [non-zero exit status from tubeup, no message detected] (" + page + ")"
        # otherwise report success
        else:
            # TODO better way to store the "appeared as" data, maybe own dictionary?
            archived_from = page.split(" ", 1)[0]
            appeared_as = ""
            if archived_from != page:
                appeared_as = page.split(" ", 1)[1] + " "

            archivedLinkMatch = re.search("https://archive.org/details/.*", out)
            if archivedLinkMatch:
                success[vid] = " : " + archivedLinkMatch.group(0) + " " + appeared_as
            else:
                # tubeup exited cleanly but printed no item link; record as an
                # error instead of crashing on a None match (previous behaviour)
                error[vid] = " : [tubeup exited 0 but no archive.org link detected] (" + page + ")"
    else:
        print("already downloaded " + vid)
        already[vid] = page

def append_report_list(path, listName, mapping):
    """Append one 'key value' line per entry of mapping to the file at path.

    Reports and re-raises any failure so the run exits non-zero rather than
    silently losing report data.
    """
    try:
        with open(path, "a") as outFile:
            for key, value in mapping.items():
                print(key + value, file=outFile)
    except Exception:
        print("ERROR: Unexpected error opening '" + listName + "' for append:", sys.exc_info()[0])
        raise

# append each outcome category to its on-disk report list
append_report_list("lists/errorList", "errorList", error)
append_report_list("lists/unavailableList", "unavailableList", unavailable)
append_report_list("lists/geoblockList", "geoblockList", geoblock)
append_report_list("lists/successList", "successList", success)
append_report_list("lists/ignoredList", "ignoredList", ignored)

# store list of previously downloaded links to prevent redundant downloads.
# only successful downloads and links already archived are recorded, so
# errored/unavailable/geo-blocked videos are retried on the next run
try:
    with open("lists/doneList", "a") as doneFileA:
        for vid in already:
            print(vid, file=doneFileA)
        for vid in success:
            print(vid, file=doneFileA)
except Exception:
    print("ERROR: Unexpected error opening 'doneList' for append:", sys.exc_info()[0])
    raise


# report analytics: one line per outcome bucket plus the grand total
tallies = (
    ("Error:", error),
    ("Unavailable:", unavailable),
    ("Geo-blocked:", geoblock),
    ("Ignored:", ignored),
    ("Success:", success),
    ("Already:", already),
)
totalLinks = str(sum(len(bucket) for _, bucket in tallies))
print("")
print("---REPORT---")
for label, bucket in tallies:
    # pad labels to a fixed 13-character width so the counts line up
    print(label.ljust(13) + str(len(bucket)) + "/" + totalLinks)
print("")

# signal errors if found
if len(error) > 0:
    exit_code += 1
exit(exit_code)
