import json
import re
import sys

###
### Regex patterns for recognising links
###

# note: youtube-dl does a great job of ID processing and handling '&' and '?' parameters in the URL which sometimes denote the playlist a video is in
#        because of this, no attempt is made to remove or parse them in this script

# Characters that may legally appear inside a link; anything outside this class terminates a match.
# Written as a raw string: the original plain literal contained invalid escape sequences
# such as "\s", which raise DeprecationWarning/SyntaxWarning on modern Python and are
# slated to become errors. The regex itself is unchanged.
nonSeparatorCharacter = r"[^\s\"'`()[\]{}!.,;<>|~%*\\+]"

# the (?:youtu\.be)? component is to allow theoretically possible URLs such as:
# https://www.youtube.com/watch?v=SORO_rZiy9E&feature=youtu.be&list=PL2vA6ImrBrtctt-hj1mtvqypVpEKjqdEg ,
# since '.' is considered a separating character, such a link would otherwise be interrupted
# before the playlist ID, so only the single video would be archived.
# BUG FIX: the dot in "youtu.be" is now escaped so only the literal token is accepted;
# an unescaped '.' would also have swallowed strings such as "youtuXbe".
youtubeNormalRegex = re.compile(r"youtube\.com/(?:" + nonSeparatorCharacter + r"(?:youtu\.be)?)+", re.IGNORECASE)
youtubeShortenedRegex = re.compile(r"youtu\.be/(?:" + nonSeparatorCharacter + r"(?:youtu\.be)?)+", re.IGNORECASE)
youtubePlaylistRegex = re.compile(r"PL[0-9A-Za-z_-]{32}")

# assumption: valid characters for vimeo channels, video and album IDs are a subset of the valid characters for a youtube ID
# basis: video and album IDs are numeric, channel URLs containing the separator characters were all unavailable
vimeoRegex = re.compile(r"vimeo\.com/" + nonSeparatorCharacter + "+", re.IGNORECASE)

# assumption: valid characters for dailymotion channels, video and album IDs are a subset of the valid characters for a youtube ID
# basis: none
dailymotionNormalRegex = re.compile(r"dailymotion\.com/" + nonSeparatorCharacter + "+", re.IGNORECASE)
dailymotionShortenedRegex = re.compile(r"dai\.ly/" + nonSeparatorCharacter + "+", re.IGNORECASE)

# assumption: valid characters for soundcloud channels, video and album IDs are a subset of the valid characters for a youtube ID
# basis: song titles may only contain letters, numbers, hyphens or underscores. assuming same applies to account URLs
soundcloudRegex = re.compile(r"soundcloud\.com/" + nonSeparatorCharacter + "+", re.IGNORECASE)

def findAndAddLinks(url, content, link_pairs):
    """Scan *content* for media links and record each one in *link_pairs*.

    Each recognised link becomes a key in the link_pairs dict with *url*
    (the page the content came from) as its value, so duplicates collapse
    and every link remembers one source page.

    Mutates link_pairs in place; returns None.
    """
    item_links_list = []

    # substitute XML predefined entities which may interrupt links
    # quantifier is to fix double-encodings such as "&amp;amp;"
    content = re.sub('&(amp;)+', '&', content)
    content = re.sub('&lt;', '<', content)
    content = re.sub('&gt;', '>', content)
    content = re.sub('&quot;', '"', content)
    content = re.sub('&apos;', '\'', content)
    content = re.sub('&#039;', '\'', content)

    # substitute escaped unicode/JSON sequences (each pattern matches a
    # literal backslash followed by the escape code)
    content = re.sub(r'\\n', '\n', content)
    content = re.sub(r'\\u0026', '&', content)
    content = re.sub(r'\\u003C', '<', content)
    content = re.sub(r'\\u003E', '>', content)
    content = re.sub(r'\\u0022', '"', content)
    content = re.sub(r'\\u0027', '\'', content)

    # Tags may interrupt links, but scanning ONLY a tag-stripped copy would
    # destroy any link carried inside an attribute (e.g. an href), so the
    # stripped copy is appended and both versions are scanned.
    # BUG FIX: a newline separator is inserted so a link at the very end of
    # the original text cannot fuse with text at the start of the stripped
    # copy and be misread as one longer link.
    content += "\n" + re.sub('<[^>]*>', '', content)

    # use the module-level compiled regexes to catch media links

    # TODO detect and fix or add warning when limitations are triggered
    # limitation: malformed urls over multiple lines aren't matched. not fixed as removing newlines can corrupt actual links at the end of a line
    # limitation: non-separated links such as "youtube.com/watch?aaaaaaaaaaayoutube.com/watch?bbbbbbbbbbb"
    #              will be misinterpreted as "youtube.com/watch?aaaaaaaaaaayoutube" only

    item_links_list += re.findall(youtubeNormalRegex, content)          # youtube.com/...
    item_links_list += re.findall(youtubeShortenedRegex, content)       # youtu.be/...
    item_links_list += re.findall(vimeoRegex, content)                  # vimeo.com/...
    item_links_list += re.findall(dailymotionNormalRegex, content)      # dailymotion.com/...
    item_links_list += re.findall(dailymotionShortenedRegex, content)   # dai.ly/...
    item_links_list += re.findall(soundcloudRegex, content)             # soundcloud.com/...
    item_links_list += re.findall(youtubePlaylistRegex, content)        # PL#######...
    # TODO find potential vIDs, solves above issue.

    # dict assignment also deduplicates links found in both copies of the text
    for link in item_links_list:
        link_pairs[link] = url

def readLast(filename):
    """Return the entire contents of *filename* (the crawl-resume marker file).

    Prints a diagnostic and re-raises on any OSError so the caller can abort.
    """
    try:
        # "r" rather than the original "r+": the file is only read here,
        # so write permission should not be required
        with open(filename, "r") as last_file_r:
            return last_file_r.read()
    except OSError:
        print("ERROR: Unexpected error opening 'last' for read:", sys.exc_info()[0])
        raise

def writeLast(filename, last):
    """Overwrite *filename* with the string *last* (the crawl-resume marker).

    Prints a diagnostic and re-raises on any OSError so the caller can abort.
    """
    try:
        with open(filename, "w") as last_file_w:
            last_file_w.write(last)
    except OSError:
        print("ERROR: Unexpected error opening 'last' for write:", sys.exc_info()[0])
        raise

def dumpJSON(link_pairs, filename="links.json"):
    """Dump the link -> source-URL mapping to *filename* as JSON.

    The *filename* parameter defaults to the historical hard-coded
    "links.json", so existing callers are unaffected.

    Prints a diagnostic and re-raises on any OSError.
    """
    try:
        with open(filename, 'w') as out_file:
            json.dump(link_pairs, out_file)
    except OSError:
        print(f"ERROR: Unexpected error opening '{filename}':", sys.exc_info()[0])
        raise

def endCrawl(link_pairs):
    """Print crawl statistics and terminate the process.

    Exit codes: 0 on success, 211 for exactly one failure, 212 for 2-9
    failures, 213 for 10 or more.

    NOTE(review): relies on module-level counters (content_count,
    upload_count, ignore_count, none_count, fails) defined elsewhere in
    the file -- confirm they are initialised before this is called.
    """
    # per-item summary of what kind of link (if any) each crawled item had
    print(f"{content_count} had content link")
    print(f"{upload_count} had upload log")
    print(f"{ignore_count} had ignored log")
    print(f"{none_count} had no link")

    print(f"{len(link_pairs)} links recognised.")

    if fails > 0:
        print(f"failure*{fails}")
        if fails == 1:
            exit(211)
        elif fails < 10:
            exit(212)
        else:
            exit(213)

    # exit without error
    exit(0)
