import json
import re
import subprocess
import sys
import urllib.error
import urllib.request
from subprocess import run

from bs4 import BeautifulSoup

import parsingHelpers

# MediaWiki Special:Log page listing the 500 most recent CommentStreams events
recent_comments_URL = "https://lostmediawiki.com/index.php?title=Special:Log&limit=500&type=commentstreams"
# User-Agent string identifying this bot (links to its introduction page)
user_agent = "mandy-bot (mandy-bot.neocities.org/intro.html)"

def endCrawl():
    """Terminate the crawl with a success exit status (0)."""
    # sys.exit is the documented way to exit a script; the bare exit()
    # builtin is injected by the site module and may be absent.
    sys.exit(0)

def _fetch(url):
    """Download *url* with the bot's User-Agent; return the decoded body or None on failure."""
    request = urllib.request.Request(url, headers={"User-Agent": user_agent})
    try:
        with urllib.request.urlopen(request) as resp:
            return resp.read().decode("utf-8")
    except urllib.error.URLError as e:
        # covers HTTPError (bad status) and network-level failures alike
        print("ERROR: " + str(url) + " failed: " + str(e))
        return None

def getLinksFromRecentComments():
    """Crawl the recent-comments log and harvest links from each comment page.

    Results are accumulated into the module-level ``link_pairs`` dict via
    parsingHelpers.findAndAddLinks. Returns True on success, False if the
    log page itself could not be fetched. Exits with code 109 when the log
    markup is unrecognized, and ends early (via endCrawl) when the newest
    entry matches the module-level ``last`` seen on the previous run.
    """
    # download recent comments page in-process (sends our User-Agent,
    # unlike shelling out to wget)
    page = _fetch(recent_comments_URL)
    print("got url")
    if page is None:
        print("ERROR: could not fetch recent comments page")
        return False

    soup = BeautifulSoup(page, 'html.parser')
    print("got soup")

    # check to see if the last comment has changed since our last run
    first = soup.find("li", class_="mw-logline-commentstreams")
    if first is None:
        print("ERROR: could not find changes")
        exit(109)
    global last
    if str(first) == last:
        # if this exact entry was checked last run, end the run early to
        #    avoid redundant requests
        print("ending crawl early: already checked " + last[:256] + "...")
        parsingHelpers.dumpJSON(link_pairs)
        endCrawl()
    last = str(first)

    # find and download all comment URLs
    for entry in soup.find_all("li", class_="mw-logline-commentstreams"):
        link = entry.find('a', class_=None)
        if link is None:
            continue
        comment_url = "https://www.lostmediawiki.com" + link.get('href')
        comment = _fetch(comment_url)
        print("got " + str(link.get('href')))
        if comment is None:
            # fetch failure already logged by _fetch; skip this comment
            continue
        comment_soup = BeautifulSoup(comment, 'html.parser')
        # guard: contentSub may be missing entirely — .a on None would
        # raise AttributeError in the original code
        content_sub = comment_soup.find(id="contentSub")
        wiki_page_link = content_sub.a if content_sub is not None else None
        # parse comment for links, attributing them to the wiki page when known
        if wiki_page_link is not None:
            parsingHelpers.findAndAddLinks(wiki_page_link.get('href'), comment, link_pairs)
        else:
            parsingHelpers.findAndAddLinks(comment_url, comment, link_pairs)
    return True

# accumulator for discovered links, filled in by getLinksFromRecentComments
link_pairs = {}
last = ""

print("about to read last")

# read file listing the last entry processed to prevent redundant checks
last = parsingHelpers.readLast("last_comment")

print("read last")

if getLinksFromRecentComments():
    # persist progress so the next run can skip already-seen entries
    parsingHelpers.writeLast("last_comment", last)
    parsingHelpers.dumpJSON(link_pairs)
    endCrawl()
else:
    # crawl failed: signal error to the caller (sys.exit, not the site exit())
    sys.exit(1)
