# Debug marker emitted before the imports run, so a hang during import
# (e.g. a slow bs4 load) is distinguishable from a hang during the crawl.
print("started parse")
import json
import re
import subprocess
from subprocess import run
import sys
import urllib.request
from bs4 import BeautifulSoup

import parsingHelpers

# MediaWiki Special:RecentChanges feed: the last 30 days, capped at 75 entries.
recent_changes_URL = "https://www.lostmediawiki.com/index.php?title=Special:RecentChanges&days=30&limit=75"
# Identifying user-agent string for this bot (links to the bot's info page).
user_agent = "mandy-bot (mandy-bot.neocities.org/intro.html)"

def endCrawl():
    """Terminate the crawl successfully (process exit status 0)."""
    # sys.exit is the supported way to exit a script; the bare exit() builtin
    # is injected by the `site` module for interactive use and may be absent.
    sys.exit(0)

def getLinksFromRecentChanges():
    """Fetch the Recent Changes page and harvest links from every changed article.

    Side effects:
      * updates the module-global ``last`` with the newest change entry's HTML
      * adds links found in each article to the module-global ``link_pairs``
        (via parsingHelpers.findAndAddLinks)
      * exits the process on download failure (100 + wget's return code),
        on an unparseable page (109), or cleanly via endCrawl() when the
        newest change matches the one recorded from the previous run

    Returns:
        True once all changed articles have been processed.
    """
    global last

    # Download the Recent Changes page.  The bot's user_agent is now actually
    # sent with the request (it was previously defined but never used).
    proc = subprocess.run(
        ["wget", "-O-", "--user-agent=" + user_agent, recent_changes_URL],
        stdout=subprocess.PIPE,
    )
    if proc.returncode != 0:
        print("ERROR: wget returned " + str(proc.returncode) + " for recent changes.")
        sys.exit(100 + proc.returncode)
    recent_changes = proc.stdout.decode('utf-8')
    print("got url")

    # The old `if proc.returncode == 0` re-check here was a tautology (we just
    # exited on non-zero above), so its dead `else` branch has been removed.
    soup = BeautifulSoup(recent_changes, 'html.parser')
    print("got soup")

    # Locate the newest change entry; if the expected markup is missing the
    # wiki's layout has probably changed and we cannot continue safely.
    first = soup.find("td", class_="mw-changeslist-line-inner")
    if first is None:
        print("ERROR: could not find changes")
        sys.exit(109)

    # If the newest entry is identical to the one recorded on the previous
    # run, nothing has changed since then: dump state and end early to avoid
    # redundant requests.
    if str(first) == last:
        print("ending crawl early: already checked " + last[:256] + "...")
        parsingHelpers.dumpJSON(link_pairs)
        endCrawl()
    last = str(first)

    # Download every changed article and scan it for links.
    # TODO: mw-changeslist-title may not capture all items of interest.
    titles = soup.find_all("a", class_="mw-changeslist-title")
    for link in titles:
        article_url = "https://www.lostmediawiki.com" + link.get('href')
        article_req = subprocess.run(
            ["wget", "-O-", "--user-agent=" + user_agent, article_url],
            stdout=subprocess.PIPE,
        )
        print("got " + str(link.get('href')))
        if article_req.returncode == 0:
            # Parse the article for links, accumulating into link_pairs.
            article_text = article_req.stdout.decode('utf-8')
            parsingHelpers.findAndAddLinks(article_url, article_text, link_pairs)
        else:
            # Best-effort: log the failed article and continue with the rest.
            print("ERROR: " + str(article_url) + " sent code " + str(article_req.returncode))
    return True

# --- script entry point (runs at module level) ---

# Accumulates the links discovered across all crawled articles; filled in by
# parsingHelpers.findAndAddLinks.  NOTE(review): assumed to map link URLs to
# source articles -- confirm against parsingHelpers.
link_pairs = {}
last = ""
print("call readLast")
# Read the record of the last change entry processed, so a repeat run with no
# new edits can stop early instead of re-downloading every article.
last = parsingHelpers.readLast("last_article")
print("if getLinks")
if getLinksFromRecentChanges():
    # Persist the newest-seen entry for the next run, dump the harvested
    # links, and exit cleanly.
    parsingHelpers.writeLast("last_article", last)
    parsingHelpers.dumpJSON(link_pairs)
    endCrawl()
print("fell through")
# Should be unreachable (the crawl either exits itself or returns True);
# exit with a generic error status if we somehow get here.
sys.exit(1)

