#!/usr/bin/python
import sys,urllib
from time import sleep

def usage():
    print "%s <volume #> <issue #>"

#Given a URL (starts with http://), returns the html code for that page
#as a list of lines (one string per line, newlines included).
def grabPage(url):
    page = urllib.urlopen(url)
    try:
        #readlines() can raise on a network error; the try/finally
        #guarantees the connection is closed either way (the original
        #leaked the handle on that path).
        pagedata = page.readlines()
    finally:
        page.close()
    return pagedata

#Seconds to pause between successive page fetches (0 = no delay).
sleepytime=0

#Given a list of urls, go through them and grab their html, sleeping
#between requests so we don't hammer the server.  Returns a list of
#page-html lists, in the same order as the input urls.
def grabPages(urls):
    #The original buried sleep() inside a comprehension condition
    #("if not(sleep(...))") purely for its side effect; an explicit
    #loop says the same thing honestly.  Order preserved: pause first,
    #then fetch, for every url.
    pages = []
    for url in urls:
        sleep(sleepytime)
        pages.append(grabPage(url))
    return pages

#Given a PRL issue page's html (list of lines), find all the relevant
#Letter URLs and return them as absolute URLs.
def parsePrlIssue(issue_html):
    urlbase="http://prl.aps.org"
    #find the "Letters" section header (NameError if absent --
    #unchanged from the original; the page layout is assumed)
    for i,line in enumerate(issue_html):
        if "<h2>LETTERS" in line:
            start=i
            break
    #find the end of the letters section (the next h2-header).  The
    #scan runs over the slice issue_html[start+1:], so i is relative to
    #that slice; add start+1 back to get an index into the full list.
    #(Bug fix: the original stored the slice-relative i directly, which
    #made the letters slice below empty or truncated.)
    for i,line in enumerate(issue_html[start+1:]):
        if "<h2>" in line:
            end=i+start+1
            break

    #grep for abstract links inside the letters section; the URL is the
    #first double-quoted attribute value on each matching line
    return [urlbase+line.split("\"")[1] for line in issue_html[start:end] if "abstract" in line]

#Given a PRL entry page html, generate a file and dump it in the paperDB folder
#File name follows description in PaperScraper Doc
def parsePrlEntry(page_html):
    title=""

    authors=list()
    institutes=list()
    auth_inst=list() #tells you which author belongs to which institute

    date="" #this is ill defined thus far, is it the recieved date or the published date, can we assume we'll get either of these and in what format?

    abstract=""

    url="" #PRL has a static url assigned to each entry, use this instead of the table of contents url
    doi=""

    #Get Title
    for i,line in enumerate(page_html):
        if "<h1>" in line:
            trim=i
            title=line.split("<h1>")[1].split("</h1>")[0]
    #Trim
    page_html=page_html[trim:]

    #Get Authors and Institutes
    for i,line in enumerate(page_html):
        if "id=\'aps-authors\'" in line:
            start=i
            break
    for i,line in enumerate(page_html[start:]):
        if "</div>" in line:
            end=i+start
            break
    cnt=0
    for line in page_html[start:end]:
        if "href=" in line:
            new_auth=[obf.split(">")[1].split("<")[0] for obf in line.split("href=")[1:]]
            authors+=new_auth
            auth_inst+= [cnt]*len(new_auth)
        if "span " in line:
            cnt+=1
            institutes.append(line.split("span")[1].split(">")[1].split("<")[0])
    #Trim
    page_html=page_html[end:]

    #Get Date
    for i,line in enumerate(page_html):
        if "aps-marker-logos" in line:
            trim=i
            break
    date=page_html[trim+1]

    #Get Abstract
    for i,line in enumerate(page_html):
        if "aps-abstractbox" in line:
            abstract=line.split("<p>")[1].split("<\p>")[0]
            trim=i
            break
    if len(abstract.strip())<3:
        abstract="No Abstract"
    #Trim
    page_html=page_html[trim:]

    #Get URL,DOI
    for i,line in enumerate(page_html):
        if "aps-article-info" in line:
            trim=i
            break
    url=page_html[trim+3].split("table-cell\'>")[1].split("</div>")[0]
    doi=page_html[trim+7].split("table-cell\'>")[1].split("</div>")[0]

    print title
    print authors
    print institutes
    print auth_inst
    print date
    print abstract
    print url
    print doi
    print "="*50
    
####################
# MAIN STARTS HERE #
####################

#Database where the scraped data will be written to
paperDB="./paperDB"

#Validate the command line: exactly a volume number and an issue number
if len(sys.argv)!=3:
    usage()
    #exit() is an interactive-shell helper; sys.exit is the correct
    #call in scripts, and a nonzero code signals the usage error
    sys.exit(1)
volume=int(sys.argv[1])
issue=int(sys.argv[2])

#Grab the issue's table-of-contents page
issue_url = "http://prl.aps.org/toc/PRL/v%d/i%d"%(volume,issue)
issue_html = grabPage(issue_url)

#parse the issue html, start scraping papers
papers_url = parsePrlIssue(issue_html)
papers_html = grabPages(papers_url)

#Parse each paper's html.  Plain loop instead of the original list
#comprehension, which was used only for its side effects.
for paper_html in papers_html:
    parsePrlEntry(paper_html)