# Writes a list of proteins to generate images for.
# Should only need to be run once.
#
# With occasional exceptions, each element in the list has a 
# uniquely identifying 2-tuple consisting of 
# a protein name (actually a HUGO gene symbol) and 
# a four-character PDB ID
#
# The list output by the code below can then be
# fed as input into batch_getPDBImage.py
#
# Author: emw

import urllib.request, re

from optparse import OptionParser

# Pre-compiled patterns used to scrape PDB identifiers out of Wikipedia HTML.
# Raw strings are used so the patterns contain no invalid string-literal
# escape sequences (the original "\<" escapes trigger a SyntaxWarning on
# modern Python; the regexes themselves match identically without them,
# since "<" and ">" are not special in regular expressions).
renderingRE_1 = re.compile(r"rendering based on \w{4}")   # "rendering based on 1ABC" -> carries the PDB ID
renderingRE_2 = re.compile(r"rendering based on ")        # split helper to isolate the trailing PDB ID
exceptionRE = re.compile(r"Wikipedia does not have an article with this exact name")
file_linksRE = re.compile(r'<h2 id="filelinks">File links</h2>')
wiki_href_openRE = re.compile(r'<a href="/wiki/\w+"')


# Wikipedia makes it difficult for screen scrapers; setting a mask 'user-agent' bypasses this obstacle
# Wikipedia makes it difficult for screen scrapers; setting a mask 'user-agent' bypasses this obstacle
# NOTE(review): FancyURLopener is deprecated in Python 3 in favor of
# urllib.request.Request with a headers dict, but the rest of this script
# relies on the opener object below, so it is kept as-is.
class AppURLopener(urllib.request.FancyURLopener):
	 # Overriding the class attribute `version` sets the User-Agent header
	 # sent with every request made through this opener.
	 version = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 (.NET CLR 3.5.30729)'

# `_urlopener` is not a standard attribute of the `urllib` package in
# Python 3; this stashes our masked opener on the module so the functions
# below can reach it as `urllib._urlopener`.
urllib._urlopener = AppURLopener()

def _fetch_wiki_page(url):
	# Download a page through the masked opener and return its text.
	# The handle is closed even if read/decode raises (the original
	# repeated this fetch boilerplate three times and could leak it).
	fp = urllib._urlopener.open(url)
	try:
		return fp.read().decode("utf8")
	finally:
		fp.close()


def scrape_pdb_id(protein_abbreviation):
	"""Look up the four-character PDB ID for a HUGO gene symbol on Wikipedia.

	Returns the PDB ID string on success (possibly suffixed with a
	"*** Disambiguated ... ***" note when the "<symbol> (gene)" article
	title had to be used).  On failure returns a human-readable
	"***"-delimited marker, or a fallback wiki page name scraped from the
	protein image's file-links page.
	"""
	# Progress indicator for batch runs.
	print(protein_abbreviation.rstrip("\n"))
	try:
		wikiString = _fetch_wiki_page("http://en.wikipedia.org/wiki/" + protein_abbreviation)
		# The first "rendering based on XXXX" phrase carries the PDB ID;
		# splitting on the fixed prefix leaves just the ID.
		return renderingRE_2.split(renderingRE_1.findall(wikiString)[0])[1]
	except ValueError:
		# No PDB value found, note this in output file.
		# Article probably moved to use a different title.
		return "   *** No article ***"
	except IndexError:
		try:
			# Tries a common alternative way of naming gene articles,
			# which is typically applied when the HUGO gene symbol is a common acronym.
			wikiString = _fetch_wiki_page("http://en.wikipedia.org/wiki/" + protein_abbreviation + " (gene)")
			# Eventually found PDB ID.  Noting which genes have
			# disambiguated article titles will probably be useful for
			# determining which article to update later.
			# (Marker spelling fixed from the original "Diambiguated".)
			return renderingRE_2.split(renderingRE_1.findall(wikiString)[0])[1] + "   *** Disambiguated; title = " + protein_abbreviation + " (gene) ***"
		except ValueError:
			# Still no PDB value: the standard PBB template was not found.
			# Fall back to the protein image's file-usage page and return
			# the first article that links to the image.
			wikiString = _fetch_wiki_page("http://en.wikipedia.org/wiki/File:PBB_Protein_" + protein_abbreviation + "_image.jpg")
			file_links = wiki_href_openRE.findall(file_linksRE.split(wikiString)[1])
			for link in file_links:
				# Each match looks like '<a href="/wiki/Page_name"'.  Slice the
				# page name out explicitly: the original str.lstrip() call
				# strips a *character set*, which eats leading letters of the
				# page name itself (e.g. names starting with 'w', 'k', 'a'...).
				wiki_link = link[len('<a href="/wiki/'):].rstrip('"')
				# Skip housekeeping pages.  The original used "and" here,
				# which made the filter a no-op (no name starts with all
				# three prefixes), so User:/Template: links leaked through.
				if not wiki_link.startswith(("User:", "Template:", "Main_Page")):
					return wiki_link
			# Falls through (returns None) when every file link is a
			# housekeeping page -- same implicit behavior as the original.
		except IndexError:
			# No PDB value found, note this in output file.
			# The standard PBB template which contains a corresponding PDB
			# identifier was not found on either article title.
			return "   *** Missing PBB template? (index error) ***"


# Accumulator for the batch-mode output file (used only by the disabled
# block below).
output = ""

# Batch mode: read one HUGO gene symbol per line from the input file and
# write "SYMBOL, PDB_OR_MARKER" lines to the output file.  Left disabled
# for one-off checks like the print below.
'''
with open("value_error_proteins.txt", "r") as input_file:
		for line in input_file:
			#print(scrape_pdb_id(line))
			output = output + line.rstrip("\n") + ", " + scrape_pdb_id(line.rstrip("\n")) + "\n"
open("value_error_protein_and_pdb_list.txt", "w").write(output)
'''


# One-off sanity check.  (The original label said "MYL6B" while actually
# querying MMP9; the label now matches the call.)
print("scrape_pdb_id(MMP9): " + scrape_pdb_id("MMP9"))