#!C:\Python26


import os.path
from pluginManage import Plugin


from bs4 import BeautifulSoup
#import html5lib

#import sys
import time
import re
import urlparse
#import os.path
import cPickle

import urllib
import urllib2

import traceback
#from pluginManage import Plugin


import logmanager
import logging


class getDMoe(Plugin):
	"""Scraper plugin for Doujin-Moe.us.

	Walks the member-area gallery listing, collects gallery page links, and
	downloads every archive file (zip/rar/7z/gz/cbr/cbz) found on each page.
	Requires a valid member login cookie (see getCookie / checkCookie).
	"""

	capabilities			=	"wScraperDisabled"								# Used for detecting ifany random .py file is a plugin file
	version				=	1.4									# Used for detecting available configuration options
	pluginName			=	"dMoeScrape"								# Plugin Name variable
	siteName			=	"Doujin-Moe.us Scraper"							# Value for populating "Art Site" dropbox
	nameSource			=	["All Archives"]							# namesource value passed to plugin

	overwriteModes			=	["Check Files", "Check Files with Memory", "Overwrite Files"]		# Determines if overwrite mode dropbox is enabled for plugin
	retrievalHandles		=	1									# How may http retrieval handles app wants

	# Download directory name; made absolute (joined under config.downloadDir) in main()
	DLFolder			=	"Doujin-Moe"
	logFileName			=	"LogFile.txt"
	# Pickle cache of previously retrieved links, used by "Check Files with Memory" mode (see go())
	pickleFileName			=	"Pickle.dat"

	# Presumably tells the host app this plugin can use the shared cookie jar / login machinery (see getCookie) - TODO confirm
	cookieCapable = 1

	aNameLabeltxt			=	1
	aListLabeltxt			=	1
	nameSourceEN			=	0									# Determines if Name Source input box is enabled (Boolean, 0-1)
	artistNameEN			=	0									# Determines if Artist Name input box is enabled (Boolean, 0-1)
	artistListEN			=	0

	instructionText			=	"To Come"
	# Class-level logger for generic plugin messages; per-run logging goes through self.log (set in main())
	genericlog = logging.getLogger("Main.Plugin")

	testing				= False			# Used to disable progress bar functions when testing

	# Base URL of the member area; set by getGalleryLst() and required by getPage()
	urlRoot				= None

	# When True, getGalleryLst() stops after collecting ~500 links
	debug				= False

	def checkCookie(self, cookiejar):
		"""Return a human-readable status string saying whether *cookiejar*
		holds a doujin-moe member cookie (matched against the jar's repr)."""
		match = re.search("<Cookie doujinuser=[\w]*? for .*?\.members\.doujin-moe\.us/members>", "%s" % cookiejar)
		if not match:
			return "Do not have DMoe Cookie"
		return "Have DMoe Cookie:\n	%s" % (match.group(0))



	def getCookie(self, userName, password, opener):
		try:

			# First we clear the old cookie(if any)
			#Doujin-moe cookies have an expire date of ~30 years, so we have to purge them manually



			for handler in opener.handlers:
				if hasattr(handler, "cookiejar"):
					for cookie in handler.cookiejar:
						if cookie.domain.find("doujin-moe.us") + 1:
							handler.cookiejar.clear(cookie.domain)


			browserHeaders = {	'User-Agent'		:	'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
						'Accept-Language'	:	'en-us,en;q=0.5',
						'Accept'		:	'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
						'Accept-Encoding'	:	'gzip, deflate',
						'Accept-Charset'	:	'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
						'Referer'		:	'http://www.doujin-moe.us/login.html'
					}

			logondict = {"uname" : userName, "pword" : password, "savepassword" : "on", "Submit1" : ""}
			pgreq = urllib2.Request('http://members.doujin-moe.us/cgi-bin/sblogin/login.cgi', headers=browserHeaders)


			params = urllib.urlencode(logondict)
			page = opener.open(pgreq, params)
			pagetext = page.read()
			page.close()

			if re.search("login Failed", pagetext):
				return "Login Failed"
			else:
				return "Logged In"
		except:
			print "Error"
			traceback.print_exc()
			return "Login Failed"


	def getGalleryLst(self):

		artlinks = []								# Declare array of links

		nextUrl = 'http://' + self.subdomain + '.members.doujin-moe.us/members/index.php?page=galleries&report=best_galleries&dir='
		#nextUrl = 'http://'+self.subdomain+'.members.doujin-moe.us/members/index.php?page=galleries&dir=&report=best_galleries&startdir=12300'
		self.urlRoot = nextUrl.rsplit("/", 1)[0] + "/"
		print self.urlRoot

		#nextUrl = False

		while nextUrl:

			self.log.info("Getting = " + nextUrl)
			mpgctnt = self.getHandle.getpage(nextUrl)							# Request Image

			if mpgctnt == "Failed":
					self.log.error("Cannot get Page")
					return "Failed"



			nextUrl = ""

			soup = BeautifulSoup(''.join(mpgctnt), "html5lib")

			# //a/img[@class='dirthumbnail']				#Xpath for the links we want

			anchortags = soup("img", attrs={"class" : "dirthumbnail"})
			for tag in anchortags:
				artlinks.append(tag.parent['href'])

			self.log.info("Found %d new pages, %d total pages so far" % (len(anchortags), len(artlinks)))

			nextPageTag = soup.find(text=re.compile("Next page"))
			if nextPageTag:
				#print nextPageTag, type(nextPageTag)
				nextUrl = urlparse.urljoin(self.urlRoot, nextPageTag.find_parent("a")["href"])
			else:
				self.log.info("At the end of the gallery")
				nextUrl = False



			if self.debug == True:
				if len(artlinks) > 500:
					break





		self.log.info("%s%s%s" % ("Found ", len(artlinks), " links"))

		#for link in artlinks:
		#	print link


		return artlinks

	def linkDownloadProgressCallback(self, bytesSoFar, chunkSize, totalSize):
		self.config.progBars.setSubBarLen(totalSize, bytesSoFar)

	def getPage(self, link):
		"""Download every archive file linked from one gallery page.

		*link* is a gallery href (possibly relative to self.urlRoot). Walks the
		gallery's pagination, collects every link in the "dircontent" table,
		keeps only archive files, and saves each one into self.DLFolder with
		the gallery's tags embedded in the filename.

		Returns None on success, "Failed" on retrieval/disk errors, or
		"LoginFailure" when the page shows the session has expired.
		"""
		if not self.urlRoot:
			# getGalleryLst() sets urlRoot; without it relative links can't be resolved
			return "Failed"

		pageUrl = urlparse.urljoin(self.urlRoot, link)

		nextUrl = pageUrl
		contentLinks = set()
		soup = None

		while nextUrl:
			nextUrl = nextUrl.replace(" ", "%20")

			mpgctnt = self.getHandle.getpage(nextUrl)					# Get Webpage
			if mpgctnt == "Failed":
				self.log.error("cannot get page")
				return "Failed"

			soup = BeautifulSoup(mpgctnt, "html5lib")

			contentTable = soup.find("table", {"id" : "dircontent"})
			if not contentTable:
				# Missing content table means we are no longer logged in.
				# Dump the raw page for offline diagnosis.
				self.log.critical("you need to log in again")
				errf = open("errPg.html", "w")
				errf.write(mpgctnt)
				errf.close()
				return "LoginFailure"

			# A set, so links repeated across pagination are deduplicated
			for contentLink in contentTable.find_all("a"):
				contentLinks.add(urlparse.urljoin(self.urlRoot, contentLink["href"]))

			self.log.info("Found %d links" % len(contentLinks))

			nextPageTag = soup.find(text=re.compile("Next page"))
			if nextPageTag:
				nextUrl = urlparse.urljoin(self.urlRoot, nextPageTag.find_parent("a")["href"])
			else:
				self.log.info("At the end of the gallery")
				nextUrl = False

			if self.getHandle.testMode:
				nextUrl = False

		archiveRE = re.compile("(zip|rar|7z|gz|cbr|cbz)$")
		arches = [contentLink for contentLink in contentLinks if archiveRE.search(contentLink)]

		if not arches:
			# BUGFIX: used to log the (shadowed) loop variable — an arbitrary
			# content link — instead of the page that was actually empty
			self.log.critical("No archives on page: %s" % pageUrl)
			return "Failed"

		tagPara = soup.find("p", {"id" : 'tag_edit_list'})
		if not tagPara:
			# Robustness fix: previously an AttributeError here bubbled up to go()
			self.log.critical("No tag list on page: %s" % pageUrl)
			return "Failed"
		tags = tagPara.contents[0].strip()

		self.log.info("Found %d archive files" % len(arches))
		for archUrl in arches:
			self.log.info("			Archve: %s" % archUrl)

		fNameRegex = re.compile("http://.+/")		# strips everything up to the final path component (hoisted out of the loop)

		for archiveUrl in arches:

			fname, ftype = fNameRegex.sub("", archiveUrl.replace("#top", "")).rsplit(".", 1)

			archiveUrl = archiveUrl.replace(" ", "%20")

			# Embed the gallery tags in the filename, then strip filesystem-unsafe characters
			fname = "%s {(Tags) %s}" % (fname, tags)
			fname = "%s.%s" % (re.sub('[^a-zA-Z0-9\-_.()\[\]{} ]', '', fname), ftype)

			self.log.info("			Filename			= %s" % fname)
			self.log.info("			FileURL				= %s" % archiveUrl)
			self.log.info("			FileType			= %s" % ftype)

			filePath = os.path.join(self.DLFolder, fname)

			# Skip files that already exist unless overwriting is requested
			if os.access(filePath, os.W_OK) and self.config.ovwMode != "Overwrite Files":
				self.log.info("Exists, skipping...")
				continue

			# Probe that the file is writable BEFORE spending bandwidth on the
			# download. Two attempts, as before, but the probe handle is now
			# closed on the success path (it used to leak until reassignment).
			try:
				fp = open(filePath, "wb")
				fp.close()
			except (IOError, OSError):
				self.log.critical("Error opening file for writing")
				try:
					fp = open(filePath, "wb")
					fp.close()
				except (IOError, OSError):
					self.log.critical("Failed to open file for writing. Exiting")
					return "Failed"

			self.log.info("Downloading File")
			self.config.progBars.addSubProgressBar()
			imgdath = self.getHandle.getpage(archiveUrl, callBack = self.linkDownloadProgressCallback)							# Request archive
			self.config.progBars.removeSubProgressBar()

			self.log.info("File Downloaded")

			if imgdath == "Failed":
				self.log.error("cannot get file")
				return "Failed"

			# BUGFIX: the old `while not fp:` retry loop exited as soon as
			# open() succeeded, so a failed write() was silently ignored.
			# Retry until the write completes, giving up after 4 attempts.
			errs = 0
			while True:
				try:
					fp = open(filePath, "wb")
					try:
						fp.write(imgdath)
					finally:
						fp.close()
					break
				except IOError:
					errs += 1
					self.log.critical("Error attempting to save image file - %s" % filePath)
					if errs > 3:
						self.log.critical("Could not open file for writing!")
						return "Failed"

		self.log.info("Successfully got: %s" % arches)
		return

	def go(self):


		# Doujuin moe uses a randomly generated sub-domain address, that changes, and in embedded in the cookie they give you
		# as such, we need to extract this sub-domain from the cookie, and use it in the urls we request,
		# so you don't produce an authentication error

		# basically, it's a giant pain in the ass

		self.subdomain = False

		for cookie in self.getHandle.cj:
			#print cookie.domain.find("members.doujin-moe")+1
			if cookie.domain.find("members.doujin-moe") + 1:
				#print "DMoe Cookie: ", cookie
				self.subdomain = cookie.domain.split(".")[0]
				#print subdomain

		if not self.subdomain:
			print self.getHandle.cj
			self.log.critical("Can't find auth cookie!")
			return []


		if not os.access(self.DLFolder, os.W_OK):									# Creates General Directory
			try:
				os.mkdir(self.DLFolder)
			except:
				self.log.error("Need write Permissions")
				print "Directory Issues!"
				return


		fileLinks = self.getGalleryLst()

		fileLinksAll = []

		if fileLinks == "Failed":
			self.log.critical("Failed to get gallery listing")
			logmanager.clearThreadLog(self.log)
			return

		if self.config.ovwMode == "Check Files with Memory":
			try:
				pickleFile = open(os.path.join(self.DLFolder, self.pickleFileName), "rb")
				fileLinksAll = cPickle.load(pickleFile)
				pickleFile.close()

				print "Loading historical cache data"


			except:
				self.log.info("No Pickle file - Is this the first run on this username?")

				fileLinksAll = []

		else:

			fileLinksAll = []


		newFileLinks = []



		skippedCount = 0

		for link in fileLinks:
				if link not in fileLinksAll:

					newFileLinks.append(link)
				else:
					skippedCount += 1

		if skippedCount:
			self.log.info("Number of previously retrieved links skipped: %d " % skippedCount)

		print "len existing links: ", len(fileLinksAll)
		print "len new links: ", len(newFileLinks)


		if not self.testing:
			self.config.progBars.setupMainBar(len(newFileLinks))								# Initialise Progress bars

		self.config.stopChildThreads = False
		totalfaillinks = []

		errorCounter = 0

		for pageLink in newFileLinks:
				if self.config.stopChildThreads:
					break
				self.log.info("Getting - %s" % pageLink)

				try:
					status = (self.getPage(pageLink))								# Downloads gallery. Returns pages that failed for logging purposes
				except:
					self.log.critical("Python exception when trying to get page - %s" % pageLink)
					self.log.critical(traceback.format_exc())
					status = "Failed"
					try:
						self.config.progBars.removeSubProgressBar()
					except:
						pass

				if status == "Failed":
					totalfaillinks.append(pageLink)
					errorCounter = 0

				elif status == "LoginFailure":
					totalfaillinks.append(pageLink)
					errorCounter += 1
					self.log.critical("Error %d. If there are more then 10 sequential errors, the retrieval will exit." % errorCounter)

				else:
					fileLinksAll.append(pageLink)
					errorCounter = 0

				if not self.testing:
					self.config.progBars.incMainBar()


				if errorCounter > 10:
					self.log.critical("Encountered more then 10 errors in a row. Exiting early.")
					break


		self.log.info("%s%s%s%s%s" % ("\n\n\n- - - - - - - - - - - Successful Pages: ", len(totalfaillinks), " at ", time.ctime(time.time()), " - - - - - - - - - - -\n"))

		self.log.info("")
		self.log.info("%s%s%s" % ("- - - - - - - - - - -Overall Failed Pages: ", len(totalfaillinks), " - - - - - - - - - - -"))		# Print list of all pages that the plugin couldn't find an image on.
		self.log.info("")



		for link in totalfaillinks:
			self.log.info("		" + link)
		self.log.info("- - - %s Pages previously Retrieved - - -" % skippedCount)


		self.log.info("")
		self.log.info("Done!")
		self.log.info("")




		pickleFile = open(os.path.join(self.DLFolder, self.pickleFileName), "wb")
		cPickle.dump(fileLinksAll, pickleFile)
		pickleFile.close()




	def main(self, config, getHandles, start=True):
		"""Plugin entry point: stash the app-supplied config and retrieval
		handles, set up per-run logging, resolve the download folder, and —
		unless start is False — run the retrieval pass."""
		self.config = config
		self.getHandle = getHandles
		self.start = start
		self.log = logmanager.ThreadLog(self.retrievalHandles)

		self.log.info("Starting")

		# Make the download folder absolute, under the configured download dir
		self.DLFolder = os.path.join(self.config.downloadDir, self.DLFolder)

		if self.start:
			self.go()
			logmanager.clearThreadLog(self.log)