#!C:\Python26
# -*- coding: UTF-8 -*-

import os.path
from pluginManage import Plugin


import BeautifulSoup							# To get everything
import bs4
import sys
import time
import re
import urlparse
import os
import cPickle

import urllib

import logmanager

import Queue
import threading



def debugPr(*args):
	# Debug trace helper -- the guard below is the master switch and is
	# left False so the debugPr() calls scattered through this module stay
	# silent. Flip it to True to echo each argument on its own line,
	# followed by a trailing blank line.
	if False:
		for arg in args:
			sys.stdout.write("%s\n" % (arg,))
		sys.stdout.write("\n")

import traceback



class getPx(Plugin):
	# Pixiv gallery scraper. The class attributes below form the plugin's
	# declarative interface: the host application reads them to decide how
	# to present this scraper in the UI and how to drive it.

	capabilities			=	"wScraper"										# Used for detecting if any random .py file is a plugin file
	version				=	1.3											# Used for detecting available self.configuration options
	pluginName			=	"pixivScrape"										# Plugin Name variable
	siteName			=	"Get Pixiv"										# Value for populating "Art Site" dropbox
	overwriteModes			=	["Check Folders", "Check Files with Memory", "Check Files", "Overwrite Files"]		# Determines if overwrite mode dropbox is enabled for plugin
	retrievalHandles		=	4											# How many http retrieval handles app wants

	DLFolder			=	"Pixiv"			# Download sub-folder; main() re-roots this under config.downloadDir
	logFileName			=	"LogFile.txt"		# NOTE(review): not referenced in this file -- presumably consumed by the host app
	pickleFileName			=	"Pickle.dat"		# Per-artist memory file used by the "Check Files with Memory" mode

	# V1.1 variables:

	aNameLabeltxt			=	1		# NOTE(review): label flags -- meaning not visible here, presumably UI label toggles
	aListLabeltxt			=	1
	nameSourceEN			=	1										# Determines if Name Source input box is enabled (Boolean, 0-1)
	artistNameEN			=	1										# Determines if Artist Name input box is enabled (Boolean, 0-1)
	artistListEN			=	1		# presumably enables the Artist List input box -- verify against host app

	# V1.2 variables:
	cookieCapable			=	1		# Plugin supports cookie-based login (see getCookie/checkCookie)
	instructionText			=	"To Come"

	# V1.3 variables:
	nameSource				=	["Name List File", "Favorite Artists"]				# namesource value passed to plugin
	nameSourceEN			=	1								# Determines if Name Source input box is enabled (Boolean, 0-1)
	# NOTE(review): the assignment above duplicates the V1.1 nameSourceEN with the same value.

	loopDelay			=	1		# Seconds slept between successive requests (pixiv flood protection)



	saveHTML			=	False		# Used to save the page html content, for debugging


	def checkCookie(self, cookiejar):
		# Report whether the supplied cookiejar holds a pixiv.net session.
		# The jar is stringified and scanned for its PHPSESSID entry.
		match = re.search("<Cookie PHPSESSID=[0-9a-f_]*? for \.pixiv\.net/>", "%s" % cookiejar, re.IGNORECASE)
		if not match:
			return "Do not have Pixiv Cookies"
		return "Have Pixiv Cookie:\n	%s" % (match.group(0))



	def getCookie(self, userName, password, opener):
		# Log in to pixiv by POSTing the credential form to index.php via
		# the supplied urllib2-style opener; the session cookie lands in
		# the opener's cookiejar as a side effect. Success is detected by
		# the presence of a "Logout" link in the response page.
		formFields = {"mode" : "login", "pixiv_id" : userName, "pass" : password, "skip" : "1"}
		debugPr(formFields)

		response = opener.open('http://www.pixiv.net/index.php', urllib.urlencode(formFields))
		responseBody = response.read()
		response.close()

		if re.search("Logout", responseBody):
			return "Logged In"
		return "Login Failed"



	def getGalleryLst(self, artistName, getHandle, config):
		'''
		Walk the paginated member_illust gallery of `artistName` (a pixiv
		member id) and collect links to the individual art pages.

		Returns the list of art-page URLs, or 0 if a gallery page could not
		be fetched or the account has been suspended.
		'''
		# re string is "該当ユーザーのアカウントは停止されています。" escaped, so the encoding does not mangle it.
		# It translates to "This account has been suspended"
		suspendedAcctRe = re.compile("\xe8\xa9\xb2\xe5\xbd\x93\xe3\x83\xa6\xe3\x83\xbc\xe3\x82\xb6\xe3\x83\xbc\xe3\x81\xae\xe3\x82\xa2\xe3\x82\xab\xe3\x82\xa6\xe3\x83\xb3\xe3\x83\x88\xe3\x81\xaf\xe5\x81\x9c\xe6\xad\xa2\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82")

		scriptTagRe = re.compile("<script.+?>")			# <script> tags break BeautifulSoup; scrubbed below

		artlinks = []							# Accumulated art-page links
		pageNum = 0							# 1-based gallery page counter

		while True:
			pageNum += 1
			turl = "http://www.pixiv.net/member_illust.php?id=%s&p=%s" % (artistName, pageNum)
			self.log.info("Getting = " + turl)
			mpgctnt = getHandle.getpage(turl)							# Request gallery page
			if mpgctnt == "Failed":
				self.log.info("Cannot get Page")
				return 0
			if suspendedAcctRe.search(mpgctnt):
				self.log.critical("Account has been suspended. You should probably remove it from your favorites")
				self.log.critical("Account # %s" % artistName)
				self.log.critical("Gallery URL - %s" % turl)
				return 0

			config.progBars.pulseSubBar()

			mpgctnt = mpgctnt.replace("\\", "")					# Scrub out \/ negations from links in javascript
			mpgctnt = scriptTagRe.sub("", mpgctnt)

			soup = BeautifulSoup.BeautifulSoup(mpgctnt)
			anchortags = soup("a")

			lastLink = ""								# Previous matching link, for collapsing adjacent duplicates
			artcnt = 0								# Art links found on THIS page (0 => past end of gallery)

			config.progBars.pulseSubBar()
			for tag in anchortags:
				try:
					nudderlink = urlparse.urljoin("http://www.pixiv.net/", tag['href'])			 # Extract link
				except KeyError:								# badly formed link ? probably just a named anchor like '<a name="something">'
					continue
				# Keep links to art pages, skipping immediate repeats
				if ("member_illust.php?mode=medium&illust_id" in nudderlink) and nudderlink != lastLink:
					lastLink = nudderlink
					artcnt += 1
					artlinks.append(nudderlink)

			config.progBars.pulseSubBar()

			if artcnt == 0:		# An empty page means we walked off the end of the gallery
				break

		galleryPages = pageNum - 1		# The last page fetched was empty, so it doesn't count
		self.log.info("%s%s%s" % ("Found ", len(artlinks), " links"))
		# Bug fix: the original zeroed its loop counter to exit the loop,
		# so this sanity check always compared against 0 pages and could
		# never trigger. It now uses the real page count.
		if ((galleryPages * 20) - len(artlinks)) > 20:
			self.log.error("We seem to have found less than 20 links per page. are there missing files?")
			self.log.info("Found %s links on %s pages. Should have found %s - %s links" % (len(artlinks), galleryPages, galleryPages * 20, (galleryPages - 1) * 20))

		return artlinks

	def getManga(self, link, artistName, getHandle):
		'''
		Download every image of a multi-page ("manga") pixiv post.

		link       - URL of the manga viewer page
		artistName - destination sub-folder under self.DLFolder
		getHandle  - HTTP retrieval handle supplied by the host application

		Returns "Downloaded" when every image was saved, "Failed" on any
		image/save error, or None when the viewer page itself could not be
		retrieved (preserves the original implicit fall-through).
		'''
		titleRE = re.compile("<title>(.*?)</title>", re.IGNORECASE)

		successed = True

		mangaAddr = link

		# Spoof the referrer to get the big image version
		mangaPgCtnt = getHandle.getpage(mangaAddr, addlHeaders={'Referer': link})
		if mangaPgCtnt == "Failed":
			self.log.info("cannot get manga page")
			return None

		titleReResult = titleRE.search(mangaPgCtnt)
		if titleReResult:
			self.log.info("%s%s" % ("Found file Title : ", titleReResult.group(1)))
			mangaTitle = titleReResult.group(1)
			mangaTitle = re.sub('[\\/:*?"<>|]', "", mangaTitle)		# strip characters that are illegal in filenames
		else:
			mangaTitle = None

		soup = BeautifulSoup.BeautifulSoup(mangaPgCtnt)
		imageSections = soup.findAll('div', attrs={"class" : "image-container placeholder"})

		# The real image URLs are stashed in inline javascript; pull them
		# out of the pixiv.context.images[...].unshift('...') calls.
		scriptLinkRE = re.compile("pixiv.context.images\[\d*\].unshift\('(.*?)'\)")
		linkSet = set()
		for subsection in imageSections:
			for script in subsection.findAllNext("script"):
				debugPr(type(script.contents[0]))
				debugPr(script.contents[0])
				for imageLink in scriptLinkRE.findall(script.contents[0]):
					if imageLink not in linkSet:
						self.log.info("	ImageLink - %s" % imageLink)
						linkSet.add(imageLink)

		regx4 = re.compile("http://.+/")				# FileName RE: strips everything up to the last '/'

		self.log.info("Found %s page manga!" % len(linkSet))
		if len(linkSet) < 1:
			self.log.error("No Images on page?")
			return "Failed"

		for imageLink in linkSet:
			filename = regx4.sub("", imageLink)
			filename = filename.rsplit("?")[0]		# drop any PHP query junk tacked onto the file name

			imPath = os.path.join(self.DLFolder, artistName, filename)

			if (not os.access(imPath, os.W_OK)) or self.config.ovwMode == "Overwrite Files":

				self.log.info("Waiting...")
				time.sleep(self.loopDelay)		# rate-limit to avoid pixiv's flood protection

				imgdath = getHandle.getpage(imageLink, addlHeaders={'Referer': mangaAddr})		# Request Image

				if imgdath == "Failed":
					self.log.error("cannot get manga page")
					successed = False
				else:
					self.log.info("Successfully got: " + filename)
					writeErrors = 0
					while writeErrors < 3:
						try:
							fp = open(imPath, "wb")		# Write Image to File
							fp.write(imgdath)
							fp.close()
							break
						except EnvironmentError:
							# Bug fix: the original never incremented this
							# counter, so a persistent write failure spun
							# forever instead of giving up after 3 tries.
							writeErrors += 1
					else:
						self.log.critical("Could not save file - %s " % imPath)
						return "Failed"

					self.log.info("Successfully got: " + imageLink)

			else:
				self.log.info("%s Exists, skipping..." % filename)

		self.log.info("Total %s " % len(linkSet))
		if successed:
			return "Downloaded"
		return "Failed"


	def getpic(self, link, artistName, getHandle):
		'''
		Download the image behind a single pixiv art-page link.

		Pixiv does a whole lot of referrer sniffing. They block images, and do page redirects if you don't submit the correct referrer.
		Also, I *think* they will block flooding, so that's the reason for the delays everywhere.

		Returns "Downloaded", "Exists" or "Failed" (or the result of
		getManga() for multi-page posts); returns None when the art page
		itself could not be fetched (preserves the original fall-through).
		'''
		imgurl = ""

		self.log.info("Waiting...")
		time.sleep(self.loopDelay)		# rate-limit to avoid pixiv's flood protection

		self.log.info("Getting = %s" % link)
		refL = link
		debugPr("TargetPage", link)
		debugPr("Referrer", refL)

		basePageCtnt = getHandle.getpage(refL)

		if basePageCtnt == "Failed" or not basePageCtnt:
			self.log.info("cannot get manga test page")
			return None

		if self.saveHTML:
			fp = open("tempPgSm.html", "wb")
			fp.write(basePageCtnt)
			fp.close()

		# Bug fix: pre-initialize so the error path below cannot itself
		# raise a NameError when the failure happens before assignment.
		soup = None
		mainSection = None
		try:
			soup = BeautifulSoup.BeautifulSoup(basePageCtnt)
			mainSection = soup.find('div', attrs={"class" : "works_display"})
			link = "%s%s" % ("http://www.pixiv.net/", mainSection.find("a")["href"])
		except Exception:
			self.log.error("link - %s" % link)
			self.log.error("Mainsection - %s" % mainSection)
			traceback.print_exc()
			return "Failed"

		if link.find("manga") + 1:
			self.log.info("Multipage/Manga link")
			return self.getManga(link, artistName, getHandle)

		titleRE = re.compile("<title>(.*?)</title>", re.IGNORECASE)

		# Spoof the referrer to get the big image version
		mpgctnt = getHandle.getpage(link, addlHeaders={'Referer': refL})

		# Bug fix: the original decoded BEFORE testing the "Failed"
		# sentinel, so a non-UTF8 error body could crash the decode and the
		# test then compared unicode against a byte string.
		if mpgctnt == "Failed":
			self.log.info("cannot get page")
			return "Failed"

		if self.saveHTML:
			fp2 = open("tempPgLg.html", "wb")
			fp2.write(mpgctnt)
			fp2.close()

		mpgctnt = mpgctnt.decode("utf-8")

		soup = bs4.BeautifulSoup(mpgctnt, "lxml")
		imgPath = soup.find("img")

		if imgPath:
			self.log.info("%s%s" % ("Found Image URL : ", imgPath["src"]))
			imgurl = imgPath["src"]

		regx4 = re.compile("http://.+/")				# FileName RE
		fname = regx4.sub("", imgurl)

		titleReResult = titleRE.search(mpgctnt)
		if titleReResult:
			self.log.info("%s%s" % ("Found file Title : ", titleReResult.group(1)))

		imgTitle = fname		# No imagename on page

		if imgurl == "":
			self.log.error("OH NOES!!! No image on page = " + link)
			return "Failed"										# Return Fail

		# Sometimes there is some PHP stuff tacked on the end of the Image URL.
		# Split on the indicator("?"), and throw away everything after it.
		fname = fname.rsplit("?")[0]

		self.log.info("			Filename = " + fname)
		self.log.info("			Page Image Title = " + imgTitle)
		self.log.info("			FileURL = " + imgurl)

		if (not os.access(os.path.join(self.DLFolder, artistName, fname), os.W_OK)) or self.config.ovwMode == "Overwrite Files":

			self.log.info("Waiting...")
			time.sleep(self.loopDelay)

			imgdath = getHandle.getpage(imgurl, addlHeaders={'Referer': link})		# Request Image

			if imgdath == "Failed":
				self.log.info("cannot get image")
				return "Failed"
			self.log.info("Successfully got: " + fname)
			try:
				imagePath = os.path.join(self.DLFolder, artistName, fname)
				fp = open(imagePath, "w+b")								# Open file for saving image (Binary)
				fp.write(imgdath)
				fp.close()
			except EnvironmentError:
				self.log.critical("cannot save image")
				traceback.print_exc()
				return "Failed"

			self.log.info("Successfully got: " + imgurl)
			return "Downloaded"									# Return Success
		else:
			self.log.info("Exists, skipping...")
			return "Exists"


	def getArtist(self, artistName, getHandle, config):
		'''
		Download one artist's complete gallery into self.DLFolder/<artistName>/.

		artistName - pixiv member id (string), also used as the folder name
		getHandle  - HTTP retrieval handle supplied by the host application
		config     - host configuration object (progress bars, overwrite mode)

		Returns the list of failed page links (possibly empty), 0 if the
		working folder could not be created, or None when the folder already
		exists in "Check Folders" mode.
		'''

		# "Check Folders" mode: an existing artist folder is taken as proof
		# the gallery was already fetched.
		if os.access(os.path.join(self.DLFolder, artistName), os.W_OK) and self.config.ovwMode == "Check Folders":
			self.log.info("self.DLFolder PyDir/" + artistName + "/ Exists: ")
			self.log.info("Assuming page has already been downloaded, skipping")

		else:
			if not os.access(os.path.join(self.DLFolder, artistName), os.W_OK):
				try:
					os.mkdir(os.path.join(self.DLFolder, artistName))
					self.log.info("Created folder " + artistName)
					self.log.info("Beginning Download")
				except:
					self.log.info("Cannot Make working directory %s/. Do you have write Permissions? %s" % (artistName, sys.exc_info()[0]))
					return 0

			self.log.info("Input username: " + artistName)							# Show Website that's being scrubbed

			artLinksAll = self.getGalleryLst(artistName, getHandle, config)					# Walk gallery pages, return list of links to individual art pages!

			debugPr("Artlinks = %s" % artLinksAll)
			debugPr("self.config.ovwMode = %s" % self.config.ovwMode)

			successlinks = []
			existlinks = []
			faillinks = []

			if artLinksAll:									# If getGalleryLst found any pages:
				if self.config.ovwMode == "Check Files with Memory":						# Pickle file speeds up traversal of pages that have already been walked.
					debugPr("Loading Pickle File")
					try:
						pickleFile = open(os.path.join(self.DLFolder, artistName, self.pickleFileName), "rb")		# Load list of links
						existlinks.extend(cPickle.load(pickleFile))
						pickleFile.close()

						os.unlink(os.path.join(self.DLFolder, artistName, self.pickleFileName))		# delete list of links, that way if something crashes while downloading the gallery, it'll check everything again

					except:
						self.log.info("No Pickle file - Is this the first run on this username?")
						# NOTE(review): this assignment is dead -- artlinks
						# is unconditionally reset to [] just below.
						artlinks = artLinksAll
						debugPr("Pickle File load Failed")

					artlinks = []

					debugPr("ExistLinks %s" % existlinks)
					debugPr("NewLinks %s" % artLinksAll)
					# Keep only links the pickle memory has not seen before.
					for link in artLinksAll:
						if link in existlinks:
							pass
						else:
							debugPr("Retrieving: " + link)
							artlinks.append(link)

				else:
					debugPr("Trying all Images")
					artlinks = artLinksAll

				# Recreate the pickle file with a placeholder so a crash
				# mid-gallery leaves an (empty) memory rather than none.
				pickleFile = open(os.path.join(self.DLFolder, artistName, self.pickleFileName), "wb")
				cPickle.dump("", pickleFile)
				pickleFile.close()

				self.log.info("%s%s%s" % ("\n\n\n- - - - - - - - - - - Total Pages: ", len(artlinks), " - - - - - - - - - - -\n"))

				for link in artlinks:
					self.log.info(link)

				numinfo = 1

				config.progBars.setSubBarLen(len(artlinks))							# Setup local progress bar

				for link in artlinks:
					success = self.getpic(link, artistName, getHandle)		# Download the actual pictures themselves Pictures!
					self.log.info("%s%s%s%s" % ("On page ", numinfo, " of ", len(artlinks)))
					numinfo = numinfo + 1
					config.progBars.incSubBar()
					if success == "Downloaded":
						successlinks.append(link)
					elif success == "Exists":
						existlinks.append(link)
					else:
						faillinks.append(link)

				if len(faillinks):
					self.log.info("%s%s%s" % ("- - - Failed Pages: ", len(faillinks), " - - -"))
					for link in faillinks:
						self.log.info(link)

			else:
				self.log.error("Could not get pages of artist: " + artistName)
			debugPr("SuccessLinks : ", successlinks)
			debugPr("ExistLinks : ", existlinks)
			# Everything fetched or already present becomes the new memory.
			outlinks = successlinks + existlinks

			debugPr("outlinks : ", outlinks)

			pickleFile = open(os.path.join(self.DLFolder, artistName, self.pickleFileName), "wb")		# Recreate pickle file
			cPickle.dump(outlinks, pickleFile)
			pickleFile.close()
			return faillinks


	def getAllArtistNames(self, config):
		# Walk the logged-in user's bookmark ("Favorite Artists") pages and
		# collect every distinct artist ID, preserving first-seen order.
		# Stops once a page yields no ID we have not already recorded.
		# Returns the ID list, or the string "Failed" if a fetch fails.
		nameRE = re.compile('<a href="member\.php\?id=(\d*?)">')

		resultList = []
		counter = 1

		while 1:
			foundNew = False
			self.log.info("Got Page %d" % counter)
			pageURL = "http://www.pixiv.net/bookmark.php?type=user&rest=show&p=%d" % (counter)
			config.progBars.pulseMainBar()

			self.log.info("Getting Page: " + pageURL)
			content = self.getHandles[0].getpage(pageURL)
			if content == "Failed":
				self.log.info("cannot get image")
				return "Failed"

			counter += 1
			config.progBars.pulseMainBar()
			for artistID in nameRE.findall(content):
				if artistID not in resultList:
					resultList.append(artistID)
					foundNew = True		# A new name means we are not at the end of the bookmark list yet

			if not foundNew:			# No new names on this page => the list is exhausted
				break

		self.log.info("Found %d Names" % len(resultList))

		return resultList


	def childThread(self, config, getHandle, nameQueue, failedQueue):
		'''
		Worker thread body: pull artist names off nameQueue until it is
		empty (or config.stopChildThreads is set) and download each gallery
		with this thread's getHandle. Each non-empty list of failed page
		links is pushed onto failedQueue and appended to "Failed Pages.txt".
		'''
		while not config.stopChildThreads:
			# Keep the try body minimal: only get_nowait() raises Queue.Empty.
			try:
				name = nameQueue.get_nowait()
			except Queue.Empty:
				# Bug fix: the original passed print-style multiple args to
				# log.info(), which everywhere else takes a single
				# pre-formatted message string.
				self.log.info("Thread %s queue is empty. Exiting" % threading.currentThread())
				break

			config.progBars.addSubProgressBar()
			self.log.info("Getting %s's Gallery" % name)
			faillinks = self.getArtist(name, getHandle, config)
			if faillinks != None and faillinks != 0:
				failedQueue.put(faillinks)

				faillogptr = open("Failed Pages.txt", "a")
				try:
					faillogptr.write("%s%s%s%s%s%s%s" % ("\n\n\n- - - - - - - - - - - Failed Pages of artist ", name, ". Quantity : ", len(faillinks), " at ", time.ctime(time.time()), " - - - - - - - - - - -\n"))
					for link in faillinks:
						faillogptr.write("\n		" + link)
				finally:
					# Bug fix: the log file is now closed even if a write fails.
					faillogptr.close()
			time.sleep(3)
			self.log.info("Done")

			config.progBars.removeSubProgressBar()

			config.progBars.incMainBar()


	def main(self, config, getHandles):											 # Main Function Call
		'''
		Plugin entry point. Builds the artist worklist (from the name-list
		file or the user's pixiv favorites), spawns one worker thread per
		retrieval handle, waits for them all to drain the queue, and
		reports the accumulated failed links.
		'''
		self.log = logmanager.ThreadLog(self.retrievalHandles)

		self.log.info("Starting")

		self.config = config
		self.getHandles = getHandles

		self.DLFolder = os.path.join(config.downloadDir, self.DLFolder)

		if not os.access(self.DLFolder, os.W_OK):									# Creates General Directory
			try:
				os.mkdir(self.DLFolder)
			except OSError:
				self.log.critical("Need write Permissions")
				self.log.critical("%s" % self.DLFolder)
				return

		if self.config.nSrcMode == "Name List File":									# Download the galleries named in the user's list
			artists = self.config.aList

		elif self.config.nSrcMode == "Favorite Artists":								# Download every favorited artist's gallery
			self.log.info("Favourite Artists")
			artists = self.getAllArtistNames(config)

		else:
			self.log.error("Unknown Error ------")					# Dump state to error log for debugging
			self.log.error("Passed Values :")
			self.log.error(self.config)
			self.log.error(("Artist List : %s" % self.config.aList))
			self.log.error(("Name Source : %s" % self.config.nSrcMode))
			self.log.error(("Overwrite Mode : %s" % self.config.ovwMode))
			self.log.error(("self.getHandles : %s" % self.getHandles))
			# Bug fix: the original fell through here with `artists`
			# undefined and crashed with a NameError on len(artists) below.
			return

		config.progBars.setupMainBar(len(artists))

		nameQueue = Queue.Queue()
		failedQueue = Queue.Queue()

		for name in artists:
			nameQueue.put(name)

		threads = []

		self.log.info("Starting Threads")
		config.stopChildThreads = False

		# One worker thread per retrieval handle; daemonized so a hung
		# thread cannot keep the host application alive.
		for threadNumber, getHandle in enumerate(getHandles):
			temp = threading.Thread(target=self.childThread, name="Thread %d" % threadNumber, args=(config, getHandle, nameQueue, failedQueue))
			temp.daemon = True
			temp.start()
			threads.append(temp)

		# Busy-wait until the queue is drained AND every worker has exited.
		# (The redundant function-local `import time` of the original is
		# gone; the module-level import is used.)
		alive = True
		while (not nameQueue.empty()) or alive:
			alive = False
			for worker in threads:
				alive = alive or worker.isAlive()		# Do not exit untill all child threads have returned

			time.sleep(0.1)

		totalfaillinks = []
		while not failedQueue.empty():
			totalfaillinks.append(failedQueue.get())

		if len(totalfaillinks) > 0:
			self.log.error("- - - - - - - - - - -Overall Failed Pages: %s - - - - - - - - - - -" % (len(totalfaillinks)))
			self.log.error("Done!")
			self.log.error("")

		for link in totalfaillinks:
			self.log.error(link)

		logmanager.clearThreadLog(self.log)
		self.log.info("exiting")

		time.sleep(2)
		return


