# -*- coding: utf-8 -*-
"""
    CrunchyRoll;xbmc
    Copyright (C) 2012 - 2014 Matthew Beacher
    This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import os
import xbmc
import xbmcgui
import urllib
import urllib2
import StringIO
import cookielib
import gzip
import re
import datetime
import crunchy_main
import xbmcaddon
# Addon handle used here only to resolve localized strings; note that
# __settings__ is rebound a few lines below.
__settings__ = xbmcaddon.Addon(id='plugin.video.crunchyroll-old')
__language__ = __settings__.getLocalizedString
from BeautifulSoup import BeautifulSoup

# NOTE(review): this rebinds __settings__ to the Addon object created by the
# main entry script, making the assignment above redundant except for
# __language__ — confirm before consolidating the two.
__settings__ = sys.modules[ "__main__" ].__settings__
# Raw setting value ("0"-"3") selecting the content lineup region; it is
# mapped to a language code in CrunchyScraper.getRegion().
lineupRegion = __settings__.getSetting("lineupRegion")

class _Info:
	
	def __init__( self, *args, **kwargs ):
		self.__dict__.update( kwargs )
        
class CrunchyParser:

	pattern_episode_id = re.compile('[0-9]{6}')
	
	def __init__(self):
		self.settings = {}
		self.settings['thumb_quality'] = int(__settings__.getSetting("thumb_quality"))
		print "CRUNCHY: --> Thumb Quality: " + str(self.settings['thumb_quality'])
		thumb_quality = ['_thumb', '_large', '_full']
		self.settings['thumb_quality'] = thumb_quality[self.settings['thumb_quality']]

        def parseTitleList(self, feed_title):
                item = {}
                print "Crunchyroll Takeout: --> in parseTitleList"
                soup_title = BeautifulSoup(feed_title)
                queue_list = soup_title.findAll('li',attrs={"class":"hover-bubble group-item"})
                queue_lists = sorted(queue_list, key=lambda elem: elem.text.lower())
                #queue_list = soup_title.findAll('li',attrs={"itemtype":"http://schema.org/TVSeries"})
                num_series = len(queue_list)
                print "Crunchyroll Takeout: -->number of found series "+str(num_series)
                for queue_series in queue_lists:
                        if queue_series is not None:
                                #print queue_series
                                item['name'] = queue_series.a['title']
                                item['page_url'] = queue_series.a['href']
                                print item['page_url']
                                crunchy_main.UI().addItem({'Title':item['name'].encode("utf8"),'mode':'series', 'page_url':item['page_url'].encode("utf8")}, True, num_series)
                crunchy_main.UI().endofdirectory('none')
	
	def parseSpBoxScrappedSeries(self, feed):
                print "Crunchyroll Takeout: --> in parseSpBoxScrappedSeries"
                item = {}
                soup_title = BeautifulSoup(feed)
                #queue_list = soup_title.findAll('li',attrs={"itemtype":"http://schema.org/TVSeries"})
                queue_list = soup_title.findAll('li',attrs={"class":"hover-bubble group-item"})
                num_series = len(queue_list)
                print "Crunchyroll Takeout: -->number of found series "+str(num_series)
                for queue_series in queue_list:
                        if queue_series is not None:
                                #print queue_series
                                item['name'] = queue_series.a['title']
                                item['page_url'] = queue_series.a['href']
                                item['img'] = queue_series.img['src']
                                print item['page_url']
                                crunchy_main.UI().addItem({'Title':item['name'].encode("utf8"),'mode':'series','Thumb':item['img'].encode("utf8"), 'page_url':item['page_url'].encode("utf8")}, True, num_series)
		crunchy_main.UI().endofdirectory('none')

	def parseQueue(self, feed_queue):
                item = {}
                print "Crunchyroll Takeout: --> in parseQueue"
		local_string = xbmcaddon.Addon(id='plugin.video.crunchyroll-old').getLocalizedString
		loginerror = local_string(70006).encode("utf8")
                soup_queue = BeautifulSoup(feed_queue)
                queue_list = soup_queue.findAll('li',attrs={"series_id":True})
                num_series = len(queue_list)
                print "Crunchyroll Takeout: -->number of found series "+str(num_series)
		if num_series == 0:
                        title = soup_queue.html.head.title
                        print "Crunchyroll Takeout -> title.string '"+title.string+"'"
                        if title.string == 'Crunchyroll - Sign Up or Log In':
                                crunchy_main.UI().addItem({'Title':loginerror, 'mode':'series'}, True, num_series)
                for queue_series in queue_list:
                        if queue_series is not None:
                                #print queue_series
                                item['name'] = queue_series.find('span', attrs={"class":'series-title ellipsis'}).string.replace("&amp;","&")
                                item['img'] = queue_series.img['src']
                                item['page_url'] = queue_series.div.div.a['href']
                                print item['page_url']
                                crunchy_main.UI().addItem({'Title':item['name'].encode("utf8"),'mode':'series', 'Thumb':item['img'].encode("utf8"), 'page_url':item['page_url'].encode("utf8")}, True, num_series)
                crunchy_main.UI().endofdirectory('none')

	def parseEpisodes(self, feed):
                item = {}
                print "Crunchyroll Takeout: --> in parseEpisodes"
                soup_feed = BeautifulSoup(feed)
                episode_list = soup_feed.findAll('div',attrs={"class":True,"data-classes":True})
                num_series = len(episode_list)
                print "Crunchyroll Takeout: -->number of found speisodes: "+str(num_series)
                for queue_episodes in episode_list:
                        try:
                                #print queue_episodes
                                item['name'] = queue_episodes.span.string +":"+ queue_episodes.p.string.replace("\n","")
                                item['name'] = re.sub(' +',' ',item['name'])
                                try:
                                        item['img'] = queue_episodes.img['src']
                                except:
                                        item['img'] = queue_episodes.img['data-thumbnailurl']
                                pass
                                temp = queue_episodes.find('div', attrs={"media_id":True})
                                item['id'] = temp["media_id"]
                                item['page_url'] = "http://www.crunchyroll.com"+queue_episodes.a['href']
                                crunchy_main.UI().addItem({'Title':item['name'].encode("utf8"),'mode':'episode', 'id':item['id'], 'Thumb':item['img'], 'page_url':item['page_url']}, True, num_series)
                        except:
                                print "do something"
                        pass
		crunchy_main.UI().endofdirectory('none')

class CrunchyScraper:
	
	def __init__(self):
		self.base_path = os.path.join(xbmc.translatePath("special://masterprofile/"), "addon_data", os.path.basename(os.getcwd()))
		self.base_cache_path = os.path.join(self.base_path, "cache")
		if not os.path.exists(self.base_cache_path):
			os.makedirs(self.base_cache_path)
		self.episodes_list = []

        def getRegion(self): 
                # print 'CRUNCHYROLL: --> Lineup: '+lineupRegion
                if lineupRegion == "0":
                        lineupRegions = "en-us"
                if lineupRegion == "1":
                        lineupRegions = "pt-br"
                if lineupRegion == "2":
                        lineupRegions = "es-es"
                if lineupRegion == "3":
                        lineupRegions = "fr-fr"
                print 'CRUNCHYROLL: --> Using lineup from: '+lineupRegions
                return lineupRegions    	
		
	def getEpisodeListing(self, url): 
		full_url = "http://www.crunchyroll.com"+url
		id = url.replace('/','')
		file_path = os.path.join(self.base_cache_path, id+".html")
		refreshHTML = self.check_cache_time(file_path)
		if(os.path.exists(file_path) and refreshHTML is False):
			usock = open(file_path, "r")
			rssFeed = usock.read()
		else:
			opener = urllib2.build_opener()
			opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language',self.getRegion()+',en;q=0.5')]
			#opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language','en-us,en;q=0.5')]
			usock = opener.open(full_url)
			rssFeed = usock.read()
			if usock.headers.get('content-encoding', None) == 'gzip':
				rssFeed = gzip.GzipFile(fileobj=StringIO.StringIO(rssFeed)).read().decode('utf-8','ignore')
		usock.close()
		if (not os.path.exists(file_path)):
			file_object = open(file_path, "w")
			file_object.write(rssFeed.encode('utf-8'))
			file_object.close()
		CrunchyParser().parseEpisodes(rssFeed)

        def getTitleOnlyScrappedSeries(self, mode):
                if mode == 'anime_all': 
                        full_url = "http://www.crunchyroll.com/videos/anime/alpha?group=all"
                else:
                        full_url = "http://www.crunchyroll.com/videos/drama/alpha?group=all"
		file_path = os.path.join(self.base_cache_path, mode+".html")
		refreshHTML = self.check_cache_time(file_path)
		if(os.path.exists(file_path) and refreshHTML is False):
			usock = open(file_path, "r")
			rssFeed = usock.read()
		else:
			opener = urllib2.build_opener()
			#opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language','en-us,en;q=0.5')]
                        opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language',self.getRegion()+',en;q=0.5')]
			usock = opener.open(full_url)
			rssFeed = usock.read()
			if usock.headers.get('content-encoding', None) == 'gzip':
				rssFeed = gzip.GzipFile(fileobj=StringIO.StringIO(rssFeed)).read().decode('utf-8','ignore')
		usock.close()
		if (not os.path.exists(file_path)):
			file_object = open(file_path, "w")
			file_object.write(rssFeed.encode('utf-8'))
			file_object.close()
		CrunchyParser().parseTitleList(rssFeed)

        def getEpBoxScrappedSeries(self, mode):
                if mode == 'anime_updated': 
                        full_url = "http://www.crunchyroll.com/videos/anime/updated"
                elif mode == 'pop_updated': 
                        full_url = "http://www.crunchyroll.com/videos/pop/updated"
                else:
                        full_url = "http://www.crunchyroll.com/videos/drama/updated"
		file_path = os.path.join(self.base_cache_path, mode+".html")
		refreshHTML = self.check_cache_time(file_path)
		if(os.path.exists(file_path) and refreshHTML is False):
			usock = open(file_path, "r")
			rssFeed = usock.read()
		else:
			opener = urllib2.build_opener()
			#opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language','en-us,en;q=0.5')]
                        opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language',self.getRegion()+',en;q=0.5')]
			usock = opener.open(full_url)
			rssFeed = usock.read()
			if usock.headers.get('content-encoding', None) == 'gzip':
				rssFeed = gzip.GzipFile(fileobj=StringIO.StringIO(rssFeed)).read().decode('utf-8','ignore')
		usock.close()
		if (not os.path.exists(file_path)):
			file_object = open(file_path, "w")
			file_object.write(rssFeed.encode('utf-8'))
			file_object.close()
		CrunchyParser().parseSpBoxScrappedSeries(rssFeed)

        def getSpBoxScrappedSeries(self, mode, id1):
                filename = ""
                full_url = ""
                if mode == "anime_withtag":
                        filename = "anime"+id1
                        full_url = "http://www.crunchyroll.com/videos/anime/genres/"+id1
                if mode == "drama_withtag":
                        filename = "drama"+id1
                        full_url = "http://www.crunchyroll.com/videos/drama/genres/"+id1
                if mode == "pop_withtag":
                        filename = "pop"+id1
                        full_url = "http://www.crunchyroll.com/videos/pop/genres/"+id1
                if mode == "dseason":
                        filename = "drama_season"
                        full_url = "http://www.crunchyroll.com/videos/drama/seasons/"+id1
                if mode == "aseason":
                        filename = "anime_seasons"
                        full_url = "http://www.crunchyroll.com/videos/anime/seasons/"+id1
                elif mode == "SpBox_scraper":
                        if id1 == "anime_popular":
                                showtype = "topanime"
                                filename = showtype+id1
                                full_url = "http://www.crunchyroll.com/videos/anime/popular"
                        elif id1 == "anime_simulcasts":
                                showtype = "animesim"
                                filename = showtype+id1
                                full_url = "http://www.crunchyroll.com/videos/anime/simulcasts"
                        elif id1 == "drama_popular":
                                showtype = "topdrama"
                                filename = showtype+id1
                                full_url = "http://www.crunchyroll.com/videos/drama/popular"
                        elif id1 == "drama_simulcasts":
                                showtype = "dramasim"
                                filename = showtype+id1
                                full_url = "http://www.crunchyroll.com/videos/drama/simulcasts"
                        elif id1 == "pop_popular":
                                showtype = "pop"
                                filename = showtype+id1
                                full_url = "http://www.crunchyroll.com/videos/pop/popular"
                print full_url
                file_path = os.path.join(self.base_cache_path, filename+".html")
		refreshHTML = self.check_cache_time(file_path)
		if(os.path.exists(file_path) and refreshHTML is False):
			usock = open(file_path, "r")
			rssFeed = usock.read()
		else:
			opener = urllib2.build_opener()
                        opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language',self.getRegion()+',en;q=0.5')]
			#opener.addheaders = [('User-Agent','curl/7.16.3 (Windows  build 7600; en-US; beta) boxee/0.9.21.12594'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7'),('Accept-Language','en-us,en;q=0.5')]
			usock = opener.open(full_url)
			rssFeed = usock.read()
			if usock.headers.get('content-encoding', None) == 'gzip':
				rssFeed = gzip.GzipFile(fileobj=StringIO.StringIO(rssFeed)).read().decode('utf-8','ignore')
		usock.close()
		if (not os.path.exists(file_path)):
			file_object = open(file_path, "w")
			file_object.write(rssFeed.encode('utf-8'))
			file_object.close()
		CrunchyParser().parseSpBoxScrappedSeries(rssFeed)
                
                
		
        #Added to Pull Queue File; Throws error if no login found
	def getQueue(self):
                settings = {}
                local_string = xbmcaddon.Addon(id='plugin.video.crunchyroll-old').getLocalizedString
                notice_msg = local_string(70000).encode("utf8")
                login_try_msg = local_string(70002).encode("utf8")
                setup_msg = local_string(70003).encode("utf8")
                pullshow_msg = local_string(70004).encode("utf8")
                dl_queue = local_string(70005).encode("utf8")
		settings['username'] = __settings__.getSetting("crunchy_username")
		settings['password'] = __settings__.getSetting("crunchy_password")
		cj = cookielib.LWPCookieJar()
		if (settings['username'] != '' and settings['password'] != ''):
			print "CRUNCHYROLL: --> Attempting to log-in with your user account..."
			ex = 'XBMC.Notification("'+notice_msg+':","'+login_try_msg+'...", 3000)'
			xbmc.executebuiltin(ex)
			url = 'https://www.crunchyroll.com/?a=formhandler'
			data = urllib.urlencode({'formname':'RpcApiUser_Login', 'next_url':'','fail_url':'/login','name':settings['username'],'password':settings['password']})
			COOKIEFILE= os.path.join(self.base_cache_path, "crunchycookie.lwp")
                        try:
                                os.remove(COOKIEFILE)
                                print "delete ->" + COOKIEFILE
                        except Exception: 
                                pass
			self.cookie = cj
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
			opener.addheaders = [('Referer', 'https://www.crunchyroll.com'),('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')]
			self.opener = opener
			print "CRUNCHYROLL: --> saving new Cookie."
			urllib2.install_opener(opener)
			req = self.opener.open(url, data)
			req.close()
			cj.save(COOKIEFILE)
		else:
                        ex = 'XBMC.Notification("'+notice_msg+':","'+setup_msg+'.", 3000)'
                        xbmc.executebuiltin(ex)
			print "crunchyroll-takeout -> NO CRUNCHYROLL ACCOUNT FOUND!"
		full_url = "http://www.crunchyroll.com/home/queue"
		file_path = os.path.join(self.base_cache_path, "queue.html")
		refreshRSS = self.check_cache_time(file_path)
		ex = 'XBMC.Notification("'+notice_msg+':","'+dl_queue+'", 3000)'
		xbmc.executebuiltin(ex)
		if(os.path.exists(file_path) and refreshRSS is False):
			usock = open(file_path, "r")
			rssFeed = usock.read()
		else:
                        print "Crunchyroll Takeout: --> getting queue list"
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
			opener.addheaders = [('Referer', 'https://www.crunchyroll.com'),('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'),('Accept-Encoding','deflate, gzip'),('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.7')]
			usock = opener.open(full_url)
			rssFeed = usock.read()
			if usock.headers.get('content-encoding', None) == 'gzip':
				rssFeed = gzip.GzipFile(fileobj=StringIO.StringIO(rssFeed)).read().decode('utf-8','ignore')
		usock.close()
		if (not os.path.exists(file_path)):
			file_object = open(file_path, "w")
			file_object.write(rssFeed.encode('utf-8'))
			file_object.close()
		CrunchyParser().parseQueue(rssFeed)
        
		
	def getImages(self, url, file_path):
		file_path += ".jpg"
		full_path = os.path.join(self.base_cache_path, file_path)
		try:
			if(url):
				if(not os.path.exists(full_path) and url != ""):
					urllib.urlretrieve( url, full_path )
				img_path = full_path
				return img_path
		except:
			urllib.urlcleanup()
			remove_tries = 3
			while remove_tries and os.path.isfile(full_path):
				try:
					os.remove(full_path)
				except:
					remove_tries -=1
					xbmc.sleep(1000)
					
			
	def check_cache_time(self, filename):
		if os.path.exists(filename):
			mod_time = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
			cur_time = datetime.datetime.now()
			elapsed = cur_time - mod_time
			if(elapsed > datetime.timedelta(minutes=60)):
				print "CRUNCHY: --> Removing cached RSS feed..."
				os.remove(filename)
				return False
			else:
				print "CRUNCHY: --> RSS feed is still valid."
				return True
		else:
			print "CRUNCHY: --> RSS feed not found.  Downloading..."
			return False
