# -*- coding: utf-8 -*-
import json
import os
import re
import subprocess
import urllib

from base import mcDown
from base import mcLogger
from base import mcScrapper
		
class bayfilesDown(mcDown):

	@classmethod
	def exist_file(self, url):
		
		out = mcScrapper.get(url)
		m = re.search('<div class="not-found" id="download-header">', out)
		if m :
			return False
		
		return True

	
	def run(self, url, output_file):
		
		print "Bayfile\n"
		logger = mcLogger.getInstance('bayfiles')

		cookies_file = "cookies/bayfiles"
		salida = mcScrapper.get(url, cookies_file)
		
		vfid=""
		rec_vfid = re.compile('var vfid =([^;]*)')
		match = re.search(rec_vfid, salida)
		if match != None:
			vfid=match.group(1).strip()		

		print "vfid :" + vfid
		
		delay=""
		rec_delay = re.compile('var delay =([^;]*)')
		match = re.search(rec_delay, salida)
		if match != None:
			delay=match.group(1)		

		print "delay :" + delay
		
		data = {'action': "startTimer", 'vfid': vfid}

		post = urllib.urlencode(data)
		
		ajax_url = "http://bayfiles.com/ajax_download?" + post
		
		print "Url: " + ajax_url
		
		headers = { 'X-Requested-With' : 'XMLHttpRequest',
					'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
					'Referer' : url,
					'Accept' : 'application/json, text/javascript, */*; q=0.01',
					'Connection' : 'keep-alive',
					'Host' : 'bayfiles.com'					
					}		
		
		salida = mcScrapper.get(ajax_url, cookies_file, headers)		
		#print salida				
				
				
		logger.debug(salida)
		
		jsalida = json.loads(salida)
		
		token = jsalida['token']
		
		print "Token: " + token		

		self.wait(delay)
		
		print "-- END WAIT"
	
		data = {'action': "getLink", 'vfid': vfid, 'token' :  token}
		post = urllib.urlencode(data)		
		ajax_url = "http://bayfiles.com/ajax_download"  		
		print "Url: " + ajax_url		
		
		headers = { 'X-Requested-With' : 'XMLHttpRequest',
					'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
					'Referer' : url,
					'Accept' : 'text/html, */*; q=0.01'
					#,'Content-Length' :	'73'
					#,'Host' : 'bayfiles.com'					
					}				
		
		salida = mcScrapper.post(ajax_url, post, cookies_file, headers)
		#print salida

		surl = ""
		rec_surl = re.compile("javascript:window.location.href = '([^']*)")
		match = re.search(rec_surl, salida)
		if match != None:
			surl=match.group(1)
		
		print "Link : " + surl

		os.system("wget '" + surl + "' -O '" + output_file + "'")

