from django.db import models

import re
import pycurl
import StringIO
import urllib
from HTMLParser import HTMLParser

# Create your models here.

#Some regexp here

def get_link_list_from_request(request):
	"""
		Extract rapidshare URLs from the POST field 'link_list'.

		The textarea value is split into lines, every whitespace
		character is stripped from each line (this also removes
		stray '\\r' from CRLF submissions), and only non-empty
		lines containing 'rapidshare' (case-insensitive) are kept.

		Returns a list of cleaned URL strings.
	"""
	query = request.POST.get('link_list', '')
	#Clear all whitespace from the textarea input
	whitespace = re.compile(r'\s+')
	links = []
	for line in query.split('\n'):
		url = whitespace.sub('', line)
		#The old `.find(...) is not -1` compared an int by
		#identity, which is undefined; use a membership test.
		if url and 'rapidshare' in url.lower():
			links.append(url)
	return links

def check_links(link_list):
	"""
		Check whether the given links are valid on rapidshare by
		POSTing them to rapidshare's checkfiles CGI.

		Returns a tuple (ok, statuses): ok is 1 when a status was
		parsed for every input link, 0 otherwise (with an empty
		list); statuses is a list of booleans, True meaning the
		file exists on a rapidshare server.
	"""
	#Captures the status phrase that follows each link in the reply
	LINK_CHECKER_REGEXP = re.compile(r'<table width="100%"><tr valign=top><td>([\s\w]+)')

	class RSCheckerReader:
		"""Accumulates the HTTP response body from pycurl."""

		def __init__(self):
			#Instance attribute; the old class-level attribute
			#risked leaking state between instances.
			self.html = ""

		def body(self, buf):
			#pycurl WRITEFUNCTION callback: append each chunk
			self.html += buf

		def get_html(self, link_list):
			c = pycurl.Curl()
			c.setopt(c.URL, "http://rapidshare.com/cgi-bin/checkfiles.cgi")
			c.setopt(c.POST, 1)
			c.setopt(c.WRITEFUNCTION, self.body)
			c.setopt(c.POSTFIELDS, "urls=" + "\n".join(link_list))
			try:
				c.perform()
			finally:
				#Release the handle even if the transfer fails
				c.close()
			return self.html

	#Call the rapidshare links checker with a list of links
	checker = RSCheckerReader()
	html = checker.get_html(link_list)

	#One status phrase per link is expected in the reply
	status = LINK_CHECKER_REGEXP.findall(html)

	result = [phrase == "File is on server number" for phrase in status]

	#If the result count differs from the input -> error
	#(the old code used the removed '<>' operator here)
	if len(result) != len(link_list):
		return (0, [])
	return (1, result)


def scan_links_from_url(url):
	"""
		Fetch the page at *url* and return every rapidshare
		download link found in its HTML source.

		Returns a list of matched URL strings (possibly empty).
	"""
	#Buffer that collects the downloaded page
	html_io = StringIO.StringIO()

	curl = pycurl.Curl()
	curl.setopt(pycurl.URL, str(url))
	curl.setopt(pycurl.WRITEFUNCTION, html_io.write)
	try:
		curl.perform()
		html = html_io.getvalue()
	finally:
		#Always release the handle and buffer, even when the
		#transfer raises (the old code leaked both on error).
		curl.close()
		html_io.close()

	#Matches rapidshare.com/.de file URLs; the trailing character
	#classes trim quotes, markup and punctuation after the link
	regexp = re.compile(r"""http\:\/\/rapidshare\.(?:com|de)\/files\/[\d]*\/.*?\..*?[^"\s\<\>]*[^.,;'">\:\s\<\>\)\]\!]""")

	return regexp.findall(html)
