import os, sys, re, time, gzip
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
from ConfigParser import ConfigParser
from emailbots.EmailBot import EmailBot
from urlparse import urlparse, urlsplit
import PIL
from StringIO import StringIO
import mimetypes, mimetools
#from threading import Thread
import thread, random
from utils.logsHandler import Logger
import simplejson as json


"""
Some utility function definitions
"""
def urlEncodeString(s):
    tmphash = {'str' : s }
    encodedStr = urllib.urlencode(tmphash)
    encodedPattern = re.compile(r"^str=(.*)$")
    encodedSearch = encodedPattern.search(encodedStr)
    encodedStr = encodedSearch.groups()[0]
    encodedStr = encodedStr.replace('.', '%2E')
    encodedStr = encodedStr.replace('-', '%2D')
    encodedStr = encodedStr.replace(',', '%2C')
    return (encodedStr)

def encode_multipart_formdata(fields):
    BOUNDARY = mimetools.choose_boundary()
    CRLF = '\r\n'
    L = []
    for (key, value) in fields.iteritems():
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    content_length = str(len(body))
    return content_type, content_length, body

def getTimeStampString():
    """Return the current Unix time, truncated to whole seconds, as a string."""
    return str(int(time.time()))


class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    def http_error_302(self, req, fp, code, msg, headers):
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        infourl.code = code
        return infourl

    http_error_300 = http_error_302
    http_error_301 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302 


class webbot(object):
    # --- Pre-compiled regular expressions shared by all instances ---
    # Matches absolute http/https URLs (used to distinguish relative redirects).
    absUrlPattern = re.compile(r"^https?:\/\/", re.IGNORECASE)
    # Matches any HTML tag, across lines.
    htmlTagPattern = re.compile(r"<[^>]+>", re.MULTILINE | re.DOTALL)
    newlinePattern = re.compile(r"\n")
    multipleWhitespacePattern = re.compile(r"\s+")
    pathEndingWithSlashPattern = re.compile(r"\/$")
    # Markers weebly's registration backend embeds in its responses:
    # 'LOGININSTEAD' => the account already exists; 'SUCCESS' => account created.
    userExistsPattern = re.compile(r"LOGININSTEAD", re.MULTILINE | re.DOTALL)
    userCreatedPattern = re.compile(r"SUCCESS", re.MULTILINE | re.DOTALL)
    emptyStringPattern = re.compile(r"^\s*$", re.MULTILINE | re.DOTALL)

    # Common HTML entities mapped to their literal characters.
    htmlEntitiesDict = {'&nbsp;' : ' ', '&#160;' : ' ', '&amp;' : '&', '&#38;' : '&', '&lt;' : '<', '&#60;' : '<', '&gt;' : '>', '&#62;' : '>', '&apos;' : '\'', '&#39;' : '\'', '&quot;' : '"', '&#34;' : '"'}
    # Set DEBUG to False on prod env
    DEBUG = True
    # Class-wide FIFO of registration records (lists of csv fields) shared by
    # all bot instances; populated by buildRegistrationQueue().
    RegistrationDataQueue = []
    # Mutex list used by the (currently disabled) threading code.
    exitMutexes = []

    def __init__(self, siteUrl):
        # Create the opener object(s). Might need more than one type if we need to get pages with unwanted redirects.
        self.opener = urllib2.build_opener() # This is my normal opener....
        self.no_redirect_opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler()) # this one won't handle redirects.
        #self.debug_opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))
        # Initialize some object properties.
        self.sessionCookies = ""
        self.httpHeaders = { 'User-Agent' : r'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.10) Gecko/20111103 Firefox/3.6.24',  'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-us,en;q=0.5', 'Accept-Encoding' : 'gzip,deflate', 'Accept-Charset' : 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive' : '115', 'Connection' : 'keep-alive', }
        self.homeDir = os.getcwd()
        self.websiteUrl = siteUrl
        self.registrationUrl = None
        self.loginPageUrl = None
        self.requestUrl = self.websiteUrl
        self.baseUrl = None
        self.pageRequest = None
        if self.websiteUrl:
            parsedUrl = urlparse(self.requestUrl)
            self.baseUrl = parsedUrl.scheme + "://" + parsedUrl.netloc
            # Here we just get the webpage pointed to by the website URL
            self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
        self.pageResponse = None
        self.requestMethod = "GET"
        self.postData = {}
        self.sessionCookies = None
        self.currentPageContent = None
	self.buildtime = -1 # This is a variable that specifies the version of the javascript files and functions weebly uses. We will populate it once we retreive the first page from weebly.com
	self.cfgParser = None
        self.username = None
        self.password = None
        self.emailId = None
        self.emailPasswd = None
        self.isLoggedIn = False
        self.registered = False
	self.siteTitle = None
	self.domainName = None
	self.siteType = "business"
	self.siteCategory = "services"
	self.domainType = "subdomain" # Allowable values are 'subdomain', 'newdomain', 'existingdomain'. Default is 'subdomain'
	self.domainTLD = "com"
	self.weeblySiteId = None
	self.weeblyNumericUserId = None
	self.currentTheme = None
	self.weeblyNumericUserIdLocation = None
	self.siteVersion = None
	self.tempUser = '0'
	self.friendRequests = ''
	self.adsenseID = ''
	self.customDomain = ''
	self.hideTitle = '0'
	self.currentHeader = ''
	self.pageid = None
	self.userEvents = None
	self.updatedTheme = '0'
	self.requestToken = ''
	self.availableTabs = {} # Dictionary to hold the tab elements like 'Design', 'Elements', 'Pages', 'Editors', 'Settings' and their respective 'pos' values.
	self.favouriteThemes = {}
	self.availableThemes = {}
	self.availableElements = {}
	self.addedPages = {} # This will be a dictionary of page Ids (keys) and page titles (values).
	cfgFile = "./config/app.cfg"
	self.cfgParser = ConfigParser()
        self.cfgParser.read(cfgFile)
	self.captchaServiceName = self.cfgParser.get("CaptchaAPI", "servicename")
        self.dbcUsername = self.cfgParser.get("CaptchaAPI", "username")
        self.dbcPassword = self.cfgParser.get("CaptchaAPI", "password")
	self.captchaImagesDir = self.cfgParser.get("Data", "captchadumpdir")
	if re.compile("^\s*$").search(self.captchaImagesDir):
	    self.captchaImagesDir = "./captcha_images"
	if re.compile("^\./").search(self.captchaImagesDir):
	    self.captchaImagesDir = self.homeDir + os.path.sep + self.captchaImagesDir
	regDataFile = self.cfgParser.get("Data", "registrationDataFile")
	if re.compile("^\./").search(regDataFile):
	    regDataFile = self.homeDir + os.path.sep + regDataFile[2:].__str__()
	# Expect 'regDataFile' to be a csv file. Each line will contain:
    	# emailId,emailPasswd,username,password,siteTitle, domainName, siteType,siteCategory,domainType, and domainTld.
    	# Out of these, the first 4 (email Id, email password, username and password) are mandatory and should be non-
    	# empty. The bot will start creating a website only if there is a non-empty value for site title and domain name.
    	# If these values are empty, then the bot will abort the process after account creation step. All the rest of 
    	# the values (siteType,siteCategory,domainType, and domainTld) may or may not be empty. Each of these fields
    	# will have a default value (discussed elsewhere) which would be used in the absence of any value for any of 
    	# these fields. 
	# Check if the 'RegistrationDataQueue' is empty or not. If it is empty, read 'regDataFile' to populate it.
	if webbot.RegistrationDataQueue.__len__() == 0:
	    self.__class__.buildRegistrationQueue(regDataFile)
	regDataList = self.popRegistrationInfo()
	print "Handling Registration Data: " + regDataList.__str__()
	if regDataList.__len__() < 4:
	    print "Insufficient information to create account. The registration data file should contain values for atleast the first 4 elements\n"
	    return(None)
	if regDataList.__len__() == 4:
	    self.emailId, self.emailPasswd, self.username, self.password = (regDataList[0], regDataList[1], regDataList[2], regDataList[3])
	elif regDataList.__len__() == 5:
	    self.emailId, self.emailPasswd, self.username, self.password, self.siteTitle = (regDataList[0], regDataList[1], regDataList[2], regDataList[3], regDataList[4])
	elif regDataList.__len__() >= 6:
	    self.emailId, self.emailPasswd, self.username, self.password, self.siteTitle, self.domainName = (regDataList[0], regDataList[1], regDataList[2], regDataList[3], regDataList[4], regDataList[5])
	    for ctr in range(6, regDataList.__len__()):
		if ctr == 6:
		    self.siteType = regDataList[ctr]
		elif ctr == 7:
		    self.siteCategory = regDataList[ctr]
		elif ctr == 8:
		    self.domainType = regDataList[ctr]
		elif ctr == 9:
		    self.domainTLD = regDataList[ctr]
		else:
		    pass
	else:
	    print "Insufficient information to process account creation"
	#print self.emailId, self.emailPasswd, self.username, self.password, self.siteTitle, self.domainName, self.siteType, self.siteCategory, self.domainType, self.domainTLD
	# Now fetch the start page...
	if self.websiteUrl:
            try:
                self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
                self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
                self.httpHeaders["Cookie"] = self.sessionCookies
            except:
                print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
	    	return(None)
            self.httpHeaders["Referer"] = self.requestUrl
            # Initialize the account related variables...
            self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
            if not self.currentPageContent:
                print "Could not access the website content of " + self.websiteUrl
	    else:
		self._getBuildTime()

    
    def buildRegistrationQueue(cls, regDataFile):
	f = open(regDataFile)
	lines = f.readlines() # We are expecting only one line for now.
	f.close()
	commentLinePattern = re.compile(r"^#")
	emptyLinePattern = re.compile(r"^\s*$")
	registrationInfo = []
	dataCount = 0
	for line in lines:
	    line = line.strip() # strip whitespace character (including newlines)
	    if commentLinePattern.search(line):
		continue
	    if emptyLinePattern.search(line):
		continue
	    registrationInfo = line.split(",")
	    cls.RegistrationDataQueue.append(registrationInfo)
	    dataCount += 1
	return(dataCount)

    buildRegistrationQueue = classmethod(buildRegistrationQueue)


    def popRegistrationInfo(self):
	return(self.__class__.RegistrationDataQueue.pop(0))


    def _getBuildTime(self):
        """Scan the current page for a javascript include of the form
        '...js?buildtime=NNNNNNNNNN' and cache the 10-digit value on
        self.buildtime.  Returns self.buildtime either way (it stays at its
        previous value, -1 initially, when no such script tag is found)."""
        buildtimePattern = re.compile(r'\.js\?buildtime=(\d{10})')
        soup = BeautifulSoup(self.currentPageContent)
        for scriptTag in soup.findAll("script", {'type': 'text/javascript'}):
            if scriptTag.has_key("src"):
                buildtimeSearch = buildtimePattern.search(scriptTag["src"])
                if buildtimeSearch:
                    # First matching tag wins.
                    self.buildtime = buildtimeSearch.groups()[0]
                    break
        return self.buildtime


    def getPageUrlPath(cls, url):
	pageUrlPattern = re.compile(r"^(http://.*)/\w+\.\w{2,3}$")
	pageUrlSearch = pageUrlPattern.search(url)
	if pageUrlSearch:
	    pageUrlPath = pageUrlSearch.groups()[0]
	    return(pageUrlPath)
	else:
	    return(None)

    getPageUrlPath = classmethod(getPageUrlPath)


    def postInitializationProcess(self):
	editorAppsUrl = "http://www.weebly.com/editor/apps/tracking_frame_static.php?trigger=home&tracking_extra=0&transaction_id=0&amount=0"
	editorAppsUrl2 = "http://www.weebly.com/editor/apps/tracking_frame_static.php?trigger=unknown_user_home&tracking_extra=0&transaction_id=0&amount=0"
	ipixelUrl = ""
	editorAppsRequest = urllib2.Request(editorAppsUrl, None, self.httpHeaders)
	try:
	    editorAppsResponse = self.no_redirect_opener.open(editorAppsRequest)
            cookie = self.__class__._getCookieFromResponse(editorAppsResponse)
	    if cookie is not None:
	    	self.sessionCookies += cookie + "; "
            self.httpHeaders["Cookie"] = self.sessionCookies
	    editorAppsContent = self.__class__._decodeGzippedContent(editorAppsResponse.read())
	    esoup = BeautifulSoup(editorAppsContent)
	    iframe = esoup.find("iframe")
	    if iframe is not None:
		ipixelUrl = iframe['src']
	    else:
		print "Could not get ipixel URL"
		return(False)
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again: " + sys.exc_info()[1].__str__()
	    return(False)
	editorAppsRequest2 = urllib2.Request(editorAppsUrl2, None, self.httpHeaders)
	try:
	    editorAppsResponse2 = self.no_redirect_opener.open(editorAppsRequest2)
            cookie = self.__class__._getCookieFromResponse(editorAppsResponse2)
	    if cookie is not None:
	    	self.sessionCookies += cookie + "; "
            self.httpHeaders["Cookie"] = self.sessionCookies
	except:
	    print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again: " + sys.exc_info()[1].__str__()
	    return(False)
	ipixelRequest = urllib2.Request(ipixelUrl, None, self.httpHeaders)
	try:
	    ipixelResponse = self.no_redirect_opener.open(ipixelRequest)
	    cookie = self.__class__._getCookieFromResponse(ipixelResponse)
	    if cookie is not None:
            	self.sessionCookies += cookie + "; "
            self.httpHeaders["Cookie"] = self.sessionCookies
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again: " + sys.exc_info()[1].__str__()
	    return(False)
	return(True)
	
    """
    Captcha processing methods: Using DeathByCaptcha ('processCaptchaUsingDBC') service and Decaptcher API ('')
    """
    def processCaptchaUsingDBC(self, captchaUrl):
        apiPath = os.getcwd() + os.path.sep + "api"
        sys.path.append(apiPath)
        import deathbycaptcha
        try:
            client = deathbycaptcha.SocketClient(self.dbcUsername, self.dbcPassword)
            captchaImageResponse = urllib2.urlopen(captchaUrl)
            captchaImage = captchaImageResponse.read()
            strIoCaptchaImage = StringIO(captchaImage)
            captchaDumpDir = self.cfgParser.get("Data", "captchadumpdir")
            timeout = int(self.cfgParser.get("CaptchaAPI", "timeout"))
            minBal = int(self.cfgParser.get("CaptchaAPI", "minbalance"))
            balance = client.get_balance()
            captcha = client.decode(strIoCaptchaImage, timeout)
            if balance < minBal:
                print "Warning: Your DeathByCaptcha service balance is low. Please renew/recharge the service balance to enjoy uninterrupted service."
            if captcha:
                return (captcha["text"])
        except:
            print "Could not retrieve captcha text from deathbycaptcha service. Please check your credentials or balance: " + sys.exc_info()[1].__str__()
            return(None)

    def processCaptchaUsingDecaptcher(self, captchaUrl):
        captchaImageResponse = urllib2.urlopen(captchaUrl)
        captchaImage = captchaImageResponse.read()
        formDataParams = {'function' : 'picture2', 'username' : self.dbcUsername, 'password' : self.dbcPassword, 'pict' : captchaImage, 'pict_to' : '0', 'pict_type' : '0'}
        formDataEncoded = urllib.urlencode(formDataParams)
        headers = {'Content-Type' : 'multipart/form-data', 'Content-Length' : str(len(formDataEncoded))}
        postRequest = urllib2.Request('http://poster.decaptcher.com/', formDataEncoded, headers)
        captchaString = None
        try:
            postResponse = urllib.urlopen(postRequest)
            postResponseContent = postResponse.read()
            responseParts = postResponseContent.split("|")
            if responseParts.__len__() > 5:
                captchaString = responseParts[5]
                print "The returned captcha string from decaptcher is '%s'"%captchaString
            else:
                print "Failed to receive the captcha string from decaptcher. The ResultCode, MajorID and MinorID are: %s, %s and %s"%(responseParts[0], responseParts[1], responseParts[2])
        except:
            print "Failed to make the post request to decaptcher.com. The server returned the following error:\n%s"%(sys.exc_info()[1].__str__())
        return captchaString


    """
    Cookie extractor method to get cookie values from the HTTP response objects. (class method)
    """
    def _getCookieFromResponse(cls, lastHttpResponse):
        cookies = ""
        lastResponseHeaders = lastHttpResponse.info()
        responseCookies = lastResponseHeaders.getheaders("Set-Cookie")
        pathCommaPattern = re.compile(r"path=/\s*;?", re.IGNORECASE)
        domainPattern = re.compile(r"Domain=[^;]+;?", re.IGNORECASE)
        expiresPattern = re.compile(r"Expires=[^;]+;?", re.IGNORECASE)
	deletedPattern = re.compile(r"=deleted;", re.IGNORECASE)
        if responseCookies.__len__() >= 1:
            for cookie in responseCookies:
                cookieParts = cookie.split("path=/")
                cookieParts[0] = re.sub(domainPattern, "", cookieParts[0])
                cookieParts[0] = re.sub(expiresPattern, "", cookieParts[0])
		deletedSearch = deletedPattern.search(cookieParts[0])
		if deletedSearch:
		    continue
                cookies += "; " + cookieParts[0]
	    multipleWhiteSpacesPattern = re.compile(r"\s+")
	    cookies = re.sub(multipleWhiteSpacesPattern, " ", cookies)
	    multipleSemicolonsPattern = re.compile(";\s*;")
	    cookies = re.sub(multipleSemicolonsPattern, "; ", cookies)
	    if re.compile("^\s*;").search(cookies):
		cookies = re.sub(re.compile("^\s*;"), "", cookies)
            return(cookies)
	else:
	    return(None)
    
    _getCookieFromResponse = classmethod(_getCookieFromResponse)


    def _decodeGzippedContent(cls, encoded_content):
        response_stream = StringIO(encoded_content)
        decoded_content = ""
        try:
            gzipper = gzip.GzipFile(fileobj=response_stream)
            decoded_content = gzipper.read()
        except: # Maybe this isn't gzipped content after all....
            decoded_content = encoded_content
        return(decoded_content)

    _decodeGzippedContent = classmethod(_decodeGzippedContent)


    def getPageContent(self):
        if self.pageResponse:
            content = self.pageResponse.read()
            # Remove the line with 'DOCTYPE html PUBLIC' string. It sometimes causes BeautifulSoup to fail in parsing the html
            self.currentPageContent = re.sub(r"<.*DOCTYPE\s+html\s+PUBLIC[^>]+>", "", content)
            return(self.currentPageContent)
        else:
            return None


    def getSignupFormElementsDict(self, pageContent=None):
	if not pageContent:
	    pageContent = self.currentPageContent
	if not pageContent or pageContent == "":
	    return (None)
	# Remove 'div' and 'img' tags. BeautifulSoup can't find the end form tag when they are present.
	divTagEndPattern = re.compile(r"</div>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	divTagStartPattern = re.compile(r"<div\s[^>]+>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	imgPattern = re.compile(r"<img\s[^>]+>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	pageContent = re.sub(divTagStartPattern, "", pageContent)
	pageContent = re.sub(divTagEndPattern, "", pageContent)
	pageContent = re.sub(imgPattern, "", pageContent)
	signupFormIdentifierPattern = re.compile(r"(signup|register)", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	soup = BeautifulSoup(pageContent)
	signupDict = {}
	signupForm = soup.find("form", {'name' : signupFormIdentifierPattern})
	if not signupForm:
	    signupForm = soup.find("form", {'id' : signupFormIdentifierPattern})
	if not signupForm: # Couldn't find signup form... 
	    print "Couldn't find sign up form."
	    return (None)
	else:
	    signupSoup = BeautifulSoup(signupForm.renderContents())
	    # Find all 'input' tags with type=text, password, hidden and all checkboxes
	    allTextInputs = signupSoup.findAll("input", {'type' : 'text'})
	    allHiddenInputs = signupSoup.findAll("input", {'type' : 'hidden'})
	    allPasswordInputs = signupSoup.findAll("input", {'type' : 'password'})
	    submitButtonInput = signupSoup.find("input", {'type' : 'submit'})
	    # TODO: Need to add support for checkboxes and submit button later.
	    for textInp in allTextInputs:
		inputIdentifier = None
		if textInp.has_key("name"):
		    inputIdentifier = textInp["name"]
		    signupDict[inputIdentifier] = ""
	  	elif textInp.has_key("id"):
		    inputIdentifier = textInp["id"]
		    signupDict[inputIdentifier] = ""
		else:
		    pass
		if inputIdentifier is not None and textInp.has_key("value"):
		    signupDict[inputIdentifier] = textInp["value"]
	    for hiddenInp in allHiddenInputs:
		inputIdentifier = None
		if hiddenInp.has_key("name"):
		    inputIdentifier = hiddenInp["name"]
		    signupDict[inputIdentifier] = ""
	  	elif hiddenInp.has_key("id"):
		    inputIdentifier = hiddenInp["id"]
		    signupDict[inputIdentifier] = ""
		else:
		    pass
		if inputIdentifier is not None and hiddenInp.has_key("value"):
		    signupDict[inputIdentifier] = hiddenInp["value"]
	    for passwdInp in allPasswordInputs:
		inputIdentifier = None
		if passwdInp.has_key("name"):
		    inputIdentifier = passwdInp["name"]
		    signupDict[inputIdentifier] = ""
	  	elif passwdInp.has_key("id"):
		    inputIdentifier = passwdInp["id"]
		    signupDict[inputIdentifier] = ""
		else:
		    pass
	    if submitButtonInput is not None and (submitButtonInput.has_key("name") or submitButtonInput.has_key("id")):
		inputIdentifier = ""
		if submitButtonInput.has_key("name"):
		    inputIdentifier = submitButtonInput['name']
		    signupDict[inputIdentifier] = submitButtonInput['value']
		elif submitButtonInput.has_key("id"):
		    inputIdentifier = submitButtonInput['id']
		    signupDict[inputIdentifier] = submitButtonInput['value']
	    return(signupDict)	



    def createAccount(self, logger=None):
	signupform = self.getSignupFormElementsDict()
	if signupform is None:
	    print "Could not retrieve sign up form elements dict...\n"
	    return(None)
	if logger is not None:
	    logger.write("Retrieved elements from sign up form...\n")
	# Now get the js file that contains the captcha image key.
	jsPageUrl = "http://www.weebly.com/weebly/libraries/weebly_ensure_account.js?buildtime=" + self.buildtime.__str__()
	jsPageRequest = urllib2.Request(jsPageUrl, None, self.httpHeaders)
	captchaKey = ""
	try:
            jsPageResponse = self.no_redirect_opener.open(jsPageRequest)
	    jsContent = self.__class__._decodeGzippedContent(jsPageResponse.read())
	    keyLocatorPattern = re.compile(r'Recaptcha.create\(\"([^\"]+)\"', re.MULTILINE | re.DOTALL)
	    keyLocatorSearch = keyLocatorPattern.search(jsContent)
	    if keyLocatorSearch:
		captchaKey = keyLocatorSearch.groups()[0]
	    else:
		print "Could not find the captcha key locator text.\n"
		return(None)
	    logger.write("Successfully requested '%s' to extract recaptcha key...\n"%jsPageUrl)
	except:
	    print "Could not fetch the recaptcha key: " + sys.exc_info()[1].__str__()
	    return(None)
	cachestop = time.time()/10**10
	captchaJsonUrl = "http://www.google.com/recaptcha/api/challenge?k=" + captchaKey + "&ajax=1&cachestop=" + cachestop.__str__()
	# However, before sending the captcha json URL request, we need to send a POST request to http://www.weebly.com/weebly/apps/stats.php?stat=recaptcha.shown&location=signup
	statsUrl = "http://www.weebly.com/weebly/apps/stats.php?stat=recaptcha.shown&location=signup"
	postHeaders = {}
	for hdr in self.httpHeaders.keys():
	    postHeaders[hdr] = self.httpHeaders[hdr]
	postHeaders['X-Prototype-Version'] = '1.7'
	postHeaders['X-Requested-With'] = 'XMLHttpRequest'
	postHeaders['Content-Length'] = '0'
	postHeaders['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
	postHeaders['Accept'] = 'text/javascript, text/html, application/xml, text/xml, */*'
	postHeaders['Referer'] = 'http://www.weebly.com/'
	statsRequest = urllib2.Request(statsUrl, "", postHeaders)
	try:
	    statsResponse = self.no_redirect_opener.open(statsRequest)
	except:
	    print "Failed to make POST request to stats.php page: %s\n"%sys.exc_info()[1].__str__()
	captchaJsonRequest = urllib2.Request(captchaJsonUrl, None, self.httpHeaders)
	try:
	    captchaJsonResponse = self.no_redirect_opener.open(captchaJsonRequest)
	    captchaJsonContent = self.__class__._decodeGzippedContent(captchaJsonResponse.read())
	except:
	    print "Could not find captcha JSON page: " + sys.exc_info()[1].__str__()
	    return(None)
	challengePattern = re.compile(r"challenge\s*:\s*'([^']+)'", re.MULTILINE | re.DOTALL)
	challengeSearch = challengePattern.search(captchaJsonContent)
	captchaChallenge = ""
	if challengeSearch:
	    captchaChallenge = challengeSearch.groups()[0]
	else:
	    print "Could not find captcha challenge string: " + sys.exc_info()[1].__str__()
	    return(None)
	if logger is not None:
	    logger.write("Retrieved captcha challenge '%s'....\n"%captchaChallenge)
	captchaImageUrl = "http://www.google.com/recaptcha/api/image?c=" + captchaChallenge
	captchaString = self.processCaptchaUsingDBC(captchaImageUrl)
	if not captchaString: # Could not fetch the captcha string from the Captcha service.
	    print "Could not fetch the captcha string from the Captcha service\n"
	    return(None)
	if logger is not None:
	    logger.write("Retrieved captcha string '%s'\n"%captchaString)
	print "Captcha String: " + captchaString
	# Now create the POST request 
	self.requestUrl = "http://www.weebly.com/weebly/publicBackend.php"
	emailId = re.sub(r"@", "%40", self.emailId)
	postData = "pos=signup&user=" + emailId + "&name=" + self.username + "&pass=" + self.password + "&campaign=&email=" + emailId + "&create_designer=&designer_company=&designer_portal=&response=&recaptcha_response_field=" + captchaString + "&recaptcha_challenge_field=" + captchaChallenge + "&easy_login=1"
	locationUrl = None
	httpHeaders = {}
	for headerName in self.httpHeaders.keys():
	    if headerName == 'Referer':
		httpHeaders[headerName] = "http://www.weebly.com/"
	    elif headerName == 'Accept':
		httpHeaders[headerName] = "text/javascript, text/html, application/xml, text/xml, */*"
	    else:
	    	httpHeaders[headerName] = self.httpHeaders[headerName]
	httpHeaders['Content-Length'] = postData.__len__()
	httpHeaders['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
	httpHeaders['X-Requested-With'] = "XMLHttpRequest"
	httpHeaders['X-Prototype-Version'] = "1.7"
	if logger is not None:
	    logger.write("Trying to post registration data:\nPOST Data: %s"%postData)
	self.pageRequest = urllib2.Request(self.requestUrl, postData, httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
            self.httpHeaders["Cookie"] += self.sessionCookies.__str__()
	    if logger is not None:
	        logger.write("Cookie state after posting registration data: %s\n"%self.httpHeaders["Cookie"])
	    print "Cookie After Response: %s\n"%(self.httpHeaders["Cookie"])
	except:
	    print "The post request to '/weebly/publicBackend.php' for registration failed: " + sys.exc_info()[1].__str__()
	    if logger is not None:
	        logger.write("The post request to '/weebly/publicBackend.php' for registration failed: %s\n"%sys.exc_info()[1].__str__())
	    return(None)
	self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	userExistsSearch = self.__class__.userExistsPattern.search(self.currentPageContent)
	if userExistsSearch:
	    print "User already exists with username '%s' and email Id '%s'\n"%(self.username, self.emailId)
	    if logger is not None:
		logger.write("User already exists with username '%s' and email Id '%s'\n"%(self.username, self.emailId))
	    return (True)
	else:
	    userCreatedSearch = self.__class__.userCreatedPattern.search(self.currentPageContent)
	    if not userCreatedSearch:
		print "Could not create account with username '%s' and email Id '%s'. Please refer to the 'exitdump.txt' file for more details\n"%(self.username, self.emailId)
		if logger is not None:
		    logger.write("Could not create account with username '%s' and email Id '%s'. Please refer to the 'exitdump.txt' file for more details\n"%(self.username, self.emailId))
		return (None)
	    # Now, send a request to http://www.weebly.com/weebly/autoLogin.php
	    self.requestUrl = "http://www.weebly.com/weebly/autoLogin.php"
	    if logger is not None:
		logger.write("Sending GET request to '%s'\n"%self.requestUrl)
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
	    	self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		responseHeaders = self.pageResponse.info()
		if responseHeaders.has_key("Location"):
		    locationUrl = responseHeaders["Location"]
		    if logger is not None:
			logger.write("Found 'Location' header in response headers: %s\n"%locationUrl)
		else:
		    print "Could not find the location header in response from %s.\n"%self.requestUrl
		    if logger is not None:
			logger.write("Could not find the location header in response from %s.\n"%self.requestUrl)
		    return(None)
	    except:
		print "The post request to '/weebly/autoLogin.php' after registration failed: " + sys.exc_info()[1].__str__()
		if logger is not None:
		    logger.write("The post request to '/weebly/autoLogin.php' after registration failed: %s\n"%sys.exc_info()[1].__str__())
	    	return(None)
	    if locationUrl is not None and not EmailBot._isAbsoluteUrl(locationUrl):
		locationUrl = "http://www.weebly.com" + locationUrl
	    self.requestUrl = locationUrl
	    if logger is not None:
		logger.write("Requesting URL '%s'... \n"%self.requestUrl)
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
	    	self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		responseHeaders = self.pageResponse.info()
		if responseHeaders.has_key("Location"):
		    locationUrl = responseHeaders["Location"]
		    if logger is not None:
			logger.write("Location URL: '%s'\n"%locationUrl)
		else:
		    print "Could not find the location header in response from %s.\n"%self.requestUrl
		    if logger is not None:
			logger.write("Could not find the location header in response from %s.\n"%self.requestUrl)
		    return(None)
	    except:
		print "The post request to '%s' after registration failed: %s\n"%(self.requestUrl, sys.exc_info()[1].__str__())
		if logger is not None:
		    logger.write("The post request to '%s' after registration failed: %s\n"%(self.requestUrl, sys.exc_info()[1].__str__()))
	    	return(None)
	    if locationUrl is not None and not EmailBot._isAbsoluteUrl(locationUrl):
		pageUrlPath = self.__class__.getPageUrlPath(self.requestUrl)
		locationUrl = pageUrlPath + "/" + locationUrl
	    self.requestUrl = locationUrl
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
	    	self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
		if logger is not None:
		    logger.write("Successfully created account for %s - username: %s"%(self.emailId, self.username))
		return(self.currentPageContent)
	    except:
		print "The post request to '%s' after registration failed: %s\n"%(self.requestUrl, sys.exc_info()[1].__str__())
		if logger is not None:
		    logger.write("The post request to '%s' after registration failed: %s\n"%(self.requestUrl, sys.exc_info()[1].__str__()))
	    	return(None)


    # Private method to create a sub domain under weebly.
    def _createSubDomain(self):
	"""
	Register self.domainName as a sub-domain under weebly.com.

	Flow: (1) POST to getElements.php (pos=checkWeeblyDomain) to check
	availability, (2) if available, POST the registration
	(pos=savedomain), (3) GET main.php to load the site editor screen.

	Returns the decoded editor page content on success, None on any
	request failure or missing domain name, and 0 when the chosen
	sub-domain name is not available.
	"""
	if not self.domainName:
	    print "Cannot find value for domain name. Can't proceed any further.\n"
	    return(None)
	domainNameCheckAvailabilityUrl = "http://www.weebly.com/weebly/getElements.php"
	domainNameCheckPostDataDict = {'pos' : 'checkWeeblyDomain', 'weeblySubDomain' : self.domainName, 'cookie' : self.httpHeaders['Cookie']}
	domainNameCheckPostData = urllib.urlencode(domainNameCheckPostDataDict)
	# Clone the shared headers, overriding Content-Length for this body
	# so the shared self.httpHeaders dict is not touched by this check.
	domainCheckPostHeaders = {}
	for hdr in self.httpHeaders.keys():
	    if hdr == "Content-Length":
		domainCheckPostHeaders[hdr] = domainNameCheckPostData.__len__()
		continue
	    domainCheckPostHeaders[hdr] = self.httpHeaders[hdr]
	domainCheckAvailabilityRequest = urllib2.Request(domainNameCheckAvailabilityUrl, domainNameCheckPostData, domainCheckPostHeaders)
	isAvailable = False
	# The availability response carries the literal token %%SUCCESS%% when the name is free.
	SuccessPattern = re.compile(r"%%SUCCESS%%", re.MULTILINE | re.DOTALL)
	try:
	    domainCheckAvailabilityResponse = self.no_redirect_opener.open(domainCheckAvailabilityRequest)
	    responseContent = domainCheckAvailabilityResponse.read()
	    responseContent = self.__class__._decodeGzippedContent(responseContent)
	    if SuccessPattern.search(responseContent):
		isAvailable = True
	except:
	    print "Failed to make a POST request for checking if the domain name suggested by the user is available or not - %s"%sys.exc_info()[1].__str__()
	    return(None)
	if isAvailable is True: # Now we must make the post request for domain registration.
	    self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	    postDataDict = {'pos' : 'savedomain', 'type' : self.domainType, 'domain' : self.domainName }
	    postData = urllib.urlencode(postDataDict)
	    self.httpHeaders['Content-Length'] = postData.__len__()
	    self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	    try:
		self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
		# Check to see if the named domain has been created or not.
	    except:
		print "Could not post the domain information to '%s' - %s"%(self.requestUrl, sys.exc_info()[1].__str__())
		return(None)
	    # Finally we need to make a GET request to main.php to get the next screen.
	    self.requestUrl = "http://www.weebly.com/weebly/main.php"
	    # Drop the POST-only headers from the shared dict before the GET.
	    del self.httpHeaders['Content-type']
	    del self.httpHeaders['Content-Length']
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
		self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
		return(self.currentPageContent)
	    except:
		print "Could not fetch the website Editor screen - Error: %s"%sys.exc_info()[1].__str__()
		return (None)
	else:
	    print "Possibly your domain name '%s' is already is in use. Could not create subdomain."%self.domainName
	    return(0) # returning 0 if the username is not available


    # Private method to create a domain under weebly.
    def _createNewDomain(self):
	self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	postDataDict = {'pos' : 'doevent', 'event' : 'customDomainS2', 'cookie' : self.httpHeaders['Cookie']}
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	    return(self.currentPageContent)
	except:
	    print "Could not fetch the website Editor screen - Error: %s"%sys.exc_info()[1].__str__()
	    return (None)
	# To be completed later.


    # Private method to link an existing domain under weebly.
    def _linkExistingDomain(self):
	if not self.domainName:
	    print "Cannot find value for domain name. Can't proceed any further.\n"
	    return(None)
	self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	postDataDict = {'pos' : 'savedomain', 'type' : 'domain', 'domain' : self.domainName + "." + self.domainTLD, 'continue_not_configured' : '1', 'cookie' : self.httpHeaders['Cookie'] }
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	except:
	    print "Could not post the domain information to '%s' - %s"%(self.requestUrl, sys.exc_info()[1].__str__())
	    return(None)
	# Now make a "GET" request to "http://www.weebly.com/weebly/main.php"
	self.requestUrl = "http://www.weebly.com/weebly/main.php"
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	    return(self.currentPageContent)
	except:
	    print "Could not post the domain information to '%s' - %s"%(self.requestUrl, sys.exc_info()[1].__str__())
	    return(None)


    def doLogout(self):
	# Placeholder: logging out of weebly is not implemented yet.
	pass


    def __repr__(self):
	attribsKeys = self.__dict__.keys()
	r = "Dumping object:\n"
	for akey in attribsKeys:
	    if type(self.__dict__[akey]) == str:
		r += akey + " :  " + self.__dict__[akey] + "\n"
	    elif type(self.__dict__[akey]) == None:
		r += akey + " :  " + "\n"
	    elif type(self.__dict__[akey]) == dict:
		r += akey + " :  " + self.__dict__[akey].__str__()
	    elif type(self.__dict__[akey]) == list:
		r += akey + " :  " + self.__dict__[akey].__str__()
	    elif type(self.__dict__[akey]) == tuple:
		r += akey + " :  " + self.__dict__[akey].__str__()
	    else:
		r += akey + " :  Probably a method\n"
	return(r)


    def _get16DigitTimeStamp(cls):
	ts = int(time.time() * 10000)
	return(ts)
    _get16DigitTimeStamp = classmethod(_get16DigitTimeStamp)

    """
    This method retrieves all the available tabs from the website creation page... It populates the <obj>.availableTabs dict.
    The keys are the tab names while the values are the corresponding POST request objects to each of the tab pages.
    Note: This method should be called only after 'createWebsiteStub' has been called. 
    """
    def getAvailableTabs(self, logger=None):
	"""
	Scrape the '<ul id="weebly-tablist">' markup out of
	self.currentPageContent and build self.availableTabs: a dict
	mapping tab name -> list of pre-built urllib2.Request objects that
	reproduce the browser traffic for clicking that tab.

	Returns self.availableTabs ({} when the tab list cannot be found).
	Must be called only after createWebsiteStub() has populated
	self.currentPageContent.
	"""
	tabTagPattern = re.compile(r"<ul\s+id=\"weebly-tablist\">(.*?)</ul>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	tabTagSearch = tabTagPattern.search(self.currentPageContent)
	html = ""
	if tabTagSearch:
	    html = tabTagSearch.groups()[0]
	else:
	    print "Could not find tabs in the page... Are we on the correct page right now?\n"
	    return {}
	soup = BeautifulSoup(html)
	allSpans = soup.findAll("span")
	url = "http://www.weebly.com/weebly/getElements.php"
	# Build two header sets: POST keeps Referer, GET drops it.
	postHeaders = {}
	getHeaders = {}
	for hdr in self.httpHeaders.keys():
	    postHeaders[hdr] = self.httpHeaders[hdr]
	    if hdr == "Referer":
		continue
	    else:
		getHeaders[hdr] = self.httpHeaders[hdr]
	postHeaders['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
	# NOTE(review): the same postHeaders dict object is shared by every
	# Request built below; later mutations (Content-Length,
	# x-ajax-request-id) are visible to previously-built requests too.
	for span in allSpans:
	    tabName = span.renderContents()
	    if tabName == "Design":
		postDataDict = {'pos' : 'getthemes', 'keys' : '', 'cookie' : self.httpHeaders['Cookie']}
		postData = urllib.urlencode(postDataDict)
		postHeaders['Content-Length'] = postData.__len__()
		ts = self.__class__._get16DigitTimeStamp()
		postHeaders['x-ajax-request-id'] = ts.__str__()
		getUrl = "http://www.weebly.com/weebly/goPage.php?page=themesMenu&1=undefined&2=undefined&3=undefined&4=undefined&5=undefined"
		getRequest = urllib2.Request(getUrl, None, getHeaders)
		self.availableTabs[tabName] = [ urllib2.Request(url, postData, postHeaders), getRequest ]
	    elif tabName == "Elements":
		getUrl = "http://www.weebly.com/weebly/goPage.php?page=editMenu&1=undefined&2=undefined&3=undefined&4=undefined&5=undefined"
		getRequest = urllib2.Request(getUrl, None, getHeaders)
		self.availableTabs[tabName] = [ getRequest, ]
	    elif tabName == "Pages":
		postDataDict = {'pos' : 'doevent', 'event' : 'tab_pages', 'cookie' : self.httpHeaders['Cookie']}
		postData = urllib.urlencode(postDataDict)
		postHeaders['Content-Length'] = postData.__len__()
		ts = self.__class__._get16DigitTimeStamp()
		postHeaders['x-ajax-request-id'] = ts.__str__()
		getUrl = "http://www.weebly.com/weebly/goPage.php?page=pagesMenu&1=undefined&2=undefined&3=undefined&4=undefined&5=undefined"
		getRequest = urllib2.Request(getUrl, None, getHeaders)
		self.availableTabs[tabName] = [ urllib2.Request(url, postData, postHeaders), getRequest ]
	    elif tabName == "Editors":
		postDataDict = {'pos' : 'getcontributors', 'cookie' : self.httpHeaders['Cookie'], 'token' : self.requestToken}
		postData = urllib.urlencode(postDataDict)
		postHeaders['Content-Length'] = postData.__len__()
		ts = self.__class__._get16DigitTimeStamp()
		postHeaders['x-ajax-request-id'] = ts.__str__()
		getUrl = "http://www.weebly.com/weebly/goPage.php?page=contributorsMenu&1=undefined&2=undefined&3=undefined&4=undefined&5=undefined"
		getRequest = urllib2.Request(getUrl, None, getHeaders)
		self.availableTabs[tabName] = [ urllib2.Request(url, postData, postHeaders), getRequest ]
	    elif tabName == "Settings":
		# The Settings tab needs two POSTs (sitesettings, then the
		# tab_settings event) followed by one GET.
		postDataDict1 = {'pos' : 'sitesettings', 'reqid' : self.weeblySiteId, 'cookie' : self.httpHeaders['Cookie'], 'token' : self.requestToken}
		postData1 = urllib.urlencode(postDataDict1)
		postHeaders['Content-Length'] = postData1.__len__()
		ts = self.__class__._get16DigitTimeStamp()
		postHeaders['x-ajax-request-id'] = ts.__str__()
		getUrl = "http://www.weebly.com/weebly/goPage.php?page=displaySiteSettings&1=undefined&2=undefined&3=undefined&4=undefined&5=undefined"
		getRequest = urllib2.Request(getUrl, None, getHeaders)
		postRequest_1 = urllib2.Request(url, postData1, postHeaders)
		postDataDict2 = {'pos' : 'doevent', 'event' : 'tab_settings', 'cookie' : self.httpHeaders['Cookie']}
		postData2 = urllib.urlencode(postDataDict2)
		postHeaders['Content-Length'] = postData2.__len__()
		ts = self.__class__._get16DigitTimeStamp()
		postHeaders['x-ajax-request-id'] = ts.__str__()
		postRequest_2 = urllib2.Request(url, postData2, postHeaders)
		self.availableTabs[tabName] = [ postRequest_1, postRequest_2, getRequest ]
	    else:
		print "Unrecognized tab %s\n"%tabName
	    	if logger:
		    logger.write("Unrecognized tab %s\n"%tabName)
	if logger:
	    logger.write("Found %s tabs: %s\n"%(self.availableTabs.keys().__len__(), self.availableTabs.keys().__str__()))
	return (self.availableTabs)

    
    """
    Open a tab page. Tab name is specified as the second argument.
    Note: The argument 'tabName' is case-insensitive.
    Note: This method doesn't set 'currentPageContent' attribute of the 'webbot' object. It simply returns the content sent by the server in
    response to the click event on the specified tab.
    Note: This method is not very useful since it just fetches the javascript code (embedded in HTML) for changing tabs on weebly.com.
    """
    def openTabPage(self, tabName, logger=None):
	tabFound = False
	for tab in self.availableTabs.keys():
	    if tab.lower() == tabName.lower():
		tabName = tab
		tabFound = True
		break
	# Did we find the tab?
	if not tabFound:
	    print "The requested tab '%s' could not be found"%tabName
	    if logger:
		logger.write("The requested tab '%s' could not be found\n"%tabName)
	    return(None)
	tabRequestsCount = self.availableTabs[tabName].__len__()
	print "Found tab '%s' ... Number of requests to send: %s"%(tabName, tabRequestsCount.__str__())
	if logger:
	    logger.write("Found tab '%s' ... Number of requests to send: %s\n"%(tabName, tabRequestsCount.__str__()))
	requestsList = self.availableTabs[tabName]
	requestCounter = 1
	tabPageContent = ""
	for tabRequest in requestsList:
	    try:
		tabResponse = self.no_redirect_opener.open(tabRequest)
		tabPageContent = self.__class__._decodeGzippedContent(tabResponse.read())
	    except:
		print "Request # %s for tab '%s' failed - Reason: %s"%(requestCounter.__str__(), tabName, sys.exc_info()[1].__str__())
		if logger:
		    logger.write("Request # %s for tab '%s' failed - Reason: %s\n"%(requestCounter.__str__(), tabName, sys.exc_info()[1].__str__()))
	    	return(None)
	    requestCounter += 1
	return(tabPageContent)


    """
    This method attempts to retrieve the list of favourite themes from the content of the 'Design' tab.
    It sets the value of the 'favouriteThemes' attribute of the webbot object.
    """
    def getFavouriteThemes(self, logger=None):
	themePattern = re.compile(r"Weebly.themeListCache.insert\((.*)\);\s+var\s+remoteAccounts\s*=\s*", re.MULTILINE | re.DOTALL)
	themeSearch = themePattern.search(self.currentPageContent)
	themeTextChunk = None
	if themeSearch:
	    themeTextChunk = themeSearch.groups()[0]
	if not themeTextChunk:
	    print "Could not find available themes... Are we on the right page?"
	    return(None)
	themeTextParts = themeTextChunk.split("colors='[{")
	themeIdPattern = re.compile(r"\/(\d+)\\", re.MULTILINE | re.DOTALL)
	themeColourNamePattern = re.compile(r"_screenshot_(\w+)_small\.jpg", re.MULTILINE | re.DOTALL)
	themeColourCodePattern = re.compile(r";(#[\w\d]{6})&", re.MULTILINE | re.DOTALL)
	for themeText in themeTextParts:
	    themeTextList = themeText.split("}]'") # We need the element at index 0 only 
	    themeText1 = themeTextList[0]
	    themeElements = themeText1.split("},{")
	    themeColoursList = []
	    for element in themeElements:
		themeIdSearch = themeIdPattern.search(element)
		themeColourNameSearch = themeColourNamePattern.search(element)
		themeColourCodeSearch = themeColourCodePattern.search(element)
		if themeIdSearch:
		    themeId = themeIdSearch.groups()[0]
		    if not self.favouriteThemes.has_key(themeId):
			self.favouriteThemes[themeId] = []
			themeColourName = ""
			themeColourCode = ""
			if themeColourNameSearch:
			    themeColourName = themeColourNameSearch.groups()[0]
			if themeColourCodeSearch:
			    themeColourCode = themeColourCodeSearch.groups()[0]
			self.favouriteThemes[themeId].append({themeColourCode : themeColourName})
		    else:
			themeColourName = ""
			themeColourCode = ""
			if themeColourNameSearch:
			    themeColourName = themeColourNameSearch.groups()[0]
			if themeColourCodeSearch:
			    themeColourCode = themeColourCodeSearch.groups()[0]
			self.favouriteThemes[themeId].append({themeColourCode : themeColourName})
	if logger:
	    logger.write("Favourite Themes: " + self.favouriteThemes.__str__() + "\n")
	return(self.favouriteThemes)


    """
    This method attempts to retrieve the list of all available themes from the content of the 'Design' tab.
    It sets the value of the 'availableThemes' attribute of the webbot object.
    """
    def getAllAvailableThemes(self, logger=None):
	themesPageUrl = "http://www.weebly.com/theme_browser/"
	themesRequestHeaders = {}
	for hdr in self.httpHeaders.keys():
	    if hdr.lower() in ('x-prototype-version', 'x-ajax-request-id', 'content-length', 'content-type', 'weebly-site-id'):
		continue
	    themesRequestHeaders[hdr] = self.httpHeaders[hdr]
	themesPageRequest = urllib2.Request(themesPageUrl, None, themesRequestHeaders)
	themesPageContent = ""
	try:
	    themesPageResponse = self.no_redirect_opener.open(themesPageRequest)
	    themesPageContent = self.__class__._decodeGzippedContent(themesPageResponse.read())
	except:
	    print "Could not find themes page - Reason: %s"%(sys.exc_info()[1].__str__())
	    if logger:
		logger.write("Could not find themes page - Reason: %s\n"%(sys.exc_info()[1].__str__()))
	    return(None)
	themeSoup = BeautifulSoup(themesPageContent)
	currentPageUrl = themesPageUrl
	# Iterate over multiple pages....
	while True:
	    allThemeDivs = themeSoup.findAll("div", {'class' : 'theme'})
	    for themeDiv in allThemeDivs:
	    	themeId = None
	    	if themeDiv.has_key("id"):
		    themeIdValue = themeDiv['id']
		    (themetext, themeId) = themeIdValue.split("-")
	    	coloursList = []
	        themeDataColourParts = []
	        if themeDiv.has_key('data-colors'):
		    themeDataColours = themeDiv['data-colors']
		    themeDataColours = re.sub(re.compile("&quot;"), "", themeDataColours)
		    themeDataColourParts = themeDataColours.split("},{")
	    	themecolourPattern = re.compile(r"\"value\"\s*:\s*\"([^,]+)\",\s*\"sample\"\s*:\s*\"(.*)\"$", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	    	for themeColourPart in themeDataColourParts:
		    themecolourSearch = themecolourPattern.search(themeColourPart)
		    if themecolourSearch:
		    	colourName = themecolourSearch.groups()[0]
		    	colourCode = themecolourSearch.groups()[1]
		    	coloursList.append({colourCode : colourName})
		    else:
		    	pass # Don't append anything
		    	#coloursList.append({})
	        self.availableThemes[themeId] = coloursList
	    nextPageAnchor = themeSoup.find("a", {'class' : 'next'})
	    nextPageUrl = None
	    if nextPageAnchor.has_key("href"):
		nextPageUrl = nextPageAnchor["href"]
	    else:
		break
	    if nextPageUrl is not None:
	    	nextPageUrl = "http://www.weebly.com" + nextPageUrl
	    if nextPageUrl == currentPageUrl: # We also need to check if the last page URL was the same as this one... if so, break free.
		break
	    print "Next Page URL: " + nextPageUrl
	    themesPageRequest = urllib2.Request(nextPageUrl, None, themesRequestHeaders)
	    themesPageContent = ""
	    try:
	    	themesPageResponse = self.no_redirect_opener.open(themesPageRequest)
	    	themesPageContent = self.__class__._decodeGzippedContent(themesPageResponse.read())
	    except:
	    	print "Could not find themes page - Reason: %s"%(sys.exc_info()[1].__str__())
	    	if logger:
		    logger.write("Could not find themes page - Reason: %s\n"%(sys.exc_info()[1].__str__()))
	    	return(None)
	    themeSoup = BeautifulSoup(themesPageContent)
	    currentPageUrl = nextPageUrl
	if logger:
	    logger.write("All Available Themes: " + self.availableThemes.__str__() + "\n")
	return(self.availableThemes)


    """
    Adds a new page on the website. Sets the page name and the page title with the values passed as arguments.
    """
    def addNewPage(self, pageTitle, logger=None):
	"""
	Add a new page titled pageTitle to the website via two POSTs to
	getElements.php: (1) pos=right creates the page (the JSON response
	carries its new id, recorded in self.addedPages), (2) the
	'updatePages' event commits the page list.

	Returns True on success, False when either request fails.
	"""
	self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	# NOTE(review): 'hierarchy' is a Python list, so urlencode() sends its
	# str() form - presumably the server tolerates this; verify.
	postDataDict = {'pos' : 'right', 'site_id' : self.weeblySiteId, 'pageid' : '', 'blog' : '0', 'newtitle' : pageTitle, 'hidden' : '0', 'pwprotected' : '0', 'header' : '', 'footer' : '', 'keywords' : '', 'seo_title' : '', 'description' : '', 'extlink' : '', 'layout' : 'tall-header', 'hierarchy' : [{"id":self.pageid,"children":[]},{"id":"","children":[]}] , 'cookie' : self.httpHeaders['Cookie']}
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	addedPageId = None
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    pageContent = self.__class__._decodeGzippedContent(self.pageResponse.read())
	    jsonDataStruct = json.loads(pageContent)
	    if jsonDataStruct.has_key("id"):
		addedPageId = jsonDataStruct["id"]
	except:
	    print "Couldn't create page titled '%s' at step #1: %s"%(pageTitle, sys.exc_info()[1].__str__())
	    if logger:
		logger.write("Couldn't create page titled '%s' at step #1: %s"%(pageTitle, sys.exc_info()[1].__str__()))
	    return(False)
	self.addedPages[addedPageId] = pageTitle
	# Now, the second request...
	postDataDict = {'pos' : 'doevent', 'event' : 'updatePages', 'cookie' : self.httpHeaders['Cookie']}
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	except:
	    print "Couldn't create page titled '%s' at step #2: %s"%(pageTitle, sys.exc_info()[1].__str__())
	    if logger:
		logger.write("Couldn't create page titled '%s' at step #2: %s"%(pageTitle, sys.exc_info()[1].__str__()))
	    return(False)
	if logger:
	    logger.write("Successfully added page titled '%s' for user '%s'...\n"%(pageTitle, self.emailId))
	return(True)



    """
    Self explanatory - deletes the page whose name is specified as the second argument.
    """
    def deletePage(self, pageName, logger=None):
	# Placeholder: page deletion is not implemented yet.
	pass


    def doLogin(self, logger=None):
	# Placeholder: logging in to an existing account is not implemented yet.
	pass

    
    """
    This method sets the theme for the website being created. The 'themeId' parameter is optional.
    If a valid theme ID is passed, then that theme is selected for the website being created. If
    'themeId' is not passed, or 'None' is passed, then this method will randomly select a theme and
    set it for the website. The 'randomFlag' parameter is responsible for this behaviour. The
    default value for 'randomFlag' is True, so if random theme selection is not desirable, then
    you need to pass a 'False' value explicitly for 'randomFlag' parameter. Hence, passing 'None' for
    themeId and explicitly passing 'False' for 'randomFlag' will leave the website unaltered.
    Note: the 'themeId' parameter will be a 18 digit numeric string (NOT an integer).
    """
    def setTheme(self, logger=None, themeId=None, randomFlag=True):
	if themeId is not None:
	    postDataDict = {'pos' : 'settheme', 'keys' : themeId, 'color' : '', 'current_page_id' : self.pageid}
	    self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	    postData = urllib.urlencode(postDataDict)
	    self.httpHeaders['Content-Length'] = postData.__len__()
	    self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	    try:
		self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		print "Theme %s has been set successfully"%themeId
		if logger:
		    logger.write("Theme %s has been set successfully\n"%themeId)
		return(False)
	    except:
		print "Could not set theme - %s"%sys.exc_info()[1].__str__()
		if logger:
		    logger.write("Could not set theme - %s\n"%sys.exc_info()[1].__str__())
		return(True)
	else:
	    if not randomFlag:
		return(False)
	    themesCount = self.availableThemes.keys().__len__()
	    themesList = self.availableThemes.keys()
	    # Now randomly select a theme Id.
	    randIndex = random.randrange(0, themesCount - 1)
	    selectedThemeId = themesList[randIndex]
	    # Check if colours are available for this theme Id.
	    coloursList = self.availableThemes[selectedThemeId]
	    selectedColour = ""
	    if coloursList.__len__() > 0:
		selectedColourIndex = random.randrange(0, coloursList.__len__() - 1)
		for colourkey in coloursList[selectedColourIndex].keys():
		    selectedColour = coloursList[selectedColourIndex][colourkey].__str__()
		    break
	    # Now form the post data....
	    postDataDict = {'pos' : 'settheme', 'keys' : selectedThemeId.__str__(), 'color' : selectedColour, 'current_page_id' : self.pageid}
	    self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	    postData = urllib.urlencode(postDataDict)
	    self.httpHeaders['Content-Length'] = postData.__len__()
	    self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders)
	    try:
		self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
		print "Theme %s has been set successfully"%selectedThemeId
		if logger:
		    logger.write("Theme %s has been set successfully\n"%selectedThemeId)
		return(False)
	    except:
		print "Could not set theme - %s"%sys.exc_info()[1].__str__()
		if logger:
		    logger.write("Could not set theme - %s\n"%sys.exc_info()[1].__str__())
		return(True)




    """
    Internal method to populate the website related attributes ('weeblySiteId', 'weeblyNumericUserId', 'currentTheme', 'weeblyNumericUserIdLocation',
    'siteVersion', 'tempUser', 'friendRequests', 'adsenseID', 'customDomain', 'hideTitle', 'currentHeader', 'pageid', 'userEvents' and 'updatedTheme').
    """
    def _populateWebsiteAttributes(self, logger=None):
	"""
	Extract the site bootstrap values out of the initMain(...) call
	embedded in self.currentPageContent and store them on self
	(weeblySiteId, weeblyNumericUserId, currentTheme,
	weeblyNumericUserIdLocation, siteVersion, tempUser, friendRequests,
	adsenseID, customDomain, hideTitle, currentHeader, pageid,
	userEvents, updatedTheme, requestToken).

	Also installs the 'Weebly-Site-ID' and 'x-ajax-request-id' keys
	into self.httpHeaders for all subsequent requests.  Each attribute
	is only assigned when its pattern matches.  Returns self, or None
	when no initMain(...) call is present in the page.
	"""
	initMainPattern = re.compile(r"initMain\((.*?)\);", re.MULTILINE | re.DOTALL)
	initMainSearch = initMainPattern.search(self.currentPageContent)
	if not initMainSearch:
	    if logger is not None:
		logger.write("Could not populate website related attributes - probably the website registration was not successful\n")
	    print "Could not populate website related attributes - probably the website registration was not successful"
	    return (None)
	initMainContent = initMainSearch.groups()[0]
	# One regex per initMain argument; all search the same extracted blob.
	weeblySiteIdPattern = re.compile(r"currentSite\s*:\s*'(\d+)',", re.MULTILINE | re.DOTALL)
	weeblyNumericUserIdPattern = re.compile(r"userID\s*:\s*'(\d+)\',", re.MULTILINE | re.DOTALL)
	currentThemePattern = re.compile(r"currentTheme\s*:\s*'(\d+)',", re.MULTILINE | re.DOTALL)
	weeblyNumericUserIdLocationPattern = re.compile(r"userIDLocation\s*:\s*'([^']+)',", re.MULTILINE | re.DOTALL)
	siteVersionPattern = re.compile(r"siteVersion\s*:\s*'(\d+)',", re.MULTILINE | re.DOTALL)
	tempUserPattern = re.compile(r"tempUser\s*:\s*'(\d+)'", re.MULTILINE | re.DOTALL)
	friendRequestsPattern = re.compile(r"friendRequests\s*:\s*'([^']+)',", re.MULTILINE | re.DOTALL)
	adsenseIDPattern = re.compile(r"adsenseID\s*:\s*'([^']+)',", re.MULTILINE | re.DOTALL)
	customDomainPattern = re.compile(r"customDomain\s*:\s*'([^']+)',", re.MULTILINE | re.DOTALL)
	hideTitlePattern = re.compile(r"hideTitle\s*:\s*(\d+)\s*,", re.MULTILINE | re.DOTALL)
	currentHeaderPattern = re.compile(r"currentHeader\s*:\s*'([^']+)',", re.MULTILINE | re.DOTALL)
	pageidPattern = re.compile(r"pageid\s*:\s*(\[[^\]]+\]),\s+", re.MULTILINE | re.DOTALL)
	userEventsPattern = re.compile(r"userEvents\s*:\s*'(\{[^\{]+\})'", re.MULTILINE | re.DOTALL)
	updatedThemePattern = re.compile(r"updatedTheme\s*:\s*'(\d+)'", re.MULTILINE | re.DOTALL)
	requestTokenPattern = re.compile(r"Weebly.RequestToken\s*=\s*'([^']+)';", re.MULTILINE | re.DOTALL)
	weeblySiteIdSearch = weeblySiteIdPattern.search(initMainContent)
	if weeblySiteIdSearch:
	    self.weeblySiteId = weeblySiteIdSearch.groups()[0]
	# From this point onwards, all requests to weebly.com for the website under process should be sent with a header named 'Weebly-Site-ID'.
	# Additionally, another header named 'x-ajax-request-id' will also be sent with every request to weebly.com
	self.httpHeaders['Weebly-Site-ID'] = self.weeblySiteId
	self.httpHeaders['x-ajax-request-id'] = None # This needs to be populated for each request with a 16 digit timestamp value
	weeblyNumericUserIdSearch = weeblyNumericUserIdPattern.search(initMainContent)
	if weeblyNumericUserIdSearch:
	    self.weeblyNumericUserId = weeblyNumericUserIdSearch.groups()[0]
	currentThemeSearch = currentThemePattern.search(initMainContent)
	if currentThemeSearch:
	    self.currentTheme = currentThemeSearch.groups()[0]
	weeblyNumericUserIdLocationSearch = weeblyNumericUserIdLocationPattern.search(initMainContent)
	if weeblyNumericUserIdLocationSearch:
	    self.weeblyNumericUserIdLocation = weeblyNumericUserIdLocationSearch.groups()[0]
	siteVersionSearch = siteVersionPattern.search(initMainContent)
	if siteVersionSearch:
	    self.siteVersion = siteVersionSearch.groups()[0]
	tempUserSearch = tempUserPattern.search(initMainContent)
	if tempUserSearch:
	    self.tempUser = tempUserSearch.groups()[0]
	friendRequestsSearch = friendRequestsPattern.search(initMainContent)
	if friendRequestsSearch:
	    self.friendRequests = friendRequestsSearch.groups()[0]
	adsenseIDSearch = adsenseIDPattern.search(initMainContent)
	if adsenseIDSearch:
	    self.adsenseID = adsenseIDSearch.groups()[0]
	customDomainSearch = customDomainPattern.search(initMainContent)
	if customDomainSearch:
	    self.customDomain = customDomainSearch.groups()[0]
	hideTitleSearch = hideTitlePattern.search(initMainContent)
	if hideTitleSearch:
	    self.hideTitle = hideTitleSearch.groups()[0]
	currentHeaderSearch = currentHeaderPattern.search(initMainContent)
	if currentHeaderSearch:
	    self.currentHeader = currentHeaderSearch.groups()[0]
	pageidSearch = pageidPattern.search(initMainContent)
	if pageidSearch:
	    self.pageid = pageidSearch.groups()[0]
	userEventsSearch = userEventsPattern.search(initMainContent)
	if userEventsSearch:
	    self.userEvents = userEventsSearch.groups()[0]
	updatedThemeSearch = updatedThemePattern.search(initMainContent)
	if updatedThemeSearch:
	    self.updatedTheme = updatedThemeSearch.groups()[0]
	# The request token lives outside the initMain(...) argument list,
	# so it is searched in the full page content.
	requestTokenSearch = requestTokenPattern.search(self.currentPageContent)
	if requestTokenSearch:
	    self.requestToken = requestTokenSearch.groups()[0]
	if logger is not None:
	    logger.write("Populated attribute values...\n")
	return(self)
	

    def createWebsiteStub(self, logger=None):
	"""
	Create the initial website on weebly after account registration.

	Steps: (1) POST the site title/type/category, (2) POST the domain
	availability check, (3) branch on self.domainType to create a
	sub-domain, a new domain, or link an existing one, then populate
	the site attributes from the resulting page.

	Returns 0 for insufficient info, -1/-2 when step 1/2 fails, -3 for
	an unknown domain type; otherwise falls through with no explicit
	return value (None) after delegating to the domain helpers.
	"""
	if not self.siteTitle or not self.domainName:
	    print "Can't create website due to insufficient information\n"
	    if logger is not None:
		logger.write("Can't create website due to insufficient information\n")
	    return(0) # Return code '0' for insufficient info.
	self.httpHeaders['Referer'] = self.requestUrl
	self.requestUrl = "http://www.weebly.com/weebly/getElements.php"
	# Switch the shared headers into AJAX POST mode for the stub requests.
	self.httpHeaders['Content-type'] = "application/x-www-form-urlencoded; charset=UTF-8"
	self.httpHeaders['X-Requested-With'] = "XMLHttpRequest"
	self.httpHeaders['X-Prototype-Version'] = "1.7"
	postDataDict = {'pos' : 'sitetitle', 'newtitle' : self.siteTitle, 'site_type' : self.siteType, 'site_category' : self.siteCategory, 'cookie' : self.httpHeaders['Cookie']}
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders) # Request for website title
	if logger is not None:
	    logger.write("Trying to set basic website info - site type: '%s', site category: '%s', site title: '%s'\n"%(self.siteType, self.siteCategory, self.siteTitle))
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	except:
	    print "Failed to create website at step #1: Error - %s\n"%(sys.exc_info()[1].__str__())
	    if logger is not None:
		logger.write("Failed to create website at step #1: Error - %s\n"%(sys.exc_info()[1].__str__()))
	    return(-1)
	if logger is not None:
	    logger.write("Set website title to '%s'\n"%self.siteTitle)
	postDataDict = {'pos' : 'checkdomain', 'domainname' : self.domainName, 'domaintld' : self.domainTLD, 'cookie' : self.httpHeaders['Cookie']} # re-initialized postDataDict for use in request for domain name
	postData = urllib.urlencode(postDataDict)
	self.httpHeaders['Content-Length'] = postData.__len__()
	self.pageRequest = urllib2.Request(self.requestUrl, postData, self.httpHeaders) # Request for domain name
	if logger is not None:
	    logger.write("Trying to set website domain info - Domain Name: '%s', Domain TLD: '%s'\n"%(self.domainName, self.domainTLD))
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	except:
	    print "Failed to create website at step #2: Error - %s\n"%(sys.exc_info()[1].__str__())
	    if logger is not None:
		logger.write("Failed to create website at step #2: Error - %s\n"%(sys.exc_info()[1].__str__()))
	    return(-2)
	if logger is not None:
	    logger.write("Successfully set domain info: '%s'\n"%(self.domainName + "." + self.domainTLD))
	# Now, based on user's preferences, create subdomain or a domain on weebly or link an existing domain to this weebly account.
	if self.domainType.lower() == "subdomain":
	    if logger is not None:
		logger.write("Trying to create sub-domain under Weebly...\n")
	    self._createSubDomain()
	    self._populateWebsiteAttributes(logger)
	elif self.domainType.lower() == "newdomain":
	    if logger is not None:
		logger.write("Trying to create new domain...\n")
	    self._createNewDomain()
	    self._populateWebsiteAttributes(logger)
	elif self.domainType.lower() == "existingdomain":
	    if logger is not None:
		logger.write("Trying to link an existing domain...\n")
	    self._linkExistingDomain()
	    self._populateWebsiteAttributes(logger)
	else:
	    print "Unhandled domain type in step #3. \n"
	    return(-3)


def runApp(tid, logPath, logFile, regDataQueueMutex):
    while webbot.RegistrationDataQueue.__len__() > 0:
	regDataQueueMutex.acquire()
	logName = logPath + os.path.sep + logFile + "_%s.log"%(tid.__str__())
	if not os.path.exists(logName):
	    logger = Logger(logName)
	else:
	    logger = Logger(logName, "a+")
	print "Thread ID: %s creating bot object..."%tid.__str__()
	logger.write("Thread ID: %s creating bot object...\n"%tid.__str__())
    	bot = webbot("http://www.weebly.com/")
	regDataQueueMutex.release()
    	if bot is None:
	    print "Skipping subsequent processes ... \n"
	    logger.write("Skipping subsequent processes ... \n\n")
	    logger.close()
	    continue
	else:
    	    bot.postInitializationProcess()
    	    logger.write("Created bot object for operating on Weebly.com\n")
    	    content = bot.createAccount(logger)
    	if not content:
	    print "Could not create account on weebly... Skipping subsequent processes\n"
	    logger.write("Could not create account on weebly... Skipping subsequent processes\n")
	    logger.close()
	    continue
	else:
    	    bot.createWebsiteStub(logger)
	    tabsDict = bot.getAvailableTabs(logger)
	    #print bot.__repr__
	    # Now navigate to all tabs...
	    for tab in tabsDict.keys():
	    	bot.openTabPage(tab, logger)
	    #bot.getFavouriteThemes(logger)
	    bot.getAllAvailableThemes(logger)
	    bot.setTheme(logger)
	    bot.addNewPage("About Us", logger)
	    bot.addNewPage("Contact Us", logger)
	    bot.addNewPage("Our Team", logger)
	logger.close()
    webbot.exitMutexes[tid].acquire()


if __name__ == "__main__":
    # Load runtime settings (thread count, data file, log locations).
    cfgFile = "./config/app.cfg"
    cfgParser = ConfigParser()
    cfgParser.read(cfgFile)
    maxThreads = cfgParser.get("Threads", "maxcount")
    regDataFile = cfgParser.get("Data", "registrationDataFile")
    logPath = cfgParser.get("Logging", "logpath")
    logFile = cfgParser.get("Logging", "logfile")
    # Fall back to defaults when a config value is missing or blank.
    if not logPath or not logPath.strip():
        logPath = "./logs"
    if not logFile or not logFile.strip():
        logFile = "autoweebly"
    # Expand a leading "./" into an absolute path for the data file.
    if regDataFile.startswith("./"):
        regDataFile = os.getcwd() + os.path.sep + regDataFile[2:]
    webbot.buildRegistrationQueue(regDataFile)
    regDataQueueMutex = thread.allocate_lock()
    for thr in range(int(maxThreads)):
        webbot.exitMutexes.append(thread.allocate_lock())
        thread.start_new(runApp, (thr, logPath, logFile, regDataQueueMutex))
    # The low-level `thread` module has no join(); each worker acquires
    # its exit mutex when finished, so wait until every mutex is held.
    # Sleep briefly between polls to avoid pegging a CPU core.
    for mutex in webbot.exitMutexes:
        while not mutex.locked():
            time.sleep(0.1)




