# IMPORTS ---------------------------------------------------------
import urllib
import urllib2
import re
import os.path
import datetime
# init  ------------------------------------------------------
# Sentinel value: any string longer than 2 chars makes the main
# while-loop condition pass on the first iteration; it is replaced
# by operator input inside the loop.
url = "TBD"
# FUNCIONES ---------------------------------------------------
def writeFile(fileX, content):
    """Write `content` to the file at path `fileX` and report success.

    `content` may be a single string or an iterable of strings
    (it is passed straight to file.writelines).
    """
    # `with` guarantees the handle is closed even if writelines raises
    # (the old open/close pair leaked the descriptor on error).
    with open(fileX, "w") as objFile:
        objFile.writelines(content)
    # Parentheses keep this valid in both Python 2 and 3 for a single arg.
    print(" . " + fileX + " ... OK")

#MAIN ---------------------------------------------------------
# Interactive operator loop: for each suspect URL entered, dump the raw
# page, extract its unique links, and probe the host for known web-shell
# paths (shells.txt) and ~user directories (usernames.txt).
# All output files are written under rootFolder\<project-name derived
# from the URL>.


def _probePaths(baseUrl, wordlistFile, prefix):
    """Request baseUrl + prefix + <entry> for every non-comment entry of
    `wordlistFile`; return the entries that answered, one per line.

    Entries are stripped before use: the old code embedded the trailing
    newline of each wordlist line into the probed URL.
    """
    found = ""
    with open(wordlistFile) as infile:
        for rawLine in infile:
            candidate = rawLine.strip()
            if not candidate or candidate.startswith('#'):
                continue  # skip blanks and commented-out entries
            req = urllib2.Request(baseUrl + prefix + candidate)
            req.add_header('Referer', 'http://www.python.org/')
            try:
                urllib2.urlopen(req, timeout=10)
                found += candidate + "\n"  # reachable -> record the hit
            except urllib2.URLError:
                pass  # 404 / unreachable: entry not present, keep probing
    return found


print("PhishBuster @ CSIRTBanelco\n||||||||||||||||||||||||||\n")
while len(url) > 2:
    # PARAMETERS -----------------------------------------------------------
    rootFolder = 'C:\\csiwebtool\\'
    print(">> Tipear la url completa:")
    url = raw_input()
    if url == "bye":
        print("_______________ \r Bye!")
        break
    if len(url) > 5:
        # Normalize the scheme and derive a filesystem-safe project name.
        if url[:4] != 'http':
            url = 'http://' + url
        projectName = url.replace('http://', '').replace('/', '-')
        projectDirectory = rootFolder + projectName
        print(">> Creando directorios")
        if not os.path.exists(rootFolder):
            os.makedirs(rootFolder)
        if not os.path.exists(projectDirectory):
            os.makedirs(projectDirectory)
        print(" . Directorios ... OK")
        print(">> Analizando URL")
        dumpFileLocation = projectDirectory + '\\' + projectName + '.html'
        linksFileLocation = projectDirectory + '\\' + projectName + '.links'
        bruteFileLocation = projectDirectory + '\\' + projectName + '.brute'
        usersFileLocation = projectDirectory + '\\' + projectName + '.users'
        try:
            response = urllib2.urlopen(urllib2.Request(url))
            the_page = response.read()
        except Exception:
            # Best-effort fetch (was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit): report and skip the analysis.
            print(" . Error al leer el sitio:" + url)
            the_page = "@Error"
        if the_page != "@Error":
            # Raw page dump ----------------------------------------------------
            writeFile(dumpFileLocation, the_page)
            # Unique href targets ----------------------------------------------
            urls = re.findall(r'href=[\'"]?([^\'" >]+)', the_page)
            writeFile(linksFileLocation, '\n '.join(list(set(urls))))
            # Probe for known web shells and ~user home directories ------------
            bruteForceResult = _probePaths(url, "shells.txt", "/")
            writeFile(bruteFileLocation, bruteForceResult)
            usersFound = _probePaths(url, "usernames.txt", "/~")
            writeFile(usersFileLocation, usersFound)
        print(">> Analisis finalizado.\n")
        print(">> Desea analizar una nueva URL?(y/n)")
        option = raw_input()
        if option == "y":
            url = "next"  # any value longer than 2 chars re-enters the loop
        else:
            print("_______________ \r Bye!")
            break