## {{{ http://code.activestate.com/recipes/576551/ (r2)

###############################################################
#
#  This web crawler was taken from the above link
#  it was then modified so that at each URL the 
#  crawler visits we scan the HTML, JScript, and URL
#  information looking for malicious code or generally
#  speaking signs of a cross-site scripting attack.
#
#  This code is intended for Alex Wellock and Nick Anderson's
#  UWYO COSC5560 AI Final project.
#
#  To run this code, you must have slimit and beautiful soup4
#  installed.  The easiest way to do this is with python's 
#  pip installer package. To install pip, run the following
#
#    sudo apt-get install python-pip
#    sudo pip install --upgrade pip
#
#  OR
#
#    sudo apt-get install python-setuptools
#    sudo easy_install pip    
#
#  Then to install slimit, run the following from the command line
#    
#    sudo pip install slimit
#    sudo pip install beautifulsoup4
#  
#
###############################################################

#!/usr/bin/env python

"""Web Crawler/Spider

This module implements a web crawler. This is very _basic_ only
and needs to be extended to do anything useful with the
traversed pages.
"""

import re
import sys
import time
import math
import urllib2
import urlparse
import optparse
import commands
import subprocess as sub
from cgi import escape
from traceback import format_exc
from Queue import Queue, Empty as QueueEmpty
from slimit.parser import Parser
from slimit.visitors import nodevisitor
from slimit import ast
from bs4 import BeautifulSoup

# Original Author Information
__version__ = "0.2"
__copyright__ = "CopyRight (C) 2008-2011 by James Mills"
__license__ = "MIT"
__author__ = "James Mills"
__author_email__ = "James Mills, James dot Mills st dotred dot com dot au"

# optparse usage/version templates; optparse expands %prog to the script name.
USAGE = "%prog [options] <url>"
VERSION = "%prog v" + __version__
# User-Agent header value sent with every HTTP request (see Fetcher._addHeaders).
AGENT = "%s/%s" % (__name__, __version__)

# Storage Container for Potentially Malicious webpages
# (appended to by mal_scan, reported by main at the end of the crawl).
MAL_SITES = []

class Crawler(object):
    """Breadth-first crawler rooted at *root*.

    root   -- starting URL
    depth  -- maximum number of URLs to process (-1 / negative = unbounded)
    locked -- when True, only follow URLs whose host matches the root's host
    """

    def __init__(self, root, depth, locked=True):
        self.root = root
        self.depth = depth
        self.locked = locked
        # Network-location (host[:port]) component of the root URL.
        self.host = urlparse.urlparse(root)[1]
        self.urls = []        # every unique URL discovered
        self.links = 0        # count of unique links found
        self.followed = 0     # count of pages actually fetched

    def crawl(self):
        """Fetch the root page, then breadth-first follow its links."""
        page = Fetcher(self.root)
        page.fetch()
        q = Queue()
        for url in page.urls:
            q.put(url)
        followed = [self.root]

        n = 0

        while True:
            try:
                # BUG FIX: the original used q.get(), which blocks
                # forever on an empty queue -- Queue.Empty is only
                # raised by a non-blocking get, so the crawler hung
                # instead of terminating when the frontier drained.
                url = q.get_nowait()
            except QueueEmpty:
                break

            n += 1

            if url not in followed:
                try:
                    host = urlparse.urlparse(url)[1]
                    if self.locked and re.match(".*%s" % self.host, host):
                        followed.append(url)
                        self.followed += 1
                        page = Fetcher(url)
                        page.fetch()
                        # Use a distinct loop name so the URL currently
                        # being processed is not shadowed.
                        for i, link in enumerate(page):
                            if link not in self.urls:
                                self.links += 1
                                q.put(link)
                                self.urls.append(link)
                        if n >= self.depth and self.depth >= 0:
                            break
                except Exception as e:
                    print("ERROR: Can't process url '%s' (%s)" % (url, e))
                    print(format_exc())

class Fetcher(object):
    """Downloads a single page, hands it to the malware scanner, and
    collects the absolute URLs of the page's <a href=...> anchors."""

    def __init__(self, url):
        self.url = url
        self.urls = []   # absolute link targets harvested from the page

    def __getitem__(self, x):
        return self.urls[x]

    def _addHeaders(self, request):
        # Identify this crawler to the server.
        request.add_header("User-Agent", AGENT)

    def open(self):
        """Return a (request, opener) pair, or None on failure."""
        url = self.url
        try:
            request = urllib2.Request(url)
            handle = urllib2.build_opener()
        except IOError:
            return None
        return (request, handle)

    def fetch(self):
        """Fetch self.url, run mal_scan on it, and harvest its links."""
        pair = self.open()
        if pair is None:
            # BUG FIX: the original unconditionally unpacked open()'s
            # result (and called _addHeaders) before checking for the
            # None failure case, raising TypeError on a failed open.
            return
        request, handle = pair
        self._addHeaders(request)
        try:
            content = unicode(handle.open(request).read(), "utf-8",
                    errors="replace")
            soup = BeautifulSoup(content)
            #Here is where we make our call to the scanner
            mal_scan(soup, soup.findAll('script'), self.url)
            tags = soup('a')
        except urllib2.HTTPError, error:
            if error.code == 404:
                print >> sys.stderr, "ERROR: %s -> %s" % (error, error.url)
            else:
                print >> sys.stderr, "ERROR: %s" % error
            tags = []
        except urllib2.URLError, error:
            print >> sys.stderr, "ERROR: %s" % error
            tags = []
        for tag in tags:
            href = tag.get("href")
            if href is not None:
                # Resolve relative links against the current page.
                url = urlparse.urljoin(self.url, escape(href))
                if url not in self:
                    self.urls.append(url)

def getLinks(url):
    page = Fetcher(url)
    page.fetch()
    for i, url in enumerate(page):
        print "%d. %s" % (i, url)

def parse_options():
    """parse_options() -> opts, args

    Parse any command-line options given returning both
    the parsed options and arguments.

    Exits with status 1 (after printing help) when no URL argument
    was supplied.
    """

    parser = optparse.OptionParser(usage=USAGE, version=VERSION)
    parser.add_option("-q", "--quiet", action="store_true", default=False,
                      dest="quiet", help="Enable quiet mode")
    parser.add_option("-l", "--links", action="store_true", default=False,
                      dest="links", help="Get links for specified url only")
    parser.add_option("-d", "--depth", action="store", type="int", default=30,
                      dest="depth", help="Maximum depth to traverse")

    opts, args = parser.parse_args()

    if len(args) < 1:
        parser.print_help()
        # Call form of SystemExit is valid on both Python 2 and 3,
        # unlike the original `raise SystemExit, 1` statement syntax.
        raise SystemExit(1)

    return opts, args




# Begin modifications made by A. Wellock and N. Anderson

#
#  Insert any new modules here:
#



# Note to Alex - I found 37 iframe objects on my google+ page.  Maybe we should have
# a function checking if a page has more than 45 iframes? or 50?

# searched for just the word 'hidden' in g+, it returned 109 results...
# so maybe we want to look for something like.... 150? or 200? or maybe not use this feature?

# searched for '<script' for script tags.  Found 131.  Again maybe 150 or 200?

def embed_obj_count(domsoup):
    """Score 1 when the page embeds 25 or more <object>/<embed> tags,
    a common marker for plugin-exploit pages; otherwise 0."""
    total = len(domsoup.findAll('object')) + len(domsoup.findAll('embed'))
    return 1 if total >= 25 else 0

def percent_script(domsoup):
    """Score 1 when <script> markup accounts for >= 50% of the
    document's pretty-printed length; otherwise 0."""
    dom_len = len(domsoup.prettify())
    if dom_len == 0:
        # BUG FIX: an empty document made the division below raise
        # ZeroDivisionError; an empty page cannot be script-heavy.
        return 0
    script_len = 0.0
    for script in domsoup.findAll('script'):
        script_len += len(script.prettify())
    # script_len is a float, so this is true division on Python 2 too.
    if script_len / dom_len >= 0.50:
        return 1
    return 0

def i_frame_count(domsoup):
    """Score 1 when the page holds more than 45 <iframe> elements
    (threshold chosen above what normal pages were observed to use)."""
    return 1 if len(domsoup("iframe")) > 45 else 0

def script_tag_count(domsoup):
    """Score 1 when the page contains more than 25 <script> tags;
    otherwise 0."""
    return 1 if len(domsoup("script")) > 25 else 0

def hidden_count(domsoup):
    """Score 1 when the page has more than 75 'hidden' elements.

    Counts elements with type="hidden" (hidden form inputs) plus
    elements with visibility="hidden".  Note an element carrying both
    attributes is counted twice, matching the original two passes.
    """
    # The original also ran `hidden = domsoup.findAll(type="hidden")`
    # whose result was immediately clobbered by the loop variable;
    # that dead statement is removed here.
    count = len(domsoup(type="hidden")) + len(domsoup(visibility="hidden"))
    return 1 if count > 75 else 0

def count_SetTimeOut_SetInterval(tree):
    """Score 1 when the parsed JavaScript *tree* references
    setTimeout/setInterval 15 or more times; otherwise 0."""
    timer_names = ('setTimeout', 'setInterval')
    hits = 0
    for node in nodevisitor.visit(tree):
        if isinstance(node, ast.Identifier) and node.value in timer_names:
            hits += 1
    return 1 if hits >= 15 else 0

# Counts the number of eval uses
def jscript_eval_count(tree):
    """Score 2 when 'eval' appears five or more times in the parsed
    JavaScript *tree* -- heavy eval use is a common obfuscation
    marker; otherwise 0."""
    evals = sum(1 for node in nodevisitor.visit(tree)
                if isinstance(node, ast.Identifier) and node.value == 'eval')
    return 2 if evals >= 5 else 0


# This function checks to see if the URL has an IP address in it.
# BUG FIX: the original stackoverflow pattern also matched
# out-of-range sequences such as "999.999.999.999"; each octet is
# now restricted to 0-255 and bounded by \b so digit runs inside
# longer numbers do not match.
_DOTTED_QUAD = re.compile(
    r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
    r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b')

def url_has_ip(url):
    """Return 3 (suspicious) when *url* contains a dotted-quad IPv4
    address, else 0.  Malware is frequently hosted on raw IPs."""
    if _DOTTED_QUAD.search(url) is not None:
        return 3
    return 0


# Checks if the URL has an explicit port number
def url_has_port(url):
    """Return 3 (suspicious) when the authority component of *url*
    carries an explicit :port suffix, else 0.

    BUG FIX: the original r':[0-9]+' matched ':'+digits anywhere in
    the URL (e.g. "?t=1:30" in a query string); the pattern is now
    anchored to the host part immediately after the scheme.
    """
    if re.search(r'://[^/?#]*:[0-9]+', url) is not None:
        return 3
    return 0

# Checks the IP address of the website as looked up by Google's
# public DNS (8.8.8.8) versus the local resolver's listing.
def ip_addr_check(url):
    """Compare Google-DNS and local-DNS answers for *url*'s host and
    return a score of 3 or 0.

    NOTE(review): the original scores 3 when the two answers AGREE;
    a disagreement is what would normally hint at local DNS tampering,
    so this comparison may be inverted -- behavior kept as-is pending
    confirmation with the authors.
    """
    # BUG FIX: the original tested `url[:4] == 'http'` first, so the
    # https branch was unreachable and https URLs lost only 7 of their
    # 8 scheme characters; it also left the path attached to the name
    # handed to dig.  Extract just the host instead.
    host = url.split('://', 1)[-1].split('/', 1)[0]
    cmd1 = ["dig", "@8.8.8.8", host, "+short"]
    cmd2 = ["dig", host, "+short"]
    out1, err1 = sub.Popen(cmd1, stdin=sub.PIPE, stdout=sub.PIPE,
                           stderr=sub.PIPE).communicate()
    out2, err2 = sub.Popen(cmd2, stdin=sub.PIPE, stdout=sub.PIPE,
                           stderr=sub.PIPE).communicate()
    if out1 == out2:
        return 3
    return 0




# This function is the list of malware scans which get called.
# Run through the list of modules and store any links which may
# potentially be malicious
def mal_scan(domsoup, scriptsoup, url):
    """Score one page for cross-site-scripting / malware indicators.

    domsoup    -- BeautifulSoup tree of the whole page
    scriptsoup -- the page's <script> tags (soup.findAll('script'))
    url        -- the page's address

    Pages scoring >= 6 are appended to the global MAL_SITES list;
    every page's score is printed.
    """
    score = 0
    parser = Parser()

    for s in scriptsoup:
        # Only inline scripts carry source text; external
        # <script src=...> tags have s.string == None.  The original
        # guard `s is not ""` was an identity test against a string
        # literal and never filtered anything, so parse(None) raised.
        if not s.string:
            continue
        try:
            tree = parser.parse(s.string)
            score += jscript_eval_count(tree)
            # NOTE(review): shell_detect is not defined anywhere in
            # this file; if it is not supplied by another module this
            # raises NameError and the except branch below fires.
            score += shell_detect(tree)
            score += count_SetTimeOut_SetInterval(tree)
        except Exception:
            # Unparseable JavaScript is itself mildly suspicious.
            score += 1
            # BUG FIX: the original `break` abandoned every remaining
            # script after a single parse failure; keep scanning.
            continue

    # URL-based heuristics.
    score += url_has_ip(url)
    score += url_has_port(url)
    score += ip_addr_check(url)
    # DOM-based heuristics.
    score += percent_script(domsoup)
    score += hidden_count(domsoup)
    score += script_tag_count(domsoup)
    score += i_frame_count(domsoup)
    score += embed_obj_count(domsoup)

    # Above the threshold the site is recorded as potentially malicious.
    if score >= 6:
        global MAL_SITES
        MAL_SITES.append(url)
    print("Site had score of %d" % score)


def main():
    """Entry point: crawl the URL given on the command line and report
    any sites whose malware score crossed the threshold."""
    opts, args = parse_options()
    url = args[0]

    if opts.links:
        # Link-listing mode: print the page's links and stop.
        getLinks(url)
        # Call form of SystemExit works on both Python 2 and 3,
        # unlike the original `raise SystemExit, 0` statement syntax.
        raise SystemExit(0)

    depth = opts.depth

    sTime = time.time()

    print("Crawling %s (Max Depth: %d)" % (url, depth))
    crawler = Crawler(url, depth)
    crawler.crawl()

    eTime = time.time()
    tTime = eTime - sTime   # elapsed seconds (used by the stats below)

    #print("Found:    %d" % crawler.links)
    #print("Followed: %d" % crawler.followed)
    #print("Stats:    (%d/s after %0.2fs)" % (int(math.ceil(float(crawler.links) / tTime)), tTime))
    if MAL_SITES:
        print("Potentially malicious sites found: \n")
        for site in MAL_SITES:
            print(site)
    else:
        print("All sites crawled appear to be benign")

# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()
## end of http://code.activestate.com/recipes/576551/ }}}

