#!/usr/bin/env python
# VERSION 1.1
# Sharecrawling and sharecrawler is a word used to describe this software's function.
# Sharecrawling means using a webcrawling computer program to find (discover) hyperlinks, then
# share those discovered hyperlinks with others via a webpage hosted on the same machine
# on which the sharecrawling/webcrawling software is being run, a.k.a. the host machine.
# Sharecrawling/sharecrawler is a term associated with the software, activity, functions, 
# and web-based sharing of info seen in this very instance you are reading right now.
# 
# Copyright protected by Lance Miller January 2010, aka this.is.lance.miller@gmail.com
# This software is protected by the Affero General Public License (GNU AGPL)
# The GNU AGPL protections mean the software source code, and any derivative versions thereof, are ALWAYS provided over the same internet connection that the software is running on. To run the software means one is providing the entire source code at the same time.
# http://www.gnu.org/licenses/agpl-3.0-standalone.html
# example usage
# sharecrawler.py http://www.slashdot.org
# sharecrawler.py http://216.34.181.45
#
# see USER MODIFIED space below. Note the output file sharecrawler.html is either going to be
# in the same directory as this program, or in your wwwdir once you set the wwwdir variable
#

# Footer/license HTML appended to every generated page (see MyParser.footer).
# NOTE: `global` at module scope is a no-op, so the original `global license`
# statement was dropped.  The name deliberately stays `license` (shadowing
# the interactive site builtin) because MyParser.footer references it.
license = (
    "<a href=\"#top\">back to top</a>\n<div id=\"footer\">\n"
    "Copyright 2010 Lance Miller, aka this.is.lance.miller@gmail.com \n<br />"
    "\nThis software is protected by the Affero General Public License (GNU AGPL)\n<br />"
    "\n<p>The GNU AGPL protections means the software source code, and any derivative versions thereof, are ALWAYS provided over the same internet connection that the software is running on. To run the software means one is providing the entire source code at the same time.</p>"
    "\n<br /><a href=\"http://www.gnu.org/licenses/agpl-3.0-standalone.html\">http://www.gnu.org/licenses/agpl-3.0-standalone.html</a></br ></div>\n"
)

import os
import sys
import string
import urllib
import urllib2
import sys
import sgmllib
import traceback
import time
import datetime


 
class MyParser(sgmllib.SGMLParser):
    def parse(self, s):
        "Parse the given string 's'."
        self.feed(s)
        self.close()

    def __init__(self, verbose=0):
        "Initialise an object, passing 'verbose' to the superclass."
        sgmllib.SGMLParser.__init__(self, verbose)
    self.todo = []
    # ############################################################
        # USER MODIFIED SPACE. CHANGE THESE VARIABLES TO WHAT YOU WANT
        # LEAVE PERSONAL INFO BLANK IF YOU WANT           
        wwwdir = "" # set this to localhost www root directory e.g. /var/www/ 
        email = "root@example.com" 
        address = "Maybe Put A Real Address Here If in Ad Hoc Local Resilient Mode"
        phone = "000-123-4567"
        self.DarkMicrobrew = True # set to False for Lite version. Lite version does not serve webpage.
    self.user_agent = 'shiny.sharecrawler source at http://code.google.com/p/shiny-sharecrawler/' 
        # BELOW ARE SOME INTERESTING SEEDS
        self.todo.append('http://fsboa.com/')
        self.todo.append('http://americansagainsthate.org/')
    self.todo.append('http://209.151.164.28/')
    self.todo.append('http://www.sjtu.edu.cn/')
        # ############################################################
    self.whileloop = 0
    self.forbiddens = ['pdf','jpg','gif','png','mov', 'wmv', 'avi']
    self.done = []
    self.errors = []
    self.domains = []
        self.sharecrawlers = []
    self.emails = {}
    self.successfuls = []
    self.suspect = []
    self.exe = []
        self.parentchild = {} 
    self.current_url = ""
    self.parent_url = ""
    self.current_domain = ""
        self.sharecrawler_filename = "sharecrawler.html"
        self.sharecrawler_html = wwwdir + self.sharecrawler_filename
        self.remote_sharecrawler_file = ""
    self.querystring = ""
    self.thepage = ""
    self.links = []
    self.header_section = ""
    self.footer_section = ""
    self.response_header = ""
    self.totop = "<center>" + spanwhite("[") + "<a href=\"#top\" title=\"top\">top</a>" + spanwhite("]") + "</center>"
    self.arrow = "&#160;" + spanorange("=&gt;") + "&#160;"
    self.ltorange = spanorange("&lt;&lt;")
    self.gtorange = spanorange("&gt;&gt;")
    self.ltwhite = spanwhite("&lt;&lt;")
    self.gtwhite = spanwhite("&lt;&lt;")
    self.contact_info = ""
    if email != "":
        self.contact_info = self.contact_info + "\n<a mail=\"" + email + "\">email: " + email + "</a><br />\n"
        if address != "":
        self.contact_info = self.contact_info + "\n<a address=\"" + address + "\">address: " + address + "</a><br />\n"
        if phone != "":
        self.contact_info = self.contact_info + "\n<a phone=\"" + phone + "\">phone: " + phone + "</a><br />\n"
    self.header_webpage()    
        self.banner() 
    self.footer()
        if self.DarkMicrobrew == True:
        cmd = "touch " + self.sharecrawler_html
            os.system(cmd)
    try:
        value = sys.argv[1]
    except:
        print "useage:"
        print sys.argv[0] + " http://example.com"
        print "or"
        print sys.argv[0] + " http://216.34.181.45"
        value = "http://localhost"
        print "using " + value 
        os.system('sleep 3')
    if string.find(value, 'http') == 0:             
        if value not in self.done:
        self.todo.append(value)

        
    def start_a(self, attributes):
        for name, value in attributes:
        value = str(value)
            if name == "href":    
        if string.find(value, 'http') == 0:
            try:
            value_stripped = domainSplit(value)
            except:
            value_stripped = ""    
            if string.find(value, '?') == 0:
            self.querystring = value.split('?')[1]
            else:
            self.querystring = ""    
            if len(self.querystring) > 254:
            self.suspect.append(value)
            elif value not in self.done and value not in self.errors and value_stripped not in self.domains and value not in self.todo and value_stripped not in self.todo:
            self.todo.append(value)        
            self.todo.append(value_stripped)
            self.links.append(value)
            
            elif string.find(value, 'mailto') == 0:
            try:
            emailvalue = value.split(':')[1]
            self.addemailpc(emailvalue, self.current_url)
            self.links.append(emailvalue)
            except:
            pass             
        
        
    def header_webpage(self):
    self.header_section = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
        self.header_section = self.header_section + "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">"
        self.header_section = self.header_section + "\n<head>\n"    
        self.header_section = self.header_section + "<meta http-equiv=\"refresh\" content=\"360\">\n"
    self.header_section = self.header_section + "\n<title>Shiny.Sharecrawler page</title>\n"
        self.header_section = self.header_section + "\n<meta name=\"Keywords\" content=\"Python, Programming, Sharecrawling, Sharecrawler, Bot, Bots \" />\n"
        self.header_section = self.header_section + "\n<meta name=\"description\" content=\"Sharecrawler page. One user's instance of the sharecrawling infrastructure, useful for general webcrawling, and equiped with features for resilient ad hoc networking.\" />\n"
        self.header_section = self.header_section + "\n<meta http-equiv=\"pragma\" content=\"no-cache\" />\n"
    self.header_css()
    self.header_javascript()
        self.header_section = self.header_section + "\n</header>\n\n<body>\n<a name=\"top\"></a>\n\n"
    return self.header_section
    
    def header_css(self):
        self.header_section = self.header_section + "\n<style type=\"text/css\">\n"
        self.header_section = self.header_section + "\nbody { margin:0px; text-align:left; font-family: monospace;font-style: normal;font-variant: normal;font-weight: normal;font-size: normal;line-height: 100%;word-spacing: normal;text-decoration: none;text-transform: none;color: #00FF00;background: #000000; padding:5px; }\n"
        self.header_section = self.header_section + "\na:link {color:#00FF00;background-color:transparent;text-decoration:none}"
        self.header_section = self.header_section + "\na:visited {color:orange;background-color:transparent;text-decoration:none}"
        self.header_section = self.header_section + "\na:hover, a:active {color:#FFFFFF;background-color:transparent;text-transform:none;text-decoration:none;border:dotted 1px #FFFFFF;padding:1px;}"
        self.header_section = self.header_section + "\npre {padding:3px;border:dotted 1px #333333;background:#DDDDDD;color:#333333}"
        self.header_section = self.header_section + "\n#contact {padding:3px;border:dotted 1px #333333;background:#DDDDDD;color:#333333}"
        self.header_section = self.header_section + "\n#stats {padding:3px;border:dotted 1px #333333;background:#DDDDDD;color:#333333}"
    self.header_section = self.header_section + "\n#footer {padding:3px;border:dotted 1px #333333;background:#DDDDDD;color:#333333;align:center}"
        self.header_section = self.header_section + "\n.orange {color:orange;font-weight:bold;}"
    self.header_section = self.header_section + "\n.parentchild {padding:1px;border:dotted 1px #736F6E;}"
        self.header_section = self.header_section + "\n\t </style> \n\n"
    return self.header_section
    
    def header_javascript(self):
    self.header_section = self.header_section + "\n<script type=\"text/javascript\">\n\n" 
    self.header_section = self.header_section + "\n\n\t</script>\n"    
    return self.header_section
    
    def banner(self):
        self.header_section = self.header_section + "<h2>The.Shiny.<i><span class=\"orange\">Sharecrawler</span></i></h2> &#160; &#160; \n"
        self.header_section = self.header_section + "<a href=\"#sourcecode\">[ see source code ]</a>\n"
        self.header_section = self.header_section + "<a href=\"#license\">[ see general public license  ]</a>\n"
        self.header_section = self.header_section + "\n<p style=\"font-size:100%\">" 
        self.header_section = self.header_section + "\tSharecrawling and sharecrawler are words used to describe this software's function. "
    self.header_section = self.header_section + "Most <a href=\"http://en.wikipedia.org/wiki/Web_crawler\">web crawlers</a> save discovered content. I didn't want that at all, I wanted to simply discover who is linked to who, and also be able to <i>easily</i> share the discovered information." 
        self.header_section = self.header_section + "The method of sharing is via the output of an HTML page. Since the output format is the same as the input [HTML], the Shiny.Sharecrawler can read output from other Shiny.Sharecrawlers. First off, this might be simply fun, "
        self.header_section = self.header_section + "but there is a scenario in which this sharecrawling linking could be a life saver. If a calamity caused internet connectivity to be sporadic and non-global, a web of sharecrawlers could aid in creating an ad hoc network. "
        self.header_section = self.header_section + "The program is hardcoded to always check if the filename <i><b>sharecrawler.html</b></i> (it's default output filename) exists on every host it crawls. "
        self.header_section = self.header_section + "A group of users, or community, could prepare for connectivity disaster by keeping records of the IP of known hosts. Then use those IP's as seeds for the sharecrawler during a catastrophe. "
        self.header_section = self.header_section + "Of course this does nothing at the physical layer to repair networking. But if used intelligently this crawling could quickly show who is online, and by omission who is offline."
        self.header_section = self.header_section + "</p>\n<p>This software has two settings: <span class=\"orange\">Lite</span> and <span class=\"orange\">DarkMicrobrew</span>."
        self.header_section = self.header_section + "</p>\n<table><tr>\n"
    if self.contact_info != "":
        self.header_section = self.header_section + "<td align=\"left\"><div id=\"contact\">\nContact info for person running this website sharecrawler:<br/ >\n" + self.contact_info + "</div></td>"
    return self.header_section
    
    def footer(self):
    sourcefile = open(sys.argv[0], 'r')
        self.footer_section = "\n<a name=\"sourcecode\"></a><a href=\"#top\">back to top</a>\n<h4>Source code:</h4><pre>\n"
        for line in sourcefile.readlines():
        self.footer_section = self.footer_section + line.replace(">","&gt;").replace("<","&lt;").replace("\"","&quot;").replace("\'", "&#039;")
        self.footer_section = self.footer_section + "\n</pre>\n"
        sourcefile.close() 
        self.footer_section = self.footer_section + "\n<a name=\"license\"></a>" + license + "\n"
        self.footer_section = self.footer_section + "\n\n</body>\n\n</html>\n"
        return self.footer_section
                        
            
    def commandline_print(self):
    os.system('clear')
    print "len(self.done) == " + str(len(self.done))
    print "len(self.domains) == " + str(len(self.domains))
    print "len(self.emails) == " + str(len(self.emails))
    print "len(self.errors) == " + str(len(self.errors))
    print "len(self.exe) == " + str(len(self.exe))
    print "current_url: " + self.current_url 
    print "whileloop: " + str(self.whileloop)
    print print_mem()

    def webpage_print(self):
    insertme = ""
    insertme = self.header_section
    mytime = datetime.datetime.now()
    insertme = insertme + "\n<td align=\"right\"><div id=\"stats\">\nlast update: " + str(mytime) + "\n<br/ >\n"
    insertme = insertme +  "\n memory (resident) usage : " + str(rss()) + " kB\n<br />\n memory (resident + text) usage : " + str(rsz()) + " kB\n<br />\n memory (virtual) usage : " + str(vsz()) + " kB\n<br />\ncurrent_url = " + str(self.current_url) + "\n</div>\n</td>\n</tr>\n</table>\n"    
    insertme = insertme + "<a href=\"#\" title=\"done\">[done]</a>[" + str(len(self.done)) + "]"
    insertme = insertme + "<a href=\"#todo\" title=\"todo\">[todo]</a>[" + str(len(self.todo)) + "]"
    insertme = insertme + "<a href=\"#exe\" title=\"dos exe\">[dos exe]</a>[" + str(len(self.exe)) + "]<br /><br />" 
    insertme = insertme + self.webpage_parentchild()
    insertme = insertme +  "\n<a name=\"todo\"></a><h3>todo</h3>\n" 
    for i in self.todo:
        insertme = insertme + self.totop + wrapperA(i) + "\n<br />"    
    insertme = insertme +  "\n<a name=\"exe\"></a><h3>dos exe:</h3>\n"     
    for i in self.exe:
        insertme = insertme + self.totop + wrapperA(i) + "\n<br />"         
    insertme = insertme +  self.footer_section
      cmd = "echo '' > " + self.sharecrawler_html
    os.system(cmd)
    f = open(self.sharecrawler_html, 'r+', 0)
    f.write(insertme)
    f.close()
    insertme = ""
    print "wrote new html file: " + self.sharecrawler_html

    def webpage_parentchild(self):
    insertme = ""
    for parent, children in self.parentchild.iteritems():
        insertme = insertme + "\n<a name=\"" + parent + "\"></a>"
        insertme = insertme + "\n<br /><div class=\"parentchild\">" + wrapperF( children[len(children) -1] ) + wrapperA(parent) + "\n<br />\n" 
        for i,child in enumerate(children):
        if child in self.done:
            anchorjump = wrapperC(child)
        else:
            anchorjump = ""
        if string.find(child, 'http') == -1: 
            showlink = wrapperE(child)
            externallink = ""
        else:
            showlink = wrapperA(child)
            externallink = wrapperD(child)
            
        if i == 0:
            insertme = insertme + child
        elif i == 1 and i != len(children):
            insertme = insertme + "\n" + npblank(7) + "&lt;" + spanwhite('links') + "&gt;<br />\n"
            insertme = insertme + "\n" + npblank(9) + "\n" + str(i) + npblank(1) + showlink + "\n" + anchorjump + "\n" + externallink + "\n<br/>" 
        elif i > 1 and i < (len(children) - 1):
            insertme = insertme + "\n" + npblank(9) + "\n" + str(i) + npblank(1) + showlink + "\n" + anchorjump + "\n" + externallink + "\n<br/>"
                else:
            pass
        
        insertme = insertme + "\n</div>\n" + self.totop    
    return insertme
    
    def checkURL(self):
    try:
        self.current_domain = domainSplit(self.current_url)
        if self.current_domain == "http://localhost":
        self.remote_sharecrawler_file = False
            else:
                self.remote_sharecrawler_file = self.current_domain + "/" + self.sharecrawler_filename
                
    except:
        self.current_domain = ""
    if string.find(self.current_url, 'http') == 0:
        if self.current_domain not in self.domains:
        if self.current_url not in self.done:
            return True        
    else:
        return False
    
    def domainInject(self):
    try:
        self.domains.append(self.current_domain)
    except:
        pass  
    
    def addpc(self, parent, child):
    self.parentchild[parent] = child
    
    def addemailpc(self, email, url):
    self.emails[email] = url    
    
    def crawl(self):
    self.whileloop = 0
    while self.todo != []:
        self.commandline_print()
        if self.whileloop % 5 == 0:
        self.webpage_print()
        try:
        self.parent_url = self.current_url
        self.current_url = self.todo.pop()
        except:
        print "self.todo.pop() failed"    
        if self.checkURL():
        try:
            self.sendGet()
            try:
            self.links = []
            self.links.append(self.response_header)
            self.parse(self.thepage)
            self.links.append(self.parent_url)
            self.addpc(self.current_url,self.links)
            self.done.append(self.current_url)
            self.domainInject()    
            except:
            self.done.append(self.current_url)
        except:
            self.errors.append(self.current_url)    
        self.whileloop = self.whileloop + 1        
        
        
    def header(self):
    return self.header_section
    
    def sendGet(self):
        # headers = { 'User-Agent' : self.user_agent }
    # values = {'name':'somevalue'}
    # data = urllib.urlencode(values)
        try:
        # req = urllib2.Request(self.current_url, data, headers)
            req = urllib2.Request(self.remote_sharecrawler_file) # attempt for sharecrawler.html comes first
            self.sharecrawlers.append(self.remote_sharecrawler_file)
        req.add_header('User-agent', self.user_agent)
        except:
        try:
            req = urllib2.Request(self.current_url) 
            req.add_header('User-agent', self.user_agent)           
        except:
                try:
                        req = urllib2.Request(self.current_domain)
                    req.add_header('User-agent', self.user_agent)
                except:  
                return False
        try:
        response = urllib2.urlopen(req)
        except:
        try:
        req = urllib2.Request(self.current_domain) 
        response = urllib2.urlopen(req)
        except: 
        return False
    try:
        self.response_header = "<br />" + npblank(5) + "&lt;" + spanwhite("response header") + "&gt;<br />"
        for key in response.info():
        print str(key) + " => " + response.info().get(key)
        if response.info().get(key) == "application/x-ms-dos-executable":
            self.exe.append(self.current_url)
        if response.info().get(key) == "set-cookie":
            self.response_header = self.response_header + "\n<pre style=\"color:#00FF00;background:#000000;border:none;display:inline\">&#160; &#160;&#160; &#160;" + str(key) + "&#160;" + self.arrow + "&#160;" + response.info().get(key) + "</pre>\n<br />\n"
        else:
            self.response_header = self.response_header + "\n&#160; &#160;&#160; &#160;" + str(key) + "&#160;" + self.arrow + "&#160;" + response.info().get(key) + "\n<br />\n"
        self.response_header = self.response_header + "\n"    
    except: 
        pass      
        try:
        self.thepage = response.read()
        except:
        return False
        return True    
    
def mem(size="rss"):
    """Memory footprint of this process in kB, read via `ps -o <size>`.

    size: any ps output column -- here one of "rss", "rsz", "vsz".
    """
    command = "ps -p %d -o %s | tail -1" % (os.getpid(), size)
    return int(os.popen(command).read())


def rss():
    """Return ps -o rss (resident) memory in kB."""
    return mem(size="rss")


def rsz():
    """Return ps -o rsz (resident + text) memory in kB."""
    return mem(size="rsz")


def vsz():
    """Return ps -o vsz (virtual) memory in kB."""
    return mem(size="vsz")


def print_mem():
    """Return a three-line summary string of the memory figures above."""
    pieces = ("memory (resident) usage : %d kB " % rss(),
              " memory (resident + text) usage : %d kB " % rsz(),
              " memory (virtual) usage : %d kB" % vsz())
    return "\n".join(pieces)
       
def httpme(input):
    """Return *input* with an "http://" scheme prepended."""
    return "".join(["http://", input])


def domainSplit(input):
    """Reduce a URL to "http://<host>", collapsing to the bare registered
    domain for .com/.org/.mil/.net hosts.

    Examples:
        "http://www.example.com/x" -> "http://example.com"
        "http://www.sjtu.edu.cn/"  -> "http://www.sjtu.edu.cn"

    Returns None when the input has no "//" (or is not a string).
    NOTE(review): two-level TLDs such as .co.uk are not special-cased,
    matching the original behavior.
    """
    # Isolate the host: text between the scheme's "//" and the next "/".
    # The original had a second, unreachable fallback split here (if
    # split('//')[1] raises once it raises again); it has been removed.
    try:
        input = input.split('//')[1].split('/')[0]
    except (AttributeError, IndexError):
        return None
    # For the common generic TLDs, strip subdomains so that e.g.
    # www.example.com and example.com count as the same domain.
    try:
        parts = input.split('.')
        if parts[-1] in ('com', 'org', 'mil', 'net'):
            input = parts[-2] + "." + parts[-1]
    except IndexError:
        # e.g. a bare "com" host with no second label
        return None
    # trivial httpme() helper inlined so this function stands alone
    return "http://" + input
                    
def spanorange(input):
    """Wrap *input* in an orange inline <span>."""
    return '<span style="color:orange">{0}</span>'.format(str(input))
    
def spanwhite(input):
    """Wrap *input* in a white (#FFFFFF) inline <span>."""
    return '<span style="color:#FFFFFF">{0}</span>'.format(str(input))

def spanred(input):
    """Wrap *input* in a red inline <span>."""
    return '<span style="color:red">{0}</span>'.format(str(input))

def spanpurple(input):
    """Wrap *input* in a purple inline <span>."""
    return '<span style="color:purple">{0}</span>'.format(str(input))

def wrapperA(input):
    """Anchor whose href and link text are both *input*."""
    target = str(input)
    return '<a href="%s">%s</a>' % (target, target)

def wrapperB(input):
    """Named anchor for *input* plus an '[anchorlink]' jump to it."""
    name = str(input)
    return '<a name="%s"></a>[<a href="#%s">anchorlink</a>]&#160;' % (name, name)

def wrapperC(input):
    """Bracketed '[goto response header]' jump link to *input*'s page anchor."""
    jump = '<a href="#' + str(input) + '">' + spanred('goto response header') + '</a>'
    return "&#160;" + spanwhite('[') + jump + spanwhite(']')

def wrapperD(input):
    """Bracketed '[goto external site]' link pointing directly at *input*."""
    link = '<a href="' + input + '">goto external site</a>'
    return spanwhite('[') + link + spanwhite(']')

def wrapperE(input):
    """Render *input* (an email address) in purple with a '<< email' tag.

    BUG FIX: the entity was written '&lt;&lt email' (missing ';'), which
    browsers render as a stray '&lt' fragment; now '&lt;&lt; email'.
    """
    return spanpurple(input) + "&#160;" + spanorange('&lt;&lt; email')

def wrapperF(input):
    """'<referrer>' heading linking to *input*'s anchor, then a '<url>' heading."""
    referrer_part = ("&lt;" + spanwhite("referrer") + "&gt;<br />" + npblank(1)
                     + "<a href=\"#" + input + "\">" + input + "</a>\n<br />\n")
    url_part = npblank(3) + "\n&lt;" + spanwhite("url") + "&gt;<br />\n" + npblank(5)
    return referrer_part + url_part

def npblank(input):
    """Return *input* non-breaking spaces ("&#160;").

    Falls back to a single non-breaking space when *input* is not usable as
    a repeat count (the original used a bare except; narrowed to TypeError,
    the only error str * n can raise here).
    """
    try:
        return "&#160;" * input
    except TypeError:
        return "&#160;"

def main():
    myparser = MyParser()
    try:
    myparser.crawl()
    except:
    print "crawl() exited or failed"
    sys.exit(1)

    
if __name__ == "__main__":
    main()

    

