#!/usr/bin/env python2.6 -tt
# coding: utf-8

import sys
import re
import urllib2
import urlparse
import threading
import sqlite3 as sqlite
import time
from BeautifulSoup import BeautifulSoup
# Try to import psyco for JIT compilation
#try:
#    import psyco
#    psyco.full()
#except ImportError:
#    print "Continuing without psyco JIT compilation!"

"""
The program should take arguments
1) database file name
2) start url
3) crawl depth
4) verbose (optional)
Start out by checking to see if the args are there and
set them to their variables
"""
# Require at least: script name, database file, start url, crawl depth.
if len(sys.argv) < 4:
    sys.exit("Not enough arguments!")
else:
    dbname = sys.argv[1]
    starturl = sys.argv[2]
    crawldepth = int(sys.argv[3])
# Optional 4th argument: any case-insensitive "true" turns on verbose output;
# anything else (or its absence) leaves it off.
if len(sys.argv) == 5:
    if (sys.argv[4].upper() == "TRUE"):
        verbose = True
    else:
        verbose = False
else:
    verbose = False
if verbose:
    print "Executing with parameters:",dbname,starturl,crawldepth

# Pre-parse the start url into its components (scheme, netloc, path, ...)
surlparsed = urlparse.urlparse(starturl)

# Connect to the db and create the tables if they don't already exist.
# NOTE(review): this connection and cursor are shared by the worker thread
# below; sqlite3 connections are not safe to share across threads by default.
connection = sqlite.connect(dbname)
connection.text_factory = sqlite.OptimizedUnicode
cursor = connection.cursor()

# crawl_index: holds all the information of the urls that have been crawled
cursor.execute('CREATE TABLE IF NOT EXISTS crawl_index (crawlid INTEGER, parentid INTEGER, url VARCHAR(256), title VARCHAR(256), keywords VARCHAR(256) )')

# queue: urls waiting to be crawled (id, parent id, depth, url)
cursor.execute('CREATE TABLE IF NOT EXISTS queue (id INTEGER PRIMARY KEY, parent INTEGER, depth INTEGER, url VARCHAR(256))')

# status: records when crawling was started (s=1) and stopped (s=0).
# Mostly in place for a future application to watch the crawl interactively.
cursor.execute('CREATE TABLE IF NOT EXISTS status ( s INTEGER, t TEXT )')
connection.commit()

# Seed the queue with the starting url (parent 0, depth 0; id auto-assigned)
cursor.execute("INSERT INTO queue VALUES ((?), (?), (?), (?))", (None, 0, 0, starturl))
# Record that crawling has started (s=1) with the current timestamp
cursor.execute("INSERT INTO status VALUES ((?), datetime('now'))",(1,))
connection.commit()

# Compile keyword and link regex expressions
# NOTE(review): these appear unused — the BeautifulSoup parsing below is
# what actually extracts keywords and links; kept for compatibility.
keywordregex = re.compile('<meta\sname=["\']keywords["\']\scontent=["\'](.*?)["\']\s/>')
linkregex = re.compile('<a.*\shref=[\'"](.*?)[\'"].*?>')
# In-memory record of urls already crawled, used to avoid re-queueing them
crawled = []

class threader ( threading.Thread ):
    """
    run()
    Args:
        none
    the run() method contains the main loop of the program. Each iteration takes the url
    at the top of the queue and starts the crawl of it.
    """
    def run(self):
        while 1:
            try:
                # Get the first item from the queue
                cursor.execute("SELECT * FROM queue LIMIT 1")
                crawling = cursor.fetchone()
                if verbose:
                    print "Read from queue:",crawling
                # Remove the item from the queue
                cursor.execute("DELETE FROM queue WHERE id = (?)", (crawling[0], ))
                connection.commit()
                if verbose:
                    print "Crawling: " + crawling[3]
            except KeyError:
                raise StopIteration
            except:
                pass

            # if theres nothing in the que, then set the status to done and exit
            if crawling == None:
                cursor.execute("INSERT INTO status VALUES ((?), datetime('now'))", (0,))
                connection.commit()
                sys.exit("Done!")

            # Crawl the link
            self.crawl(crawling)
            time.sleep(1)

    """
    crawl()
    Args:
        crawling: this should be a url

    crawl() opens the page at the "crawling" url, parses it and puts it into the database.
    It looks for the page title, keywords, and links.
    """
    def crawl(self, crawling):
        def to_unicode_or_burst(obj,encoding='utf-8'):
            if isinstance(obj,basestring):
                if not isinstance(obj, unicode):
                    try:
                        obj = unicode(obj,encoding)
                    except:
                        if verbose: 
                            print obj
                        pass
            return obj

        # crawler id
        cid = crawling[0]
        # parent id. 0 if start url
        pid = crawling[1]
        # current depth
        curdepth = crawling[2]
        # crawling urL
        curl = crawling[3]
        # Split the link into its sections
        url = urlparse.urlparse(curl)

#        try:
#            # Have our robot parser grab the robots.txt file and read it
#            if verbose:
#                print 'Reading for respect: http://'+url[1]+'/robots.txt'
#            self.rp.set_url('http://' + url[1] + '/robots.txt')
#            self.rp.read()
#            # If we're not allowed to open a url, return the function to skip it
#            if not self.rp.can_fetch('[POLITE BOT] Dizorg.net Testing www bot|spider|crawler', curl):
#                if verbose:
#		    print curl + " WTF!? not allowed by robots.txt?"
#		    print self.rp.__dir__
#                return
#	    else:
#		print curl + " is allowed to be crawled"
#        except:
#            pass

        try:
            # Add the link to the already crawled list
            crawled.append(curl)
        except MemoryError:
            # If the crawled array is too big, deleted it and start over
            del crawled[:]

        # Connect and do all stuff needed:
        #try:
        print "Creating the request to:",curl
        # Create a Request object
        request = urllib2.Request(curl)
        # Add user-agent header to the request
        request.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.7.6) Gecko/20050405 Epiphany/1.6.1 (Ubuntu) (Ubuntu package 1.0.2)")
        # Build the url opener, open the link and read it into msg
        opener = urllib2.build_opener()
        soup = BeautifulSoup(opener.open(request).read())
        to_unicode_or_burst(soup)
        # Beautiful Soup Parsing:
        title = soup('title')[0].string.encode('utf-8')
        print "TITLE:",title
        # Meta-Keywords:
        metaKeywordTag=soup.find('meta',{'name': 'keywords' })
        if metaKeywordTag:
            print "KEYWORDS:",metaKeywordTag['content'],curl
            keywordlist=metaKeywordTag['content'].encode('utf-8')
        else:
            keywordlist=''
        # Soup links
        aTags = soup.findAll('a')
        links = []
        for href in aTags:
            try:
                #links.append(href['href']) if not href['href'].find('javascript')
                link = href['href']
                links.append(link))
            except:
                pass

        # queue up the links
        self.queue_links(url, links, cid, curdepth)

        try:
            # Put now crawled link into the db
            if verbose:
                print "Trying to insert:",cid,pid,curl,title,keywordlist
            cursor.execute("INSERT INTO crawl_index VALUES( (?), (?), (?), (?), (?) )", (cid, pid, curl, title, keywordlist))
            connection.commit()
        except:
            if verbose: print "Insert FAILED? WTF?!?!?!"
            pass

    def queue_links(self, url, links, cid, curdepth):
        if curdepth < crawldepth:
            # Read the links and insert them into the queue
            for link in links:
                cursor.execute("SELECT url FROM queue WHERE url=?", [link])
                for row in cursor:
                    if row[0].encode('utf-8') == url:
                        continue
                if link.startswith('/'):
                    link = 'http://' + url[1] + link
                elif link.startswith('#'):
                    continue
                elif not link.startswith('http'):
                    link = urlparse.urljoin(url.geturl(),link)
                if link.decode('utf-8') not in crawled:
                    try:
                        cursor.execute("INSERT INTO queue VALUES ( (?), (?), (?), (?) )", (None, cid, curdepth+1, link))
                        connection.commit()
                    except:
                        continue
        else:
            pass

if __name__ == '__main__':
    # Kick off the crawler's main loop in the current thread
    worker = threader()
    worker.run()
