#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Jazz Crawler, a simple site analysis tool.
"""
__author__ = "Alessandro Martin (metallourlante@gmail.com)"

from lib import toolbar_pagerank
from lib.PageRank import PageRank
from urlparse import urlsplit
import Queue
import datetime
import httplib
import lib.robotexclusionrulesparser as robotparser
import lib.smart_http_client as client
import logging
import networkx as NX
import os
import pylab as P
import re
import sqlite3
import sys
import threading
import time
import urllib2, gzip, StringIO
from  lib.normalize import normalize
from lib.BeautifulSoup import BeautifulSoup

START_TIME = datetime.datetime.now()  # wall-clock start; used at the bottom to report crawl duration
THREAD_LIMIT = 20  # size of the downloader thread pool
ROOT_PAGE = "http://localhost/"  # NOTE(review): defined but never read in this file — confirm before removing
base_url = "http://localhost/"  # NOTE(review): defined but never read in this file — confirm before removing

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')

class Crawler:
    """
    Main class
    """
    
    def __init__(self, domain, robots=False, nofollow=False):
        logging.debug('Initialize')
        self.graph = NX.DiGraph()
        self.crawled = set()
        self.domain = domain
        self.jobs = Queue.Queue(0) #no item limit
        self.robots = robots
        self.nofollow = nofollow
        self.now = time.ctime
        self.db_name = urlsplit(domain).netloc + '.db'
        
        if self.robots:
            self.robot_txt = robotparser.RobotExclusionRulesParser()
            self.robot_txt.user_agent = "Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
            self.robot_txt.fetch(self.domain + 'robots.txt')
            #if self.robot_txt.is_allowed('Googlebot', url):
        
    def init_db(self):
        try:
            os.remove(self.db_name)
        except Exception:
            logging.debug('Cannot remove old database file')
                        
        con = sqlite3.connect(self.db_name)
        con.executescript('''
                        CREATE TABLE site (
                        url text,
                        title text, 
                        description text, 
                        html text, 
                        tbpr integer, 
                        last_update text);
                        
                        CREATE TABLE links (
                        url text,
                        outlink text,
                        rel text);
                        ''')
        con.commit()
        con.close()

    def start_crawling(self):
        """
        Start the crawling process putting the first URL, i.e. the domain,
        in crawling queue and creating the thread pool
        """
        logging.debug('Start crawling')
        self.jobs.put(self.domain)
        for n in xrange(THREAD_LIMIT):
            t = threading.Thread(target=self.__downloader)
            t.setDaemon(True)
            t.start()
        while threading.activeCount() > 1: 
            time.sleep(1) # Wait to finish

    def __downloader(self):
        """
        Pick an URL from the queue and download it till the queue is empty
        """
        con = sqlite3.connect(self.db_name)
        c = con.cursor()
        while True:
            try:
                url = self.jobs.get(0) # 0 means raise empty if no element 
                #A check at this point avoid duplicated URLs
                if url not in self.crawled:
                    logging.debug('Getting %s...', url) 
                    page_info = self.__get_page_info(url)
                    values = (
                              urlsplit(url).path, 
                              page_info['title'], 
                              page_info['description'],
                              page_info['html'],
                              page_info['tbpr'],
                              time.ctime(),
                              )
                    c.execute('insert into site values (?,?,?,?,?,?)', values) 
                    logging.debug('Page info saved')
                    logging.debug('Found %d links', len(page_info['links']))
                    for link in page_info['links']:
                        values = (url, normalize(link, self.domain), '')
                        c.execute('insert into links values (?,?,?)', values) 
                    con.commit()
                    logging.debug('Saved %d links for %s', len(page_info['links']), urlsplit(url).path)

                    normalized_urls = [ normalize(url, self.domain) for url in  page_info['links'].keys() ]
                    for url in set(normalized_urls):
                        if url:
                            nurl = normalize(url, self.domain)
                            #put only external links in queue
                            if nurl.startswith(self.domain):
                                if self.robots:
                                    if self.robot_txt.is_allowed('Googlebot', nurl): self.jobs.put(nurl)
                                else:
                                    self.jobs.put(nurl)
                            else:
                                logging.debug('Skip external url: %s', url)
            except Queue.Empty:
                con.close()
                logging.debug('Empty queque!')
                return

    def __get_page_info(self, url):
        """
        Get all links from a web page
        do not parse non-HTML pages
        """
        internal_links = []
        page_info = {'title':'', 'description':'', 'html':'', 'tbpr':None, 'links':[]}
        response = client.get(url)
        self.crawled.add(url)
#        page_info['tbpr'] = toolbar_pagerank.get(url)
        if not hasattr(response,'reason'):
            if response.code == 200:
                if 'html' in response.headers['content-type']:
                    if response.headers.get('content-encoding', '') == 'gzip':
                        logging.debug('Gzip content founded in %s. Unzipping...', url)
                        compressed_data = response.read()
                        compressed_stream = StringIO.StringIO(compressed_data)
                        gzipper = gzip.GzipFile(fileobj=compressed_stream)
                        page_info['html'] = gzipper.read()
                    else:
                        page_info['html'] = response.read()
                        
                    doc = BeautifulSoup(page_info['html'])
                        
                    page_info['title'] = doc.find('title').contents[0]
                    
                    metadata = [i for i in doc.findAll('meta')]
                    if len(metadata) != 0:
                        for meta in metadata:
                            try:
                                if meta['name'].lower() == 'description':
                                    page_info['description'] = meta[1][1]
                                    break
                            except KeyError:
                                    pass
                    anchors = [ tag for tag in doc.findAll('a') ]
                    for anchor in anchors:
                        try:
                            page_info['links'].append( (anchor['href'], ''))
                        except KeyError:
                            pass
                else:
                    logging.debug('Skip url %s. Not an HTML document', url)                                            
            else:
                logging.debug('Resource not available. Status code is %d', response.code)
        else:
            logging.debug('Network problem reaching %s', url)
        page_info['links'] = dict(page_info['links']) 
        return page_info
    
    def build_graph(self):
        """
        Build a graph data structure of the site
        """
        logging.debug('Building graph...')
        con = sqlite3.connect(self.db_name)
        c = con.cursor()
        rows = c.execute('SELECT url, outlink FROM links')
        for url, outlink in rows:
            outlink = normalize(outlink, self.domain)
            if outlink.startswith(self.domain):
                self.graph.add_edge(urlsplit(url).path, urlsplit(outlink).path)

    def draw_graph(self):
        """
        Create a graphical representation of data
        """
        logging.debug('Drawing graph...')
        pos=NX.spring_layout(self.graph)
#        image_name = urlsplit(self.domain).netloc + '.svg'
        NX.draw_networkx_nodes(self.graph, pos, node_size=500, nodelist=['/'], node_color='r')
        difference = filter(lambda x:x not in ['/'],self.graph.nodes())
        NX.draw_networkx_nodes(self.graph, pos, node_size=100, node_color='b', nodelist=difference)
        NX.draw_networkx_edges(self.graph, pos, width=1.0,alpha=0.5)

        P.show()

    def graph_info(self):
        """
        Output some information about the site link structure
        """
        for node in self.graph:
            out_degree = self.graph.out_degree(node)
            print node, "|", out_degree  
        print "-------Distance from root ---------"
        root_distance = NX.single_source_shortest_path_length(self.graph, '/').items() 
        for page, distance in root_distance:
            print page, ' > ', distance 
        edges = self.graph.edges()
        nodes = self.graph.nodes()
        pagerank = PageRank(nodes, edges)
        print "-------Page rank ---------"
        ranking = pagerank.ranking()
        for node, rank in ranking:
            print node + " PR: %.3f" % rank
      
    def generate_report(self):
        #load template
        #populate template
        #write HTML file
        pass
    def dump_db(self):
        con = sqlite3.connect(self.db_name)
        c = con.cursor()
        rows = c.execute('SELECT url, outlink FROM links')
        for url, outlink in rows:
            print url + "\t->\t" +  outlink
               
if __name__ == '__main__':

    domain = "http://localhost/"
    my_crawler = Crawler(domain, robots=False, nofollow=True)
    my_crawler.init_db()
    my_crawler.start_crawling()
    DURATION = datetime.datetime.now() - START_TIME
    logging.debug('%d seconds', DURATION.seconds)
    logging.debug('%d minutes', DURATION.seconds/60)
    print "Crawled links = %d" % len(my_crawler.crawled)
    my_crawler.dump_db()
    my_crawler.build_graph()
    my_crawler.graph_info()
    my_crawler.draw_graph()
