import myhttplib2
import urlparse
from BeautifulSoup import *
from time import sleep as PySleep
from random import randrange
import os
import re
import datetime
import sys
import shutil

from utils import dict_read
from utils import dict_write
from utils import list_read
from utils import list_write
from utils import open_store
from utils import match_or
from utils import match_and

# import pdb

# Relevant RFC:
# http://tools.ietf.org/html/rfc4180 -- the CSV file format

# Name of the on-disk configuration file written by configure().
httpmine_config_filename = 'httpmine_config'
# Keys of the configuration dictionary stored in that file.
httpmine_initial_website = 'httpmine.initial.website'
httpmine_path = 'httpmine.path'
httpmine_data = 'httpmine.data'
httpmine_path_data = 'httpmine.path.data'
httpmine_cache = 'httpmine.cache'
httpmine_callback_path_urls = 'httpmine.callback.path.urls'
httpmine_callback_path_text = 'httpmine.callback.path.text'
httpmine_callback_crawl_iteration_top = 'httpmine.callback.crawl.iteration.top'
httpmine_callback_sleep_min = 'httpmine.callback.sleep.min'
httpmine_callback_sleep_max = 'httpmine.callback.sleep.max'

# File names used for persisted crawl state and extracted data.
discovered_filename = 'discovered.txt'
visited_filename = 'visited.txt'
rejected_filename = 'rejected.txt'
forbidden_filename = 'forbidden.txt'
allowed_filename = 'allowed.txt'
phrases_filename = 'phrases.txt'
mails_filename = 'mails.txt'
edges_filename = 'edges-origin_target.csv'
tags_filename = 'tags.txt'


def next(sequence):
    """Pop and return the first element of *sequence*; None when that fails.

    Mirrors an iterator-style next() for list-like queues: an empty (or
    unusable) sequence yields None instead of raising.
    """
    result = None
    try:
        result = sequence.pop(0)
    except Exception:
        pass
    return result

def utf8_line(x):
    """Return unicode string *x* terminated by a newline, encoded as UTF-8 bytes."""
    # u'\n' is identical to the old unichr(10) but clearer and py3-compatible.
    return (x + u'\n').encode('utf-8')


def clean_soup(soup):
    """Strip non-content nodes from *soup* in place and return it.

    Removes script/noscript/style elements, the DOCTYPE text node and
    HTML comments, so that only human-readable page text remains.
    """
    # Plain loops instead of side-effect list comprehensions.
    for name in ('script', 'noscript', 'style'):
        for tree in soup(name):
            tree.extract()
    # Get rid of the DOCTYPE declaration, if present.
    for tree in soup.findAll(text=re.compile("DOCTYPE")):
        tree.extract()
    # Get rid of HTML comments.
    for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
        comment.extract()
    return soup

class Bot(object):
    """Minimal crawler driver: fetches URLs supplied by a callback object.

    The callback provides the work queue (next), the politeness delay
    (sleep), bookkeeping (visited, results, save) and error reporting
    (unexpected).  Fixes: normalized the mixed tab/space indentation and
    uses the `except ... as` form (valid from Python 2.6).
    """

    def __init__(self, client, callback):
        # client must expose request(url, method) -> (cachekey, response, content)
        self.client = client
        self.callback = callback

    def get(self, item):
        """Fetch a single URL and report the outcome to the callback."""
        if item is None:
            self.callback.unexpected('Bot.get(item)', 'item is None')
        else:
            print(item)
            try:
                cachekey, response, content = self.client.request(item, 'GET')
            except Exception as ex:
                # Network failures are reported, not fatal: the crawl goes on.
                self.callback.unexpected('Bot.get(item)', repr(ex))
            else:
                self.callback.visited(item)
                self.callback.results(item, cachekey, response, content)

    def crawl(self):
        """Fetch URLs from the callback until it returns None; always persist state."""
        try:
            item = self.callback.next()
            while item is not None:
                self.get(item)
                self.callback.sleep()
                item = self.callback.next()
        except Exception as ex:
            self.callback.unexpected('Bot.crawl(item)', repr(ex))
        finally:
            # Save progress even after an unexpected error or interrupt.
            self.callback.save()


class BotCallback(object):
    """Crawl policy, URL bookkeeping and text extraction for Bot.

    Persists discovered/visited/rejected URLs, mailto addresses, the
    origin->target edge list and extracted text phrases under path_urls
    and path_text.  A URL is accepted only when it matches none of the
    'forbidden' patterns and at least one of the 'allowed' patterns.

    Fixes: normalized the mixed tab/space indentation, `is None` tests,
    removed dead commented-out code.
    """

    def __init__(self,
            path_urls,
            path_text,
            top=1,
            min_sleep=1,
            max_sleep=2
            ):
        # Maximum recursion depth accepted by tags() before bailing out.
        self.tags_top_level = 100
        self.path_urls = path_urls
        self.path_text = path_text
        self.path_html = path_text + '/html'
        self.images_list = []

        if not os.path.exists(self.path_text):
            os.makedirs(self.path_text)
        if not os.path.exists(self.path_html):
            os.makedirs(self.path_html)

        # URL filters: lists of compiled regular expressions.
        self.forbidden = list_read(os.path.join(self.path_urls, forbidden_filename), fun=lambda x: re.compile(x))
        self.allowed = list_read(os.path.join(self.path_urls, allowed_filename), fun=lambda x: re.compile(x))
        self.visited_filename = os.path.join(self.path_urls, visited_filename)
        self.discovered_filename = os.path.join(self.path_urls, discovered_filename)
        self.rejected_filename = os.path.join(self.path_urls, rejected_filename)
        self.mails_filename = os.path.join(self.path_urls, mails_filename)
        self.edges_filename = os.path.join(self.path_urls, edges_filename)
        self.phrases_filename = os.path.join(self.path_text, phrases_filename)
        self.tags_filename = os.path.join(self.path_html, tags_filename)
        self.visited_list = list_read(self.visited_filename)
        self.discovered_list = list_read(self.discovered_filename)
        self.rejected = open_store(self.rejected_filename)
        self.mails = open_store(self.mails_filename)
        self.edges = open_store(self.edges_filename)
        self.phrases = open_store(self.phrases_filename)
        self.top = top
        self.counter = 0
        self.min_sleep = min_sleep
        self.max_sleep = max_sleep
        # Clamp so that randrange(min_sleep, max_sleep) has a non-empty range.
        if self.min_sleep < 1:
            self.min_sleep = 1
        if self.max_sleep <= self.min_sleep:
            self.max_sleep = self.min_sleep + 1

    def add(self, url, base, fun):
        """Record *url* found on page *base*: queue it via *fun* when it passes
        the forbidden/allowed filters, otherwise store it as rejected."""
        if url is None:
            self.unexpected('unexpected url', url)
        else:
            scheme, domain, path, query, fragment = urlparse.urlsplit(url)
            if domain == '':
                # Relative link: resolve it against the page it was found on.
                url = urlparse.urljoin(base, url)
                scheme, domain, path, query, fragment = urlparse.urlsplit(url)
            if scheme.startswith('http'):
                if fragment != '':
                    url, fragment = urlparse.urldefrag(url)
                if url not in self.visited_list and url not in self.discovered_list:
                    self.edges.write(utf8_line(base + u',' + url))
                    # Explicit comparisons kept: match_or's return type is not
                    # guaranteed to be a bool -- TODO confirm against utils.
                    if match_or(self.forbidden, url) == False and match_or(self.allowed, url) == True:
                        fun(url)
                    else:
                        self.rejected.write(utf8_line(url))
            else:
                # Non-http(s) schemes (ftp:, javascript:, ...) are not crawled.
                self.rejected.write(utf8_line(url))

    def prefix(self, value):
        """Queue *value* at the front of the discovered list (crawl it next)."""
        self.discovered_list.insert(0, value)

    def suffix(self, value):
        """Queue *value* at the back of the discovered list."""
        self.discovered_list.append(value)

    def sleep(self):
        """Politeness delay: a random whole number of seconds in [min_sleep, max_sleep)."""
        PySleep(randrange(self.min_sleep, self.max_sleep))

    def unexpected(self, key, value):
        """Report an unexpected condition; currently just printed."""
        print('%s: %s' % (key, value))

    def visited(self, value):
        """Mark *value* as fetched."""
        self.visited_list.append(value)

    def links(self, value, base):
        """Process <a> tags: collect mailto: addresses, queue other hrefs last."""
        for item in value:
            # NOTE: has_key kept -- on BeautifulSoup 3 tags `in` tests
            # children, not attributes.
            if item.has_key('href'):
                href = myhttplib2.iri2uri(item['href'])
                if href.startswith('mailto:'):
                    (head, sep, tail) = href.partition(u':')
                    self.mails.write(utf8_line(base + u',' + tail))
                else:
                    self.add(href, base, fun=self.suffix)

    def frames(self, value, base):
        """Process <frame> tags: queue their sources first."""
        for item in value:
            if item.has_key('src'):
                self.add(item['src'], base, fun=self.prefix)
            else:
                print('warning: src attr expected')
                print('item %s' % repr(item))

    def images(self, value, base):
        """Process <img> tags: queue their sources first."""
        for item in value:
            if item.has_key('src'):
                self.add(item['src'], base, fun=self.prefix)
            else:
                print('warning: src attr expected')
                print('image %s' % repr(item))

    def tags(self, tags, level):
        """Recursively write the text of every non-script tag to the phrases store."""
        if level > self.tags_top_level:
            self.unexpected('BotCallback.tags', 'excessive nesting of tags')
        else:
            for item in tags:
                if hasattr(item, 'name') and item.name != 'script':
                    if item.string is not None:
                        self.phrases.write(utf8_line(item.string))
                    else:
                        self.tags(item.contents, level + 1)

    def results(self, url, cachekey, response, content):
        """Handle a fetched page: extract links, frames, images and text
        from text/* responses; everything else is ignored."""
        if not response.has_key('content-type'):
            self.unexpected('BotCallback.results', 'response without content-type header')
        elif response['content-type'].startswith('text'):
            soup = clean_soup(BeautifulSoup(content))
            self.links(soup.findAll('a'), url)
            self.frames(soup.findAll('frame'), url)
            self.images(soup.findAll('img'), url)
            self.tags(soup, 0)

    def next(self):
        """Return the next unvisited discovered URL, or None when the queue
        is empty or the iteration budget (top) is spent."""
        self.counter += 1
        if self.counter > self.top:
            return None
        print('counter %d' % self.counter)
        try:
            url = self.discovered_list.pop(0)
            while url in self.visited_list:
                url = self.discovered_list.pop(0)
            return url
        except IndexError:
            # Queue exhausted.
            return None

    def save(self):
        """Flush the visited/discovered lists and close all output stores."""
        list_write(self.visited_filename, self.visited_list, fun=utf8_line)
        list_write(self.discovered_filename, self.discovered_list, fun=utf8_line)
        self.rejected.close()
        self.mails.close()
        self.edges.close()
        self.phrases.close()


def configure():
    """Create the initial crawler configuration in the target directory.

    Writes the httpmine_config file, the seed discovered-URLs file and the
    default forbidden/allowed URL pattern files.  The target directory is
    sys.argv[2] when given, otherwise the current working directory.
    """
    path = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
    now = datetime.datetime.now()
    # NOTE(review): fields are not zero-padded, so directory names are
    # ambiguous (e.g. month 1/day 12 vs month 11/day 2) -- kept as is for
    # backward compatibility.
    data = '/%s%s%s%s%s%s%s' % (now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond)
    path_data = path + data
    httpmine_config = dict()
    httpmine_config[httpmine_path] = path
    httpmine_config[httpmine_path_data] = path_data
    httpmine_config[httpmine_initial_website] = 'http://www.reddit.com'
    httpmine_config[httpmine_cache] = path_data + '/cache'
    httpmine_config[httpmine_callback_path_urls] = path_data + '/urls'
    httpmine_config[httpmine_callback_path_text] = path_data + '/text'
    httpmine_config[httpmine_callback_crawl_iteration_top] = '5'
    httpmine_config[httpmine_callback_sleep_min] = '1'
    httpmine_config[httpmine_callback_sleep_max] = '2'
    dict_write(os.path.join(path, httpmine_config_filename), httpmine_config)

    # Seed the crawl frontier with the initial website; `with` guarantees
    # the file is closed even on error.
    with open(os.path.join(path, discovered_filename), 'w') as f:
        f.write(httpmine_config[httpmine_initial_website])

    # Raw strings so the backslash escapes reach the regex engine untouched.
    forbidden_list = [
        r'.*google.*',
        r'.*youtube.*',
        r'.*yahoo.*',
        r'.*flickr.*',
        r'.*microsoft.*',
        r'.*amazon.*',
        r'.*rss.*',
        r'.*reddit.*\.png.*',
        r'.*reddit.*\.jpg.*',
    ]

    allowed_list = [
        r'http://.*reddit.*',
        r'http://.*imgur.com.*',
        r'http://.*jpg',
        r'http://.*png',
        r'http://.*pdf',
    ]

    list_write(os.path.join(path, forbidden_filename), forbidden_list, fun=lambda x: x + '\n')
    list_write(os.path.join(path, allowed_filename), allowed_list, fun=lambda x: x + '\n')

    print('configure done!')

import pdb

def start():
    """Copy the seed URL and filter files into the configured urls directory.

    Reads the configuration written by configure(), prints it, creates the
    urls directory and copies discovered/forbidden/allowed files into it.
    """
    path = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
    httpmine_config = dict_read(os.path.join(path, httpmine_config_filename))
    for key, value in httpmine_config.items():
        print('%s = %s' % (key, value))
    path_urls = httpmine_config[httpmine_callback_path_urls]
    if not os.path.exists(path_urls):
        os.makedirs(path_urls)
    # One loop instead of three near-identical copyfile calls.
    for name in (discovered_filename, forbidden_filename, allowed_filename):
        shutil.copyfile(os.path.join(path, name), os.path.join(path_urls, name))
    print('start done!')


def update():
    """Run one crawl session using the stored configuration.

    Builds a BotCallback from the persisted settings and drives a Bot over
    a caching HTTP client until the iteration budget is spent.
    """
    path = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
    httpmine_config = dict_read(os.path.join(path, httpmine_config_filename))
    callback = BotCallback(
        httpmine_config[httpmine_callback_path_urls],
        httpmine_config[httpmine_callback_path_text],
        int(httpmine_config[httpmine_callback_crawl_iteration_top]),
        int(httpmine_config[httpmine_callback_sleep_min]),
        int(httpmine_config[httpmine_callback_sleep_max])
    )
    # 60-second timeout per request; responses are cached on disk.
    bot = Bot(myhttplib2.HTTP(cache=httpmine_config[httpmine_cache], timeout=60), callback)
    bot.crawl()
    print('update done!')


