﻿from sys import argv

from BeautifulSoup import BeautifulSoup
from urllib import urlopen
from urlparse import urljoin

from threading import Thread
from Queue import Queue
from SimpleSyncSet import SimpleSyncSet
import consistent_hashing

import spade


def get_urls(url):
  """Fetch *url* and return all links found in its <a href=...> tags,
  resolved to absolute URLs against the page's own URL.

  Best-effort: any fetch failure yields an empty list so the crawler
  keeps going. Note: ``except Exception`` (not a bare ``except``) so
  KeyboardInterrupt/SystemExit still propagate in worker threads.
  """
  try:
    ph = urlopen(url)
    try:
      html_string = ph.read()
    finally:
      # Close even if read() raises; the old code leaked the handle here.
      ph.close()
  except Exception:
    return []
  soup = BeautifulSoup(html_string)
  # href=True skips anchors without an href attribute.
  return [urljoin(url, tag['href']) for tag in soup.findAll('a', href=True)]


def join_dicts(d, e):
  """Merge two {key: list} dicts into a new dict, without mutating either.

  Keys present in both get the deduplicated union of their lists (order
  unspecified, as set() dedup implies); keys only in *e* get a copy of
  e's list so later mutation of the result cannot alias *e*.
  """
  new_dict = d.copy()
  # Iterate items() instead of keys()+lookup; `in` replaces the
  # deprecated dict.has_key().
  for key, values in e.items():
    if key in new_dict:
      new_dict[key] = list(set(new_dict[key] + values))
    else:
      new_dict[key] = values[:]
  return new_dict


class CrawlerThread(Thread):
  """Worker thread for a distributed crawler.

  Repeatedly takes a URL from the shared queue, extracts its outgoing
  links, and partitions them among agents via consistent hashing: the
  links this agent owns go back onto the local queue; links owned by
  other agents are accumulated in self.myAgent.urls (presumably shipped
  to those agents elsewhere -- not visible in this file).
  """

  def __init__(self, url_queue, visited_set, agent=None):
    # url_queue: shared Queue of URLs still to crawl.
    # visited_set: shared set of already-crawled URLs (SimpleSyncSet --
    #   assumed thread-safe; TODO confirm in its implementation).
    # agent: optional controller exposing .agents, .getName(),
    #   .visited_url_handler() and .urls; None means standalone mode.
    self.queue = url_queue
    self.visited = visited_set
    self.myAgent = agent
    Thread.__init__(self)

  def run(self):
    # Crawl loop: runs forever; there is no shutdown signal, so the
    # thread only ends with the process.
    while True:
##      print "Visited", len(self.visited), len(self.myAgent.visited_set), self.myAgent.last_visited
      print "Queue", self.queue.qsize()
      url = self.queue.get()
      # NOTE(review): this membership test and the .add() below are not
      # one atomic step, so two threads can pick up the same URL.
      if url and url not in self.visited:
        try:
          new_urls = get_urls(url)
        except Exception, e:
          print "\n\n\n\n\n@@@@@@@@@@@@@@@@@@@@@Parsing error", e, '\n@@@@@@@@@@@@@@@@@@@@@', url, "\n\n\n\n\n"
          new_urls = []
        try:
          self.visited.add(url)
          if self.myAgent:
            agents = self.myAgent.agents
            myAgentName = self.myAgent.getName()
            self.myAgent.visited_url_handler(url)
          else:
            # Standalone mode: a single pseudo-agent owns every URL.
            agents = []
            myAgentName = 'agent'
          # Ring is rebuilt for every URL so changes in self.myAgent.agents
          # are picked up on the next iteration.
          self.hash_ring = consistent_hashing.ConsistentHashing([myAgentName] + agents) # maybe re-init instead of rebuilding?
          # build the {agent_name: [links]} dictionary
          urls = [(i,[]) for i in [myAgentName] + agents]
          urls = dict(urls)
          for new_url in new_urls:
            # Consistent hashing decides which agent owns each link.
            name = self.hash_ring.get_node(new_url)
            urls[name].append(new_url)
          # Our own share goes straight back onto the local work queue.
          for new_url in urls[myAgentName]:
            if new_url not in self.visited:
              self.queue.put(new_url)
          urls[myAgentName] = []
          if self.myAgent:
            # Accumulate the other agents' shares for later delivery.
            self.myAgent.urls = join_dicts(self.myAgent.urls, urls)
        except Exception, e:
          print "\n\n\n\n\n######################EXCEPTION", e, '\n############################', url, "\n\n\n\n\n"


def start_threads(n, url_queue, visited_set, agent=None):
  """Create and start n CrawlerThread workers; return them as a list."""
  workers = []
  for _ in xrange(n):
    worker = CrawlerThread(url_queue, visited_set, agent)
    worker.start()
    workers.append(worker)
  return workers


def init_crawlers(url_queue, visited_set, agent=None, num=None, fname=None):
  """Seed the URL queue and start the crawler worker threads.

  agent: optional spade agent handed through to each CrawlerThread.
  num: worker-thread count (defaults to 5 when falsy).
  fname: optional file of seed URLs, one per line; replaces the
    built-in defaults when given.
  Returns the list of started CrawlerThread objects.
  """
  n = num if num else 5
  seed_urls = ['http://docs.python.org/library/thread.html', 'http://ii.uni.wroc.pl'] # , 'http://ii.uni.wroc.pl/files/LetniePraktykiLTE.pdf'
  if fname:
    fin = open(fname)
    try:
      # Strip the trailing newline that readlines() keeps -- previously
      # 'http://x\n' was queued verbatim and could never be fetched.
      # Blank lines are skipped.
      seed_urls = [line.strip() for line in fin if line.strip()]
    finally:
      # Close even if reading raises.
      fin.close()
  for seed_url in seed_urls:
    url_queue.put(seed_url)
  return start_threads(n, url_queue, visited_set, agent)


if __name__ == '__main__':
  # Standalone entry point (no spade agent).
  # Usage: script.py [num_threads seed_urls_file]
  num = None
  fname = None
  if len(argv) > 2:
    num, fname = int(argv[1]), argv[2]
  init_crawlers(Queue(), SimpleSyncSet(), None, num, fname)
