import logging
import shelve
import sys
import time
import urllib
import urllib.request
from collections import deque

import lxml.html
from lxml import etree


def get_sites(filename):
    """Read whitespace-separated URLs from *filename* and return them as a list.

    Any run of whitespace (spaces, newlines) separates entries, so the config
    file may list one URL per line or several per line.
    """
    # "with" guarantees the file handle is closed even if read() raises,
    # unlike the previous open()/close() pair.
    with open(filename) as fin:
        return fin.read().split()


def parse_page(page):
    """Return every anchor href found in *page* (an HTML string or bytes).

    Hrefs are returned verbatim — relative links are NOT resolved against
    any base URL here; callers must absolutize them if needed.
    """
    dom = lxml.html.fromstring(page)
    # The @href XPath already yields the attribute values themselves;
    # list() preserves the original list return type.
    return list(dom.xpath('//a/@href'))


def main():
    """Breadth-first crawl starting from the URLs listed in config.ini.

    Each fetched page body is stored in the shelve database "db" keyed by
    its URL, and every link found on it is appended to the crawl queue.
    NOTE(review): there is no visited-set, so the crawl can revisit pages
    and may never terminate on a cyclic site graph — intentional? confirm.
    """
    SLEEP_TIME = 5  # seconds between requests (crude politeness delay)

    logger = logging.getLogger()
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel(logging.DEBUG)

    res = shelve.open("db")
    try:
        # deque gives O(1) popleft(); list.pop(0) was O(n) per dequeue.
        queue = deque(get_sites("config.ini"))
        while queue:
            site_url = queue.popleft()
            time.sleep(SLEEP_TIME)
            logger.info("Opening %s", site_url)

            try:
                # urllib.urlopen is Python 2 only; urllib.request.urlopen is
                # the Python 3 API.  Its URLError subclasses OSError/IOError,
                # so the except clause below still catches fetch failures.
                site_content = urllib.request.urlopen(site_url).read()
            except IOError:
                logger.error("Skipping %s", site_url)
                # Bug fix: without this `continue`, the code below ran with
                # site_content undefined (NameError on the first failure) or
                # stale from the previous iteration.
                continue

            res[site_url] = site_content
            logger.info("parsing: %s", site_url)
            received_links = parse_page(site_content)
            queue.extend(received_links)
    finally:
        # Bug fix: the original called conn.close() — `conn` never existed
        # (NameError); the shelf is `res`.  finally also closes it on error.
        res.close()


if __name__=="__main__":
    main()
