#!/usr/bin/env python

"""

This script follows XFN "me" relationships for a given URL, such as a profile page.
It tries to find all the profiles linked from it.
For now it does not check for symmetrical relationships, as perhaps it should
(which would mean verifying that the linked pages link back).

"""

from BeautifulSoup import BeautifulSoup
from optparse import OptionParser
from urllib import urlopen
from urlparse import urljoin
import sys, copy
from eventlet import coros, httpc, util

class XFNProfileResolver(object):
    """follows profile information"""
    
    def __init__(self,urls, verbose=False, output=sys.stdout):
        """initialize the resolver with a list of URLs"""
        self.urls = urls
        self.verbose = verbose
        self.relationships = urls # we start with the given set
        self.output = output

        # initialize the eventlet coroutines pool
        self.pool = coros.CoroutinePool(max_size=4)
        self.waiters = []
        self.url_stack = copy.copy(urls)
        self.processed= []
        

    def normalize(self,url):
          """just check for a ending / now
          This is a very basic way of normalizing URLs, more needs to be done here
          """
          if url.endswith("/"):
            url=url[:-1]
          return url
          
    def fetch_url(self,url):
        """fetch a URL"""
        # check if we had this URL already
        if self.normalize(url) in self.processed:
            return

        # remember this URL 
        self.processed.append(url)
        if url not in self.relationships:
            self.relationships.append(url)
                    
        # fetch the URL
        try:
            data = httpc.get(url)
        except httpc.Retriable, found:
            self.processed.append(found.retry_url())
            data = found.retry()
        except Exception, e:
            if self.verbose:
                print >>self.output,"Error for retrieving %s: %s" %(url,e)
            return

        soup = BeautifulSoup(data)
        # extract the new URLs from this one
        urls = [self.normalize(ref['href']) for ref in soup.findAll("a",rel="me")]
        urls = [self.fixurl(url,u) for u in urls]
        if self.verbose:
            print >>self.output, "found %s new urls in %s" %(len(urls),url)
        for new_url in urls:
            if self.verbose:
                print >>self.output, "testing new URL %s:" %new_url
            if new_url in self.relationships:
                if self.verbose:
                    print >>self.output, "%s already known" %new_url
                continue
            if self.verbose:
                print >>self.output, "adding %s" %new_url
                
            # create a new waiter
            waiter = self.pool.execute(self.fetch_url,new_url)
            self.waiters.append(waiter)
            waiter.wait()
            
    def fixurl(self,baseurl,url):
        """try to fix URLs without hostname"""
        o = urljoin(baseurl,url)
        return o
          

    def run(self):
        """resolve them"""
        
        for url in self.urls:
            waiter = self.pool.execute(self.fetch_url,url)
            self.waiters.append(waiter)
            waiter.wait()
        
        return 

def main():
    usage = "usage: %prog [options] URL"
    parser = OptionParser(usage)
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose output")
    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")

    main_url = args[0]
    resolver = XFNProfileResolver([main_url],verbose=options.verbose,output=sys.stdout)
    resolver.run()
    print "%s profiles found" %len(resolver.relationships)
    for url in resolver.relationships:
        print url

# run the command-line interface only when invoked as a script
if __name__=="__main__":
    main()

    
    


