'''
Created on 2012-4-14

@author: xutao
'''

#if __name__ == '__main__':
#    pass
#print 'hello python!'
#
#list = [1, 2, [3,4]]
#
#for i in list:
#    print i
#    
#print list.index(1)
#print 2 not in list

import urllib 
#import set

def get_page(url):
    """Fetch *url* and return its contents as a string.

    Returns the empty string on any fetch failure, so callers can treat
    an unreachable page the same as an empty page.
    """
    try:
        return urllib.urlopen(url).read()
    except IOError:
        # urlopen signals network/URL failures via IOError (Python 2);
        # catching it narrowly avoids masking unrelated programming errors
        # the way the previous bare `except:` did.
        return ""
    
def get_next_target(page):
    """Find the first '<a href=' link in *page*.

    Returns a tuple (url, end_position) where end_position is the index
    of the url's closing quote.  When no link is present, returns
    (None, 0); None is falsy, so existing callers that test ``if url:``
    keep working (previously the sentinel was the int 0).
    """
    link_start = page.find('<a href=')
    if link_start == -1:
        # No anchor tag at all: falsy sentinel instead of a fake url.
        return None, 0
    start_position = page.find('"', link_start)
    end_position = page.find('"', start_position + 1)
    url = page[start_position + 1:end_position]
    return url, end_position
#print get_next_target(get_page('http://www.udacity.com/cs101x/index.html'))

def get_all_target(page):
    """Collect every '<a href=' url in *page*, in order of appearance."""
    links = []
    url, end = get_next_target(page)
    while url:
        links.append(url)
        # Resume scanning just past the link we consumed.
        page = page[end:]
        url, end = get_next_target(page)
    return links

def union(p, q):
    """Append to *p*, in place, each element of *q* not already in *p*.

    Preserves the order of *p* and the relative order of new elements.
    Returns None, following the stdlib convention for in-place mutators.
    """
    for item in q:
        if item in p:
            continue
        p.append(item)

def add_to_index(index, keyword, url):
    """Record that *keyword* occurs at *url*.

    *index* is a list of [keyword, [url, ...]] entries.  The url is
    appended to the existing entry for *keyword* when one exists;
    otherwise a new entry is created at the end.
    """
    for entry in index:
        if entry[0] != keyword:
            continue
        entry[1].append(url)
        return
    index.append([keyword, [url]])
            
def add_page_to_index(index, url, content):
    """Index every whitespace-separated word of *content* under *url*.

    De-duplication per keyword is handled inside add_to_index, which
    appends *url* to an existing entry rather than creating a new one.
    (A previous guard ``if word not in index`` compared a string against
    the [keyword, urls] list entries of *index*, so it was always true —
    dead code with identical behavior; it has been removed.)
    """
    for word in content.split():
        add_to_index(index, word, url)
        
def crawl_web(seed):
    """Crawl the web starting from *seed* and return the keyword index.

    Maintains a work list of urls to visit; each page is fetched once,
    its words are added to the index, and its outgoing links are merged
    into the work list (skipping duplicates).
    """
    to_visit = [seed]
    visited = []
    index = []
    while to_visit:
        url = to_visit.pop()
        if url in visited:
            continue
        content = get_page(url)
        add_page_to_index(index, url, content)
        # Queue this page's links without introducing duplicates.
        union(to_visit, get_all_target(content))
        visited.append(url)
    return index

def lookup(index, keyword):
    """Return the url list stored under *keyword*, or [] when absent."""
    matches = [entry[1] for entry in index if entry[0] == keyword]
    return matches[0] if matches else []
#page = get_page('http://www.udacity.com/cs101x/index.html')
#print page
if __name__ == '__main__':
    # Demo crawl runs only when executed as a script, so importing this
    # module no longer triggers network I/O as a side effect.
    index = crawl_web('http://www.udacity.com/cs101x/index.html')
    print(index)                  # parenthesized: valid in Python 2 and 3
    print(lookup(index, "can't"))
#get_all_target(page)
        