# Copyright @2012 by Bluefithnue:

import re
import urllib.request as request
import urllib.parse as urlparse

class GetLink:
    """Fetch a web page and expose helpers to read its title, links and images.

    The page is downloaded once in ``__init__``; every ``get_*`` method then
    works on the cached HTML text in ``self.content``.
    """

    # Pre-compiled patterns, hoisted out of the methods.  Case-insensitive so
    # <A HREF=...> / <IMG SRC=...> are matched too.  [^>]+ keeps the match
    # inside a single tag (the original '.+' could backtrack across tags, and
    # the image pattern missed minimal tags like <img src="x">).
    _LINK_RE = re.compile(r'<a[^>]+href\s*=\s*[\'"](.*?)[\'"]', re.IGNORECASE)
    _IMG_RE = re.compile(r'<img[^>]+src\s*=\s*[\'"]([^\'"]+)[\'"]', re.IGNORECASE)

    def __init__(self, url):
        """Download *url* and cache its body; errors are printed, not raised."""
        # Defaults first, so the attributes exist even when url is None or the
        # download fails (the original left them undefined in those cases and
        # every later method call raised AttributeError).
        self.url = url
        self.url_parse = None
        self._content = b''   # raw body bytes
        self.content = ''     # decoded body text
        if url is not None:
            try:
                self.url_parse = urlparse.urlparse(url)
                # BUG FIX: the original called response.read() twice; the
                # second call returned b'' because the stream was already
                # consumed.  Read once, keep the bytes, decode the copy.
                with request.urlopen(url) as response:
                    self._content = response.read()
                # errors='replace' so a page that is not valid UTF-8 does not
                # abort construction with UnicodeDecodeError.
                self.content = self._content.decode('utf-8', errors='replace')
            except IOError as e:
                # Best-effort, matching the original's non-raising behaviour.
                print(e)

    def get_content(self):
        """Return the decoded HTML of the page ('' if the download failed)."""
        return self.content

    def get_title(self):
        """Return the text between <title> and </title>, or None if absent."""
        start = self.content.find('<title>')
        if start != -1:
            end = self.content.find('</title>', start + 7)
            if end != -1:
                return self.content[start + 7:end]
        return None

    def _collect(self, pattern):
        """Run *pattern* over the page and return a set of absolute URLs.

        Fragment-only references (``#...``) are dropped; absolute http/https
        URLs are kept as-is; root-relative paths (``/...``) are resolved
        against ``self.url`` so the page's own scheme and host are reused.
        """
        found = set()
        for link in pattern.findall(self.content):
            if link.startswith('#'):
                continue
            if link.startswith(('http://', 'https://')):
                found.add(link)
            elif link.startswith('/') and self.url:
                found.add(urlparse.urljoin(self.url, link))
        return found

    def get_links(self):
        """Return the set of absolute URLs referenced by <a href=...> tags."""
        return self._collect(self._LINK_RE)

    def get_images(self):
        """Return the set of absolute URLs referenced by <img src=...> tags."""
        return self._collect(self._IMG_RE)