#-*- coding: utf-8 -*-
import re
import socket
import lxml.html
import chardet

from time import sleep
from urllib import urlretrieve
from urlparse import urlparse
from os import makedirs
from os.path import exists, splitext, dirname

from httptools import strToUnicode, unicodeToStr


class RepLink(object):
    """Rewrite one absolute link found on a page into a page-relative link."""

    def __init__(self, url1, url2, baseurl):
        # url1: URL of the page currently being processed.
        self.url1 = url1
        # url2: an absolute link found on that page.
        self.url2 = url2
        # baseurl: site root, e.g. 'http://example.com'.
        self.baseUrl = baseurl

    def getRepStr(self):
        """Return (absolutePrefix, relativePrefix) used to relativize url2.

        absolutePrefix is the URL prefix the two pages share; relativePrefix
        is the './' or '../..." sequence that climbs from url1 up to it.
        (The original also returned url1/url2 as trailing debug elements in
        one branch only; that inconsistent arity is dropped — callers use
        indices 0 and 1.)
        """
        segs1 = [s for s in self.url1.replace('http://', '').split('/') if s]
        segs2 = [s for s in self.url2.replace('http://', '').split('/') if s]

        # Find the first index where the two paths diverge, capped at a
        # depth of 10 (same safeguard as the original while-True loop).
        n = 0
        while n <= 10 and segs1[:n] == segs2[:n]:
            n += 1

        urlPart = 'http://' + '/'.join(segs1[:n - 1])
        # Directory-like prefixes (no '.' in the path) get a trailing slash
        # so the later string replace lines up with url2's own slashes.
        if urlparse(urlPart).netloc and ('.' not in urlparse(urlPart).path):
            urlPart += '/'

        # One '../' for each path segment of url1 below the shared prefix.
        depth = len(segs1[n - 1:])
        if depth < 1:
            return (urlPart, './')
        return (urlPart, depth * '../')

    def replUrl(self):
        """Return url2 rewritten as a path relative to url1 (also stored back)."""
        if self.url2.startswith(self.baseUrl + '#'):
            # <a href="http://site#frag"> collapses to a pure fragment link.
            self.url2 = self.url2.replace(self.baseUrl + '#', '#')
        else:
            absPrefix, relPrefix = self.getRepStr()
            self.url2 = self.url2.replace(absPrefix, relPrefix)
        return self.url2

class Retrieve(object):
    """Download one URL into a local mirror path and extract its links."""

    def __init__(self, url, baseUrl):
        self.url = url
        # Local file path mirroring the URL; directories created on demand.
        self.file = self.fileName()
        self.baseUrl = baseUrl
        # Character set detected by getLinks(); '' until a page is parsed.
        self.charset = ''

    def fileName(self):
        """Map self.url onto a relative file path, creating its directories.

        e.g. 'http://host/dir/' -> 'host/dir/index.html'
        """
        parts = urlparse(self.url)
        path = parts.netloc + parts.path
        if not parts.path:
            path = parts.netloc + '/'
        # Extension-less URLs are treated as directories.
        if splitext(path)[1] == '':
            path += 'index.html'

        filePath = path
        dirPath = dirname(path)
        # Guard against an empty dirname — makedirs('') would raise.
        if dirPath and not exists(dirPath):
            makedirs(dirPath)
        return filePath

    def downLoad(self):
        """Fetch self.url and save it to self.file.

        Returns a 2-tuple on success (urlretrieve-style placeholder) or
        ('** invail url',) on any failure; Crawl.getPage checks result[0].
        """
        # NOTE(review): process-global timeout — affects every socket in
        # the program, not just this request.
        socket.setdefaulttimeout(5)
        try:
            import urllib2
            data = urllib2.urlopen(self.url).read()
            print(self.url)
            # Binary mode: the URL may be an image or other non-text asset.
            with open(self.file, 'wb') as f:
                f.write(data)
            result = (1, 0)
        except Exception as e:
            print('download error: %s' % e)
            result = ('** invail url', )
        return result

    def linkReplFunc(self, doc, method='html'):
        """Rewrite absolute links in doc to relative ones and re-save the file."""
        def replLink(link):
            return RepLink(self.url, link, baseurl=self.baseUrl).replUrl()

        doc.rewrite_links(replLink)
        # tostring(..., encoding=<charset>) yields an encoded byte string,
        # so the file is written in binary mode.
        html = lxml.html.tostring(doc, encoding=self.charset, method=method)
        with open(self.file, 'wb') as f:
            f.write(html)

    def getLinks(self):
        """Return the page's absolute links; rewrite the saved copy to relative."""
        # Read raw bytes: chardet expects undecoded input.
        with open(self.file, 'rb') as f:
            html = f.read()

        # Detect the encoding; widen gb2312 to its gb18030 superset.
        guess = chardet.detect(html)
        try:
            if guess['encoding'].lower() == 'gb2312':
                guess['encoding'] = 'gb18030'
        except Exception:
            # detect() may return encoding=None; fall back to utf-8.
            guess['encoding'] = 'utf-8'
        self.charset = guess['encoding']
        html = strToUnicode(html, encoding=self.charset)

        # Normalize every link on the page to an absolute URL first.
        doc = lxml.html.fromstring(html)
        doc.make_links_absolute(base_url=self.baseUrl)

        linkList = []
        for link in lxml.html.iterlinks(doc):
            # iterlinks yields (element, attribute, url, pos); url is [2].
            if link[2].startswith(self.baseUrl + '#'):
                # Skip same-page fragment anchors like <a href="#top">.
                continue
            linkList.append(link[2])

        # Rewrite absolute links back to relative paths and save the file.
        self.linkReplFunc(doc)

        return linkList

    def getCssLinks(self):
        """Extract image and @import URLs from a downloaded CSS file."""
        with open(self.file) as f:
            css = f.read()

        def getNewLink(cl):
            # Resolve leading '../' sequences against the CSS file's own URL.
            up = urlparse(self.url)
            if (not up.path) or ('../' not in cl):
                return cl

            cs = cl.count('../') + 1
            prefix = up.scheme + '://' + up.netloc + '/'.join(up.path.split('/')[:-cs])
            return re.sub(r'(\.\./)+', prefix + '/', cl)

        # Image links, e.g. background: url('img/x.png').
        picLinks = re.findall(r'background:\s*url\s*\([\'\"]?([a-zA-Z0-9/\._-]+)[\'\"]?\)', css, re.I)
        # Nested stylesheet links, e.g. @import "other.css".
        cssLinks = re.findall(r'@import\s*[\'\"]*([a-zA-Z0-9/\._-]+)[\'\"]*', css, re.I)

        return [getNewLink(cl) for cl in picLinks + cssLinks]

class Crawl(object):
    """Depth-first single-domain crawler driving Retrieve over a link queue."""

    def __init__(self, url, domain):
        self.url = url
        self.domain = domain
        # URLs already downloaded; a set for O(1) membership tests.
        self.seen = set()
        # URLs still pending download (used as a LIFO stack by go()).
        self.vlink = [url]
        self.baseUrl = 'http://' + domain

    def getPage(self):
        """Download self.url, then queue any new same-domain links it contains."""
        rv = Retrieve(self.url, self.baseUrl)
        # Record the URL as visited before downloading so it is never re-queued.
        self.seen.add(self.url)
        result = rv.downLoad()
        if result[0] == '** invail url':
            return

        # Leaf assets: nothing to parse for further links.
        # (endswith with a real '.ext' avoids misclassifying e.g. 'foojpg'.)
        if self.url.lower().endswith(('.js', '.jpg', '.png', '.gif')):
            return

        try:
            if self.url.lower().endswith('.css'):
                links = rv.getCssLinks()
            else:
                links = rv.getLinks()
        except Exception as e:
            print('getLinks error: %s' % e)
            return

        for link in links:
            # Strip query strings and fragments before deduplication.
            link = link.split('?')[0].split('#')[0]
            if (link not in self.seen) and (link not in self.vlink) and (self.domain in link):
                self.vlink.append(link)

    def go(self):
        """Crawl until the queue of pending links is empty."""
        while self.vlink:
            self.url = self.vlink.pop()
            print('download link: %s' % self.url)
            self.getPage()

if __name__ == '__main__':
    # Mirror the whole site rooted at this URL into the current directory.
    url = "http://mindhacks.cn/"
    cr = Crawl(url, 'mindhacks.cn')
    cr.go()
