#!/usr/bin/env python
#coding=utf-8

# Usage: time python disk_cache.py  (to measure total crawl/cache run time)
import os
import re
import urlparse
import pickle
import zlib
from link_crawler import link_crawler
from datetime import datetime, timedelta # 添加缓存过期时间
max_length = 100
'''
Sample output from a run:
filename example.webscraping.com/index.html
Downloading: http://example.webscraping.com/
filename example.webscraping.com/index.html
filename example.webscraping.com/places/default/index/1
filename example.webscraping.com/places/default/index/2
filename example.webscraping.com/places/default/index/3
filename example.webscraping.com/places/default/index/4
Only the first page was re-downloaded (the rest came from cache) -- test passed.
'''
class DiskCache:
    """Cache downloaded results on disk, keyed by URL.

    Dictionary-like interface: ``cache[url] = result`` stores a result,
    ``cache[url]`` loads it back, raising KeyError when the entry is
    missing or older than ``expires``.  Each entry is a pickled
    ``(result, timestamp)`` tuple, optionally zlib-compressed.
    """

    def __init__(self, cache_dir='cache', expires=timedelta(seconds=20), compress=True):
        """
        cache_dir: root directory where cached files are written
        expires: timedelta after which a cached entry is considered stale
        compress: whether to zlib-compress the pickled data on disk
        """
        self.cache_dir = cache_dir
        self.expires = expires
        self.compress = compress

    def url_to_path(self, url):
        """Return the file-system path used to cache this URL."""
        components = urlparse.urlsplit(url)
        # Map an empty or directory-style path onto an index.html file
        # so every URL corresponds to a regular file.
        path = components.path
        if not path:
            path = '/index.html'
        elif path.endswith('/'):
            path += 'index.html'
        filename = components.netloc + path + components.query
        # Replace characters that are unsafe in filenames.  Raw string
        # avoids the invalid '\-' escape of the original pattern.
        filename = re.sub(r'[^/0-9a-zA-Z\-.;_]', '_', filename)
        # Truncate each path segment to the common 255-char filesystem limit.
        filename = '/'.join(segment[:255] for segment in filename.split('/'))
        return os.path.join(self.cache_dir, filename)

    def __getitem__(self, url):
        """Load cached data for this URL; raise KeyError if absent or expired."""
        path = self.url_to_path(url)
        # __setitem__ appends '.html' when the path collides with an
        # existing directory, so mirror that lookup here.
        if os.path.isdir(path) and os.path.isfile(path + '.html'):
            path += '.html'
        if os.path.isfile(path):
            with open(path, 'rb') as fp:
                data = fp.read()
            if self.compress:
                data = zlib.decompress(data)
            result, timestamp = pickle.loads(data)
            if self.has_expired(timestamp):
                raise KeyError(url + ' has_expired')
            return result
        else:
            # URL has not yet been cached
            raise KeyError(url + ' does not exist')

    def __setitem__(self, url, result):
        """Save data to disk for this url."""
        path = self.url_to_path(url)
        folder = os.path.dirname(path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        # A previously cached child URL may have created a directory with
        # this exact name; store alongside it as '<path>.html' instead.
        if os.path.isdir(path):
            path += '.html'
        data = pickle.dumps((result, datetime.utcnow()))
        if self.compress:
            data = zlib.compress(data)
        with open(path, 'wb') as fp:
            fp.write(data)

    def has_expired(self, timestamp):
        """Return whether this timestamp has expired."""
        return datetime.utcnow() > timestamp + self.expires

if __name__ == '__main__':
    # Smoke test: crawl the example site with a disk-backed cache.
    disk_cache = DiskCache()
    link_crawler('http://example.webscraping.com/', '/(index|view)', cache=disk_cache)