import os
import pickle
import re
from urllib.parse import urlsplit

# Disk-backed cache class (stores pickled results keyed by URL)
class DiskCache:
    """Disk-backed cache mapping URLs to pickled results.

    Supports dict-style access: ``cache[url] = result`` pickles *result*
    to a file derived from *url*; ``cache[url]`` loads it back, raising
    KeyError when the URL has not been cached.
    """

    def __init__(self, max_length=255, cache_dir='D:\\Crawl_Results\\cache'):
        """
        :param max_length: maximum length of each path segment of the
            generated cache filename (255 matches common filesystem limits).
        :param cache_dir: root directory under which cache files are stored.
        """
        self.cache_dir = cache_dir
        # Defensive: fall back to 255 if a caller passes a non-int
        # (the original hard-coded 255 and ignored this parameter).
        self.max_length = max_length if isinstance(max_length, int) else 255

    def url_to_path(self, url):
        '''
        Build the filesystem path for *url* under ``self.cache_dir``.
        '''
        components = urlsplit(url)
        path = components.path
        # A bare host, or a path ending in '/', maps to an index.html file.
        if not path:
            path = '/index.html'
        elif path.endswith('/'):
            path += 'index.html'
        filename = components.netloc + path + components.query
        # Replace characters that are unsafe in filenames.
        filename = re.sub(r'[^/0-9a-zA-Z\-.,;]', '_', filename)
        # Truncate each segment to max_length and join with the platform
        # separator (backslash on Windows, slash elsewhere).
        filename = os.sep.join(
            segment[:self.max_length] for segment in filename.split('/'))
        return os.path.join(self.cache_dir, filename)

    def __getitem__(self, url):
        '''
        Load the cached result for *url* from disk.

        :raises KeyError: when no cache file exists for *url*.
        '''
        path = self.url_to_path(url)
        if os.path.exists(path):
            # NOTE(review): pickle.load is unsafe on untrusted cache files;
            # acceptable here only because we wrote the files ourselves.
            with open(path, 'rb') as fp:
                return pickle.load(fp)
        else:
            raise KeyError(url + "不存在")

    def __setitem__(self, url, result):
        '''
        Pickle *result* into the cache file derived from *url*.
        '''
        path = self.url_to_path(url)
        folder = os.path.dirname(path)
        # exist_ok avoids a race if the directory is created concurrently.
        os.makedirs(folder, exist_ok=True)
        with open(path, 'wb') as fp:
            pickle.dump(result, fp)


if __name__ == "__main__":
    # Demo: map a sample URL to its on-disk cache path.
    # BUGFIX: the original passed the URL itself as max_length
    # (DiskCache's first positional parameter); pass an int instead.
    seed_url = "https://movie.douban.com/top250/subject/1291546"
    cache = DiskCache(255)
    print(cache.url_to_path(seed_url))
