#!/usr/bin/env python
#coding:utf-8
import os
import urlparse
from collections import deque
import mimetypes
# Initialise the extension -> MIME-type table from the library defaults.
# (The previous call passed ["mimetypes"], which told init() to read a
# mapping *file* literally named "mimetypes" -- the module name was
# mistakenly supplied where a list of map files is expected.)
mimetypes.init()
class DownLoader(object):
    """Plugin that mirrors downloaded responses onto the local file system.

    Files are stored under one directory per site (rooted next to this
    module); the directory tree below it reproduces the URL path.
    """

    def __init__(self, file_ext='*'):
        # file_ext: '*' to save everything, or one/many extensions as a
        # ';'-separated string or a list, e.g. '.html;.css'.
        self.file_ext = file_ext

    def handle(self, resp):
        """
        Plugin entry point.

        Saves the response body when its Content-Type matches the
        configured extensions.  Always returns resp unchanged, so a
        failed save never breaks the rest of the pipeline.
        """
        try:
            self.__down(resp, self.file_ext)
        except Exception:
            # Deliberate best effort: saving is optional, the pipeline
            # must keep running.  (Returning from `finally`, as before,
            # would also have masked exceptions raised outside the try.)
            pass
        return resp

    def __get_mime(self, file_ext):
        """
        Map a file extension to its MIME type.

        Returns the MIME string, or False when the extension is unknown.
        """
        file_ext = str(file_ext).lower()
        if not file_ext.startswith('.'):
            file_ext = '.' + file_ext
        # dict.get replaces the broad try/except around a plain KeyError.
        return mimetypes.types_map.get(file_ext, False)

    def __down(self, resp, file_ext='*'):
        """
        Save the response if its Content-Type matches file_ext
        ('*' means save everything).
        """
        if file_ext != '*' and isinstance(file_ext, str):
            file_ext = file_ext.split(';')
        content_type = resp.info().getheader("Content-Type")
        # Strip parameters such as "; charset=utf-8" so the bare MIME
        # type can be compared against the values from __get_mime.
        # (The old code compared the raw header and therefore never
        # matched headers carrying a charset parameter.)
        if content_type:
            content_type = content_type.split(';')[0].strip()
        need_down = []
        if isinstance(file_ext, list):
            for ext in file_ext:
                mtype = self.__get_mime(ext)
                if mtype:
                    need_down.append(mtype)
        if file_ext == '*' or content_type in need_down:
            self.__save_file(resp)

    def __save_file(self, resp):
        """
        Write resp.content to its mirrored location on disk.
        """
        save_path, save_name = self.__create_save_path(resp.url)
        # 'wb' (was 'wb+'): we only write, and it truncates any old copy.
        with open(os.path.join(save_path, save_name), 'wb') as fp:
            fp.write(resp.content)

    def __create_save_path(self, url):
        """
        Create the on-disk directory mirroring the URL's path and derive
        a file name for it.

        Returns (directory, file_name).  A URL path whose last segment
        has no dot gets 'index.html'; the query string (with characters
        unsafe in file names removed, truncated to keep the name within
        255 chars) is appended so distinct URLs stay distinct files.
        """
        url_info = urlparse.urlparse(url)
        path = url_info.path
        root_path = os.path.dirname(__file__)
        site_path = os.path.join(root_path, url_info.netloc)
        # One directory per site.
        if not os.path.exists(site_path):
            os.mkdir(site_path)
        segments = path.split('/')
        file_name = None
        # A dotted last segment is the file; everything before it is
        # directories.  (The old code repeated this test a second time
        # on the same data via a deque -- once is enough.)
        if segments[-1].find('.') != -1:
            file_name = segments.pop()
        if not file_name:
            file_name = 'index.html'
        if path == '/' or path == '':
            save_path = site_path
        else:
            rel_path = '/'.join(segments)
            if rel_path.startswith('/'):
                rel_path = rel_path[1:]
            save_path = os.path.join(site_path, rel_path)
        # makedirs replaces the manual level-by-level mkdir loop.
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        if url_info.query:
            query_str = str(url_info.query)
            # Keep the final name within a 255-character file-name limit
            # (fixes the old `name_lenght` truncation variable typo).
            if len(query_str) + len(file_name) > 255:
                query_str = query_str[:255 - len(file_name)]
            # Drop characters that are illegal/unsafe in file names.
            for ch in ('*', '?', '<', '>', '/', ';'):
                query_str = query_str.replace(ch, '')
            file_name = file_name + '-' + query_str
        return save_path, file_name
if __name__ =='__main__':
    d = DownLoader(file_ext='.html')
    import urllib2
    resp = urllib2.urlopen('http://os.it168.com/a2007/1022/973/000000973754.shtml?id=1&b=2')
    resp.content = resp.read()
    print d.handle(resp)