# This module implements the downloader behaviour.

import os
import urllib.request
import socket
import sys
import urllib.request
import urllib.error
from time import time

from .document import Document
from .link import Link


class Downloader:
    """Download a web page and its static assets (js/css/images/ico) to disk,
    rewriting asset links in the saved HTML so they point at the local copies."""

    def __init__(self, timeout=None, encoding=None, path=None):
        """
        :timeout: socket timeout in seconds (defaults to 10 when None)
        :encoding: text encoding used when writing the saved HTML file
        :path: local directory the page is stored under (defaults to ./PageData)
        """
        self.__timeout = 10 if timeout is None else int(timeout)
        self.__encoding = encoding
        self.__path = path

    def __progress(self, existing=None, whole=None, f=None):
        """
        Progress-reporting callback hook (currently a no-op).
        :existing: bytes downloaded so far
        :whole: total bytes
        :f: file size
        """
        pass

    def __request_header(self, url) -> object:
        """
        Build a Request carrying a browser-like User-Agent so naive
        bot-blocking does not reject the download.
        :url: URL the request is built for
        """
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                   ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'}
        return urllib.request.Request(url=url, headers=headers)

    def __check_save_path(self) -> dict:
        """
        Ensure the storage directories exist (create them if missing,
        ignore them if present) and return the local storage paths, e.g.:
        {'js': __path/static/js/, 'css': __path/static/css/, ...}
        """
        if (self.__path is None) or (self.__path == './'):
            main_path = './PageData'
        else:
            main_path = self.__path

        # str.replace returns a new string; the original code discarded the
        # result, so the backslash normalization silently never happened.
        if main_path.endswith('\\'):
            main_path = main_path.replace('\\', '/')
        if not main_path.endswith('/'):
            main_path += '/'

        # Build the normalized storage path for each asset category.
        static_path = {'main_path': os.path.normpath(main_path) + os.sep}
        for category in ['image', 'ico', 'js', 'css']:
            static_path[category] = os.path.normpath(
                os.path.join(main_path, 'static/' + category)) + os.sep

        # Create the storage directories for the static files.
        for directory in static_path.values():
            os.makedirs(directory, exist_ok=True)

        return static_path

    def __legal_name(self, name) -> str:
        """
        Strip whitespace and characters that are illegal in file names;
        return the sanitized name.
        """
        name = ''.join(name.split())  # drop all whitespace
        illegal = {'\\', '/', ':', '*', '\"', '‘', '’', '“', '”', '<', '>', '?', '|'}
        return ''.join(ch for ch in name if ch not in illegal)

    def download(self, url):
        """
        Download the page at :url: together with its static assets and save
        everything under the configured local path.
        Returns True on success, False on failure (the original returned
        None on success, which was falsy just like the failure value).
        """
        request = self.__request_header(url)

        try:
            http = urllib.request.urlopen(request, timeout=self.__timeout)
        except urllib.error.HTTPError:
            print('\n[!!! 无法访问目标资源 ]:\n'
                  '1. 目标接受了连接请求,但无法找到要下载的文件\n'
                  '2. 目标有防御机制,拒绝了下载器的连接\n')
            return False
        except urllib.error.URLError:
            print('\n[!!! 无法访问目标页面 ]:\n'
                  '1. 请检查你的网络连接\n'
                  '2. 确保你提供的是一个可以访问的URL\n')
            return False

        paths = self.__check_save_path()
        if not paths:
            return False

        doc = Document(http.read())
        li = Link('')
        links = doc.get_link_all(True)

        # Relative paths the rewritten HTML will use to reference the
        # downloaded assets. They must match the directories created by
        # __check_save_path (the original had a 'stati/js/' typo that
        # produced dead links in the saved page).
        import_path = {
            'js': 'static/js/',
            'css': 'static/css/',
            'image': 'static/image/',
            'ico': 'static/image/'
        }

        opener = urllib.request.build_opener()
        opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                              'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36')]
        urllib.request.install_opener(opener)
        socket.setdefaulttimeout(self.__timeout)  # timeout for urllib.request.urlretrieve

        for category in links.keys():
            path = paths[category]
            for u in links[category]:
                li.set_link(u)
                fix_url = li.fix_link(url)  # resolve u against the page URL
                print('下载:', u, end='\r')
                file_name = li.get_domainname() + '-' + li.get_filename()
                file_path = path + file_name
                if not os.path.exists(file_path):
                    try:
                        urllib.request.urlretrieve(fix_url, file_path)
                    except Exception:
                        print('\n失败:', fix_url, '\n')
                        continue

                # Point the document's asset reference at the local copy.
                doc.replace(u, import_path[category] + file_name)
                print('完成:', u)
        print('网页已经保存到:', paths['main_path'])
        html_name = self.__legal_name(doc.get_tag_str('title')) + '.html'
        html_path = paths['main_path'] + html_name
        # 'with' guarantees the handle is closed even if write() raises.
        with open(html_path, 'w', encoding=self.__encoding) as f:
            f.write(doc.prettify())
        return True