# encoding: utf-8
import os
import re
import socket
import urllib.error
import urllib.request
from shutil import rmtree
from urllib.parse import urlparse

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit


class DownloadWebPage:
    """Download an HTML page together with its linked JS/CSS assets.

    The page is saved under ``path``; scripts and stylesheets are stored
    under ``path + static_path`` and the saved HTML is rewritten to point
    at the local copies.
    """

    def __init__(self, url, path='./data', static_path='/static', encoding=None, timeout=30):
        self.__url = url  # URL of the target HTML page
        self.__path = path  # directory the download is saved under
        self.__static_path = static_path  # sub-directory for static assets (js/css)
        self.__encoding = encoding  # forced document encoding; None = auto-detect
        self.__timeout = timeout  # per-request timeout in seconds

    def set_url(self, url):
        self.__url = url

    def set_path(self, path='./data'):
        self.__path = path

    def get_encode(self, data) -> str:
        """
        Return the document encoding.
        :param data: raw document bytes
        :return: the configured encoding, or the one UnicodeDammit detects
        """
        if self.__encoding is None:
            return UnicodeDammit(data).original_encoding
        return self.__encoding

    def __read(self, url) -> str:
        """
        Fetch and decode a document.
        :param url: absolute URL to fetch
        :return: decoded document text, or None when the request fails
        """
        try:
            with urllib.request.urlopen(url, timeout=self.__timeout) as doc:
                data = doc.read()
                return data.decode(self.get_encode(data))
        except urllib.error.HTTPError as e:
            print(url+'获取失败')
            print(e)
        except socket.timeout as e:
            print(url+'获取超时')
            print(e)
        # Best-effort: callers must handle a failed download (None).
        return None

    def __check_link(self, link) -> str:
        """
        Normalize a link: relative links (missing scheme and/or host) are
        resolved against the page URL; absolute links pass through untouched.
        :param link: link as found in the document
        :return: absolute URL string
        """
        link_url = urlparse(link)
        # BUG FIX: the original used `is ''` / `is not ''`, which compares
        # object identity and only worked by CPython string interning.
        if not link_url.scheme or not link_url.netloc:
            original_url = urlparse(self.__url)
            netloc = link_url.netloc or original_url.netloc
            scheme = link_url.scheme or original_url.scheme
            return scheme + '://' + netloc + link_url.path
        return link

    def __get_bsobj(self, content):
        """
        Parse document text into a BeautifulSoup tree.
        :param content: document text (or bytes)
        :return: BeautifulSoup object using the configured encoding
        """
        return BeautifulSoup(content, features="html.parser", from_encoding=self.__encoding)

    def __get_filename(self, link) -> str:
        """
        Derive the local file name for a link.
        .js/.css names are kept as-is; page URLs keep an existing .html name,
        otherwise get '.html' appended ('index.html' for a bare directory URL).
        :param link: absolute URL
        :return: local file name
        """
        name = os.path.basename(urlparse(link).path)
        if re.match(r'[^/]*\.(js|css)[^/]*$', name):
            return name
        if re.match(r'[^/]*\.html[^/]*$', name):
            # BUG FIX: names already ending in .html previously fell through
            # every branch and returned None, crashing the caller's
            # path concatenation.
            return name
        if name == '':
            return 'index.html'
        return name + '.html'

    def __save_assets(self, bs, attr, kind, nodes):
        """
        Download a set of asset nodes and rewrite their links in the tree.
        :param bs: parsed page (unused directly; kept for clarity of intent)
        :param attr: attribute holding the link ('src' or 'href')
        :param kind: asset sub-directory ('/js' or '/css')
        :param nodes: iterable of tags to process
        """
        local_path = self.__path + self.__static_path + kind
        os.makedirs(local_path)
        for node in nodes:
            link_path = self.__check_link(node.get(attr))
            file_name = self.__get_filename(link_path)
            print('开始下载'+file_name+'...')
            data = self.__read(link_path)
            if data is None:
                # Download failed: keep the original remote link untouched.
                continue
            with open(local_path + '/' + file_name, 'w+', encoding='utf-8') as f:
                f.write(data)
            node[attr] = '.' + self.__static_path + kind + '/' + file_name
            print('完成'+file_name+'.')

    def save(self):
        """
        Save the page and its script/stylesheet assets under the configured
        path, rewriting asset links to the local copies. An existing target
        directory is wiped first.
        """
        if os.path.isdir(self.__path):
            # Start from a clean directory.
            rmtree(self.__path)

        page = self.__read(self.__url)
        if page is None:
            # BUG FIX: the original passed None to BeautifulSoup here,
            # raising a TypeError on a failed download.
            print(self.__url+'获取失败')
            return
        bs = self.__get_bsobj(page)
        os.makedirs(self.__path)

        # JS
        self.__save_assets(bs, 'src', '/js', bs.find_all('script', src=True))
        # CSS
        self.__save_assets(bs, 'href', '/css', bs.find_all('link', rel='stylesheet', href=True))

        # Page
        file_name = self.__get_filename(self.__url)
        page_path = self.__path + '/' + file_name
        print('开始下载'+file_name+'...')
        with open(page_path, 'w+', encoding='utf-8') as f:
            f.write(str(bs))
            print('完成'+file_name+'.')