# -*- coding: utf-8 -*-
# 查看网页编码格式：document.charset
# 爬取网站

import requests
import os
import re
import numpy as np
from lxml import etree

# NOTE(review): appears unused in this script — confirm before removing.
ACCESS_BASE_PATH = '../4.scrapy框架/'
first_path = ''
second_path = 'files'  # directory used to store downloaded files
url_list_result = []  # second-level urls collected from the index page

no_access_url_list = []  # urls that refused access (blacklist)

# Start pages to crawl.
index_url_list = ['https://www.cbsnews.com/'
                  ]
# Request headers: a browser User-Agent to avoid trivial bot blocking.
heads = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}


# 拼接完整路径
# Build an absolute URL out of a (possibly relative) resource reference.
def exe_url(resourceUrl, index_url):
    """Resolve *resourceUrl* against *index_url* and return an absolute URL.

    Rules (in order): protocol-relative '//x' gets an 'https:' scheme;
    './x' and '/x' are appended to the base with the leading dot (or
    nothing) dropped; 'http...'/'https...' URLs pass through untouched;
    any other bare path is appended to the base as-is.
    """
    if resourceUrl.startswith('//'):
        return 'https:' + resourceUrl
    if resourceUrl.startswith('./'):
        return index_url + resourceUrl[1:]
    if resourceUrl.startswith('/'):
        return index_url + resourceUrl[1:]
    if resourceUrl.startswith('http'):
        # already absolute (the 'http' prefix covers 'https' too)
        return resourceUrl
    return index_url + resourceUrl


# 新建爬取网站的文件目录
# Create the directory the crawled files are saved into.
def creat_file(path_param):
    """Ensure directory *path_param* exists (creating parents as needed).

    Uses ``os.makedirs(..., exist_ok=True)`` so the check-then-create race
    of the original ``exists``/``makedirs`` pair is gone; the informational
    message is still only printed when the directory is actually missing.
    """
    if not os.path.exists(path_param):
        print(' > 目标目录不存在，创建：', path_param)
    os.makedirs(path_param, exist_ok=True)


# 根据网址形成文件名称
# Derive a local file name from a URL.
def creat_file_name(webset_param):
    """Return an '.html' file name for *webset_param* with special
    characters stripped via :func:`replace_special_str`."""
    parts = webset_param.split('/')
    if webset_param.endswith('index.html'):
        name = parts[-2] + parts[-1]
    elif webset_param.endswith('.html'):
        name = parts[-1]
    elif webset_param.endswith('/'):
        name = parts[-2] + '.html'
    else:
        name = parts[-2] + parts[-1] + '.html'
    # Drop a leading 'region=world' fragment if present.
    if name.startswith('region=world'):
        name = name.split('region=world')[-1]
    return replace_special_str(name)


# 特殊字符去除
# Strip characters that are invalid in file names.
def replace_special_str(new_file_name):
    """Remove ``\\ / : * ? < > |`` from *new_file_name* and return it.

    One ``str.translate`` pass replaces the original's eight chained
    ``replace`` calls behind a redundant membership check (replacing an
    absent character is already a no-op).
    """
    return new_file_name.translate(str.maketrans('', '', '\\/:*?<>|'))


# 格式化访问路径
# Suffixes that mark a static resource; anything after the suffix
# (query string, fragment) is cut off: 'app.js?v=2' -> 'app.js'.
_RESOURCE_SUFFIXES = ('.js', '.css', '.ico', '.jpg', '.JPG',
                      '.jpeg', '.JPEG', '.png', '.PNG')


# Normalise a resource url.
def formate_url(url_param):
    """Truncate *url_param* right after the first known resource suffix.

    The original duplicated the '.js'/'.css'/'.ico' checks verbatim; a
    single ordered pass over the suffix list is equivalent because the
    truncation is idempotent (re-splitting an already-truncated url at
    the same suffix yields the same string).
    """
    for suffix in _RESOURCE_SUFFIXES:
        if suffix in url_param:
            url_param = url_param.split(suffix)[0] + suffix
    return url_param


# 访问网址，并爬取
# Fetch a url and save its text body to disk.
def file_access_website(access_url, path_param, file_name):
    """Download *access_url* and save it as UTF-8 text at
    ``path_param/file_name``, skipping urls already on disk.

    Best-effort: any failure (network error, timeout, bad write) appends
    the url to the global ``no_access_url_list`` blacklist instead of
    propagating.  Catches ``Exception`` rather than the original bare
    ``except`` so KeyboardInterrupt/SystemExit still abort the crawl.
    """
    target = f'{path_param}/{file_name}'
    if os.path.exists(target):
        return  # already downloaded
    try:
        # timeout so one hung server cannot stall the whole crawl
        result = requests.get(url=access_url, headers=heads, timeout=30)
        try:
            with open(target, mode='w', encoding='utf-8') as file:
                file.write(result.text)  # save the fetched page source
        finally:
            result.close()  # release the connection even if the write fails
    except Exception:
        no_access_url_list.append(access_url)


# 解析需要下载的css、js文件路径
# Download the css/js files referenced by <link href="..."> tags and
# rewrite their references in the page to point at the local copies.
def get_link(page_text, path_param, url_param):
    """Return *page_text* with <link> resources localised and downloaded."""
    tree = etree.HTML(page_text)
    href_list = tree.xpath('//link/@href')  # css / js resources
    print(href_list)

    # local prefix: path_param with its first directory component removed
    local_dir = path_param.replace(path_param.split("/")[1] + '/', '')

    for href in href_list:
        if href in no_access_url_list:
            continue  # refused access before, skip
        if not re.match(r'(\w+):\/\/.+$', href):
            continue  # only absolute scheme://... urls
        if not (('.js' in href) | ('.css' in href) | ('.ico' in href)):
            continue  # not a resource type we mirror
        print("合法url")
        href = formate_url(href)
        file_name = replace_special_str(href.strip())
        print(href)
        # point the page at the local copy
        page_text = page_text.replace(href, local_dir + '/' + file_name)
        href = exe_url(href, url_param)

        # fetch and store the resource
        file_access_website(href, path_param, file_name)

    return page_text


# Download the js files referenced by <script src="..."> tags and rewrite
# their references in the page to point at the local copies.
def get_script(page_text, path_param, url_param):
    """Return *page_text* with <script> resources localised and downloaded."""
    tree = etree.HTML(page_text)
    src_list = tree.xpath('//script/@src')  # script resources
    print(src_list)

    # local prefix: path_param with its first directory component removed
    local_dir = path_param.replace(path_param.split("/")[1] + '/', '')

    for src in src_list:
        if src in no_access_url_list:
            continue  # refused access before, skip
        if not re.match(r'(\w+):\/\/.+$', src):
            continue  # only absolute scheme://... urls
        print("合法url")
        src = formate_url(src)
        file_name = replace_special_str(src.strip())
        print(file_name)
        # point the page at the local copy
        page_text = page_text.replace(src, local_dir + '/' + file_name)
        src = exe_url(src, url_param)

        # fetch and store the resource
        file_access_website(src, path_param, file_name)

    return page_text


# Download the images referenced by <img src="..."> and localise them.
def get_imgs(page_text, path_param, url_param):
    """Return *page_text* with <img> sources rewritten to local copies,
    downloading each image (binary mode) into *path_param*.

    Only absolute (scheme://) sources are handled; urls on the
    ``no_access_url_list`` blacklist are skipped and new failures are
    blacklisted.  Catches ``Exception`` instead of the original bare
    ``except`` so KeyboardInterrupt still aborts, and adds a request
    timeout so a hung server cannot stall the crawl.
    """
    html = etree.HTML(page_text)
    url_list = html.xpath('//img/@src')
    print(url_list)

    for resourceUrl in url_list:
        if resourceUrl in no_access_url_list:
            continue  # refused access before, skip
        if not re.match(r'(\w+):\/\/.+$', resourceUrl):
            continue  # only absolute scheme://... urls
        print("合法url: ", resourceUrl)
        resourceUrl = formate_url(resourceUrl)
        file_name = replace_special_str(resourceUrl.strip())
        print(file_name)
        # point the page at the local copy
        page_text = page_text.replace(
            resourceUrl,
            path_param.replace(path_param.split("/")[1] + '/', '') + '/' + file_name)
        resourceUrl = exe_url(resourceUrl, url_param)

        target = f'{path_param}/{file_name}'
        if os.path.exists(target):
            continue  # already downloaded
        try:
            result = requests.get(url=resourceUrl, headers=heads, timeout=30)
            try:
                with open(target, mode='wb') as file:
                    file.write(result.content)  # raw image bytes
            finally:
                result.close()  # release the connection even on write failure
        except Exception:
            # best-effort: blacklist the url instead of aborting the crawl
            no_access_url_list.append(resourceUrl)

    return page_text


# 03. 爬取网页 html 文件
def get_internal_web(flag, domain_name, url_param, path_param, file_name_param):
    if (url_param.startswith(domain_name)):
        # 01. 新建爬取网站的文件
        creat_file(path_param)

        # 02. 访问网页信息，解析出该网页所包含的url信息
        index_result = requests.get(url=url_param, headers=heads)
        index_page_text = index_result.text
        # print(index_page_text)
        html = etree.HTML(index_page_text)
        url_list = html.xpath('//a/@href')  # html 文件
        url_list = np.unique(url_list)
        print('该网站包含的网址：', url_list)

        # 遍历首页中包含的 url_list
        for resourceUrl in url_list:
            if (resourceUrl.startswith('#') | (resourceUrl.startswith('/') & (len(resourceUrl) < 2))):
                pass
            elif ((len(resourceUrl.split('.com')[-1]) == 0) | (resourceUrl.startswith('domain_name'))):
                # 替换网址信息
                index_page_text = index_page_text.replace('<a href="' + resourceUrl + '"', '<a href="' + '#' + '"')
                pass
            else:
                new_file_webset = exe_url(resourceUrl, url_param)
                print('拼接后的网址：', new_file_webset)
                if (new_file_webset in url_list_result):
                    pass
                else:
                    # 递归访问网页
                    if (flag):
                        url_list_result.append(new_file_webset)
                    # get_internal_web(url_param, path_param)
                    # 跳转网页的 html 文件
                    new_file_name = creat_file_name(new_file_webset)
                    print('传过来的文件名：', file_name_param)
                    if (new_file_name.startswith('region=world')):
                        new_file_name = new_file_name.split('region=world')[-1]
                    print('新生成的文件名：', new_file_name)

                    # 替换网址信息
                    index_page_text = index_page_text.replace(resourceUrl, './' + new_file_name)

        print(len(url_list_result))
        index_page_text = get_link(index_page_text, path_param, url_param)
        index_page_text = get_script(index_page_text, path_param, url_param)
        index_page_text = get_imgs(index_page_text, path_param, url_param)
        index_page_text = index_page_text.replace('/./', './')
        if (file_name_param.startswith('region=world')):
            file_name_param = file_name_param.split('region=world')[-1]
        with open(f'./{path_param.split("/")[1]}/{file_name_param}', mode='w', encoding='UTF-8') as index_file:
            index_file.write(index_page_text)  # 读取爬到网页的源代码
        index_result.close()
        print('over')



if __name__ == "__main__":
    for index_url in index_url_list:
        first_path = index_url.split('.com')[-2].split('/')[-1].split('.')[-1]
        first_path = './' + first_path + '_files/files'
        domain_name = index_url.split('.com')[0]
        print('新建的文件名：', index_url.split('.com')[-2].split('/')[-1].split('.')[-1] + '.html')
        try:
            get_internal_web(True, domain_name, index_url, first_path,
                             index_url.split('.com')[-2].split('/')[-1].split('.')[-1] + '.html')
            i = 1
            for index_url in url_list_result:
                i = i + 1
                print(f'{i}二层网址：', index_url)
                get_internal_web(False, domain_name, index_url, first_path, creat_file_name(index_url))
        except:
            pass
        else:
            pass
        finally:
            pass
        print('over')
